author     Mike Pagano <mpagano@gentoo.org>  2014-06-19 18:58:38 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2014-06-19 18:58:38 -0400
commit     dfba7a1cd3503f660f93fbc5523db817cda81909
tree       a5597c0062cb0e57973e8113b6ead36955d0f150
parent     Adding generic patches for all versions
download   linux-patches-dfba7a1cd3503f660f93fbc5523db817cda81909.tar.gz
           linux-patches-dfba7a1cd3503f660f93fbc5523db817cda81909.tar.bz2
           linux-patches-dfba7a1cd3503f660f93fbc5523db817cda81909.zip

Adding patches for Linux 3.12.X (tag: 3.12-25)
-rw-r--r--  0000_README | 149
-rw-r--r--  1000_linux-3.12.1.patch | 583
-rw-r--r--  1001_linux-3.12.2.patch | 3790
-rw-r--r--  1002_linux-3.12.3.patch | 8639
-rw-r--r--  1003_linux-3.12.4.patch | 4725
-rw-r--r--  1004_linux-3.12.5.patch | 2489
-rw-r--r--  1005_linux-3.12.6.patch | 4485
-rw-r--r--  1006_linux-3.12.7.patch | 5182
-rw-r--r--  1007_linux-3.12.8.patch | 2622
-rw-r--r--  1008_linux-3.12.9.patch | 761
-rw-r--r--  1009_linux-3.12.10.patch | 5391
-rw-r--r--  1010_linux-3.12.11.patch | 4640
-rw-r--r--  1011_linux-3.12.12.patch | 1111
-rw-r--r--  1012_linux-3.12.13.patch | 2461
-rw-r--r--  1013_linux-3.12.14.patch | 5950
-rw-r--r--  1014_linux-3.12.15.patch | 7386
-rw-r--r--  1015_linux-3.12.16.patch | 2135
-rw-r--r--  1016_linux-3.12.17.patch | 1896
-rw-r--r--  1017_linux-3.12.18.patch | 3245
-rw-r--r--  1018_linux-3.12.19.patch | 1760
-rw-r--r--  1019_linux-3.12.20.patch | 7196
-rw-r--r--  1020_linux-3.12.21.patch | 4218
-rw-r--r--  1021_linux-3.12.22.patch | 5096
-rw-r--r--  1500_XATTR_USER_PREFIX.patch | 54
-rw-r--r--  1500_selinux-add-SOCK_DIAG_BY_FAMILY-to-the-list-of-netli.patch | 56
-rw-r--r--  1700_enable-thinkpad-micled.patch | 23
-rw-r--r--  1900_modify-pipe_write-to-first-call-sb_start_write_try-a.patch | 32
-rw-r--r--  2400_kcopy-patch-for-infiniband-driver.patch | 731
-rw-r--r--  2700_ThinkPad-30-brightness-control-fix.patch | 67
-rw-r--r--  2900_dev-root-proc-mount-fix.patch | 29
-rw-r--r--  2905_s2disk-resume-image-fix.patch | 24
-rw-r--r--  4200_fbcondecor-0.9.6.patch | 2179
-rw-r--r--  4400_fs-userns-change-inode_capable-to-capable_wrt_inode_uidgid.patch | 160
-rw-r--r--  4500_support-for-pogoplug-e02.patch | 172
-rw-r--r--  5000_BFQ-1-block-cgroups-kconfig-build-bits-for-v6r2-3.11.patch | 97
-rw-r--r--  5000_BFQ-2-block-introduce-the-v6r2-I-O-sched-for-3.11.patch | 15773
-rw-r--r--  5000_BFQ-3-block-add-Early-Queue-Merge-EQM-v6r2-for-3.11.0.patch | 11049
-rwxr-xr-x  5000_BFQ-4-block-Switch-from-BFQ-v6r2-for-3.11.0-to-BFQ-v6r2-fo.patch | 362
38 files changed, 96717 insertions(+), 1 deletion(-)
diff --git a/0000_README b/0000_README
index 90189932..b0d0097e 100644
--- a/0000_README
+++ b/0000_README
@@ -1,6 +1,6 @@
README
--------------------------------------------------------------------------
-This patchset is to be the series of patches for gentoo-sources.
+This patchset is to be the 2.6 series of gentoo-sources.
It is designed for cross-compatibility, fixes and stability, with performance
and additional features/driver support being a second.
@@ -42,7 +42,154 @@ EXPERIMENTAL
Individual Patch Descriptions:
--------------------------------------------------------------------------
+Patch: 1000_linux-3.12.1.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.1
+
+Patch: 1001_linux-3.12.2.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.2
+
+Patch: 1002_linux-3.12.3.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.3
+
+Patch: 1003_linux-3.12.4.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.4
+
+Patch: 1004_linux-3.12.5.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.5
+
+Patch: 1005_linux-3.12.6.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.6
+
+Patch: 1006_linux-3.12.7.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.7
+
+Patch: 1007_linux-3.12.8.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.8
+
+Patch: 1008_linux-3.12.9.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.9
+
+Patch: 1009_linux-3.12.10.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.10
+
+Patch: 1010_linux-3.12.11.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.11
+
+Patch: 1011_linux-3.12.12.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.12
+
+Patch: 1012_linux-3.12.13.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.13
+
+Patch: 1013_linux-3.12.14.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.14
+
+Patch: 1014_linux-3.12.15.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.15
+
+Patch: 1015_linux-3.12.16.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.16
+
+Patch: 1016_linux-3.12.17.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.17
+
+Patch: 1017_linux-3.12.18.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.18
+
+Patch: 1018_linux-3.12.19.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.19
+
+Patch: 1019_linux-3.12.20.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.20
+
+Patch: 1020_linux-3.12.21.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.21
+
+Patch: 1021_linux-3.12.22.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.22
+
+Patch: 1500_XATTR_USER_PREFIX.patch
+From: https://bugs.gentoo.org/show_bug.cgi?id=470644
+Desc: Support for namespace user.pax.* on tmpfs.
+
+Patch: 1500_selinux-add-SOCK_DIAG_BY_FAMILY-to-the-list-of-netli.patch
+From: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=6a96e15096da6e7491107321cfa660c7c2aa119d
+Desc: selinux: add SOCK_DIAG_BY_FAMILY to the list of netlink message types
+
+Patch: 1700_enable-thinkpad-micled.patch
+From: https://bugs.gentoo.org/show_bug.cgi?id=449248
+Desc: Enable mic mute LED in ThinkPads
+
+Patch: 1900_modify-pipe_write-to-first-call-sb_start_write_try-a.patch
+From: https://bugs.gentoo.org/show_bug.cgi?id=493002
+Desc: Modify pipe_write to first call sb_start_write_try(), skip time update on fail
+
+Patch: 2400_kcopy-patch-for-infiniband-driver.patch
+From: Alexey Shvetsov <alexxy@gentoo.org>
+Desc: Zero copy for infiniband psm userspace driver
+
+Patch: 2700_ThinkPad-30-brightness-control-fix.patch
+From: Seth Forshee <seth.forshee@canonical.com>
+Desc: ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads
+
+Patch: 2900_dev-root-proc-mount-fix.patch
+From: https://bugs.gentoo.org/show_bug.cgi?id=438380
+Desc: Ensure that /dev/root doesn't appear in /proc/mounts when booting without an initramfs.
+
+Patch: 2905_s2disk-resume-image-fix.patch
+From: Al Viro <viro <at> ZenIV.linux.org.uk>
+Desc: Do not lock when UMH is waiting on current thread spawned by linuxrc. (bug #481344)
+
+Patch: 4200_fbcondecor-0.9.6.patch
+From: http://dev.gentoo.org/~spock
+Desc: Bootsplash successor by Michal Januszewski ported by Jeremy (bug #452574)
+
+Patch: 4400_fs-userns-change-inode_capable-to-capable_wrt_inode_uidgid.patch
+From: https://bugs.gentoo.org/show_bug.cgi?id=512980
+Desc: Change inode_capable to capable_wrt_inode_uidgid, fixes CVE-2014-4014
+
+Patch: 4500_support-for-pogoplug-e02.patch
+From: Christoph Junghans <ottxor@gentoo.org>
+Desc: Support for Pogoplug e02 (bug #460350), adjusted to be opt-in by TomWij.
Patch: 4567_distro-Gentoo-Kconfig.patch
From: Tom Wijsman <TomWij@gentoo.org>
Desc: Add Gentoo Linux support config settings and defaults.
+
+Patch: 5000_BFQ-1-block-cgroups-kconfig-build-bits-for-v6r2-3.11.patch
+From: http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc: BFQ v6r2 patch 1 for 3.11: Build, cgroups and kconfig bits
+
+Patch: 5000_BFQ-2-block-introduce-the-v6r2-I-O-sched-for-3.11.patch
+From: http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc: BFQ v6r2 patch 2 for 3.11: BFQ Scheduler
+
+Patch: 5000_BFQ-3-block-add-Early-Queue-Merge-EQM-v6r2-for-3.11.0.patch
+From: http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc: BFQ v6r2 patch 3 for 3.11: Early Queue Merge (EQM)
+
+Patch: 5000_BFQ-4-block-Switch-from-BFQ-v6r2-for-3.11.0-to-BFQ-v6r2-fo.patch
+From: http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc: BFQ v6r2 for 3.11.0 to BFQ v6r2 for 3.12.0.
diff --git a/1000_linux-3.12.1.patch b/1000_linux-3.12.1.patch
new file mode 100644
index 00000000..7fd2814c
--- /dev/null
+++ b/1000_linux-3.12.1.patch
@@ -0,0 +1,583 @@
+diff --git a/Makefile b/Makefile
+index 67077ad..eb29ec7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c
+index 494d050..a6dc56e 100644
+--- a/drivers/misc/atmel_pwm.c
++++ b/drivers/misc/atmel_pwm.c
+@@ -90,8 +90,10 @@ int pwm_channel_alloc(int index, struct pwm_channel *ch)
+ unsigned long flags;
+ int status = 0;
+
+- /* insist on PWM init, with this signal pinned out */
+- if (!pwm || !(pwm->mask & 1 << index))
++ if (!pwm)
++ return -EPROBE_DEFER;
++
++ if (!(pwm->mask & 1 << index))
+ return -ENODEV;
+
+ if (index < 0 || index >= PWM_NCHAN || !ch)
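
The atmel_pwm hunk above splits the old combined check so that a not-yet-probed controller now yields -EPROBE_DEFER rather than -ENODEV. A minimal sketch of how a consumer driver is expected to react to that return value (hypothetical example_probe(), not part of the patch):

/* Hypothetical consumer: -EPROBE_DEFER asks the driver core to re-run
 * this probe later, once the PWM controller has bound; any other
 * error is a real failure.
 */
static int example_probe(struct platform_device *pdev)
{
        struct pwm_channel ch;
        int ret;

        ret = pwm_channel_alloc(0, &ch);
        if (ret)
                return ret;     /* -EPROBE_DEFER is retried, -ENODEV is final */

        /* ... remaining device setup ... */
        return 0;
}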
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
+index 9c89dc8..632b318 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
+@@ -1599,7 +1599,8 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
+ flits = skb_transport_offset(skb) / 8;
+ sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
+ sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+- skb->tail - skb->transport_header,
++ skb_tail_pointer(skb) -
++ skb_transport_header(skb),
+ adap->pdev);
+ if (need_skb_unmap()) {
+ setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+index ea20182..bb11624 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+@@ -1691,7 +1691,7 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
+ vp_oper->vlan_idx = NO_INDX;
+ }
+ if (NO_INDX != vp_oper->mac_idx) {
+- __mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx);
++ __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
+ vp_oper->mac_idx = NO_INDX;
+ }
+ }
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 9fbdfcd..bbc9cb8 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1118,11 +1118,6 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
+ {
+ struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+
+- mutex_lock(&vi->config_lock);
+-
+- if (!vi->config_enable)
+- goto done;
+-
+ switch(action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+@@ -1136,8 +1131,6 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
+ break;
+ }
+
+-done:
+- mutex_unlock(&vi->config_lock);
+ return NOTIFY_OK;
+ }
+
+@@ -1699,6 +1692,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
+ struct virtnet_info *vi = vdev->priv;
+ int i;
+
++ unregister_hotcpu_notifier(&vi->nb);
++
+ /* Prevent config work handler from accessing the device */
+ mutex_lock(&vi->config_lock);
+ vi->config_enable = false;
+@@ -1747,6 +1742,10 @@ static int virtnet_restore(struct virtio_device *vdev)
+ virtnet_set_queues(vi, vi->curr_queue_pairs);
+ rtnl_unlock();
+
++ err = register_hotcpu_notifier(&vi->nb);
++ if (err)
++ return err;
++
+ return 0;
+ }
+ #endif
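
The virtio_net hunks drop the config_lock from the CPU hotplug callback and instead tie the notifier's lifetime to freeze/restore. The pairing, sketched with the 3.12-era hotcpu API (hypothetical example_* wrappers; the real handlers do more work):

static int example_freeze(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        /* tear the callback down first so it cannot race with the
         * device being quiesced below */
        unregister_hotcpu_notifier(&vi->nb);
        /* ... stop queues, disable config access ... */
        return 0;
}

static int example_restore(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        /* ... re-enable config access, restart queues ... */
        return register_hotcpu_notifier(&vi->nb);      /* re-arm callback */
}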
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 5715318..400fea1 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -163,6 +163,7 @@ struct xenvif {
+ unsigned long credit_usec;
+ unsigned long remaining_credit;
+ struct timer_list credit_timeout;
++ u64 credit_window_start;
+
+ /* Statistics */
+ unsigned long rx_gso_checksum_fixup;
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 01bb854..459935a 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -312,8 +312,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
+ vif->credit_bytes = vif->remaining_credit = ~0UL;
+ vif->credit_usec = 0UL;
+ init_timer(&vif->credit_timeout);
+- /* Initialize 'expires' now: it's used to track the credit window. */
+- vif->credit_timeout.expires = jiffies;
++ vif->credit_window_start = get_jiffies_64();
+
+ dev->netdev_ops = &xenvif_netdev_ops;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index f3e591c..900da4b 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -1185,9 +1185,8 @@ out:
+
+ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+ {
+- unsigned long now = jiffies;
+- unsigned long next_credit =
+- vif->credit_timeout.expires +
++ u64 now = get_jiffies_64();
++ u64 next_credit = vif->credit_window_start +
+ msecs_to_jiffies(vif->credit_usec / 1000);
+
+ /* Timer could already be pending in rare cases. */
+@@ -1195,8 +1194,8 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+ return true;
+
+ /* Passed the point where we can replenish credit? */
+- if (time_after_eq(now, next_credit)) {
+- vif->credit_timeout.expires = now;
++ if (time_after_eq64(now, next_credit)) {
++ vif->credit_window_start = now;
+ tx_add_credit(vif);
+ }
+
+@@ -1208,6 +1207,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+ tx_credit_callback;
+ mod_timer(&vif->credit_timeout,
+ next_credit);
++ vif->credit_window_start = next_credit;
+
+ return true;
+ }
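
The xen-netback change stops borrowing the timer's 32-bit expires field and tracks the credit window start as a u64, because unsigned long jiffies wrap after roughly 49 days on 32-bit machines at HZ=1000. A sketch of the wrap-safe pattern, assuming only the helpers the hunk itself uses:

/* Sketch, not from the patch: a replenish window kept in 64-bit
 * jiffies.  time_after_eq64() compares correctly even once the
 * 32-bit jiffies counter has wrapped.
 */
static u64 window_start;

static bool window_elapsed(unsigned long window_usec)
{
        u64 now = get_jiffies_64();
        u64 next = window_start + msecs_to_jiffies(window_usec / 1000);

        if (time_after_eq64(now, next)) {
                window_start = now;     /* open a fresh window */
                return true;
        }
        return false;
}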
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index d6a8d23..f20a044 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1033,6 +1033,7 @@ static int register_root_hub(struct usb_hcd *hcd)
+ dev_name(&usb_dev->dev), retval);
+ return retval;
+ }
++ usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
+ }
+
+ retval = usb_new_device (usb_dev);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index e6b682c..879651c 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -135,7 +135,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
+ return usb_get_intfdata(hdev->actconfig->interface[0]);
+ }
+
+-static int usb_device_supports_lpm(struct usb_device *udev)
++int usb_device_supports_lpm(struct usb_device *udev)
+ {
+ /* USB 2.1 (and greater) devices indicate LPM support through
+ * their USB 2.0 Extended Capabilities BOS descriptor.
+@@ -156,6 +156,11 @@ static int usb_device_supports_lpm(struct usb_device *udev)
+ "Power management will be impacted.\n");
+ return 0;
+ }
++
++ /* udev is root hub */
++ if (!udev->parent)
++ return 1;
++
+ if (udev->parent->lpm_capable)
+ return 1;
+
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index 8238577..c493836 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -35,6 +35,7 @@ extern int usb_get_device_descriptor(struct usb_device *dev,
+ unsigned int size);
+ extern int usb_get_bos_descriptor(struct usb_device *dev);
+ extern void usb_release_bos_descriptor(struct usb_device *dev);
++extern int usb_device_supports_lpm(struct usb_device *udev);
+ extern char *usb_cache_string(struct usb_device *udev, int index);
+ extern int usb_set_configuration(struct usb_device *dev, int configuration);
+ extern int usb_choose_configuration(struct usb_device *udev);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index acaee06..c3d9485 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1376,6 +1376,23 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1545, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1546, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1547, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1565, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1566, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1567, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1589, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1590, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1591, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1592, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1594, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
+index 0393d82..f7447f7 100644
+--- a/drivers/video/backlight/atmel-pwm-bl.c
++++ b/drivers/video/backlight/atmel-pwm-bl.c
+@@ -118,7 +118,7 @@ static const struct backlight_ops atmel_pwm_bl_ops = {
+ .update_status = atmel_pwm_bl_set_intensity,
+ };
+
+-static int __init atmel_pwm_bl_probe(struct platform_device *pdev)
++static int atmel_pwm_bl_probe(struct platform_device *pdev)
+ {
+ struct backlight_properties props;
+ const struct atmel_pwm_bl_platform_data *pdata;
+@@ -202,7 +202,7 @@ err_free_mem:
+ return retval;
+ }
+
+-static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
++static int atmel_pwm_bl_remove(struct platform_device *pdev)
+ {
+ struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
+
+@@ -220,10 +220,11 @@ static struct platform_driver atmel_pwm_bl_driver = {
+ .name = "atmel-pwm-bl",
+ },
+ /* REVISIT add suspend() and resume() */
+- .remove = __exit_p(atmel_pwm_bl_remove),
++ .probe = atmel_pwm_bl_probe,
++ .remove = atmel_pwm_bl_remove,
+ };
+
+-module_platform_driver_probe(atmel_pwm_bl_driver, atmel_pwm_bl_probe);
++module_platform_driver(atmel_pwm_bl_driver);
+
+ MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
+ MODULE_DESCRIPTION("Atmel PWM backlight driver");
+diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
+index 8ac99b8..8d456dc 100644
+--- a/drivers/video/hyperv_fb.c
++++ b/drivers/video/hyperv_fb.c
+@@ -795,12 +795,21 @@ static int hvfb_remove(struct hv_device *hdev)
+ }
+
+
++static DEFINE_PCI_DEVICE_TABLE(pci_stub_id_table) = {
++ {
++ .vendor = PCI_VENDOR_ID_MICROSOFT,
++ .device = PCI_DEVICE_ID_HYPERV_VIDEO,
++ },
++ { /* end of list */ }
++};
++
+ static const struct hv_vmbus_device_id id_table[] = {
+ /* Synthetic Video Device GUID */
+ {HV_SYNTHVID_GUID},
+ {}
+ };
+
++MODULE_DEVICE_TABLE(pci, pci_stub_id_table);
+ MODULE_DEVICE_TABLE(vmbus, id_table);
+
+ static struct hv_driver hvfb_drv = {
+@@ -810,14 +819,43 @@ static struct hv_driver hvfb_drv = {
+ .remove = hvfb_remove,
+ };
+
++static int hvfb_pci_stub_probe(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ return 0;
++}
++
++static void hvfb_pci_stub_remove(struct pci_dev *pdev)
++{
++}
++
++static struct pci_driver hvfb_pci_stub_driver = {
++ .name = KBUILD_MODNAME,
++ .id_table = pci_stub_id_table,
++ .probe = hvfb_pci_stub_probe,
++ .remove = hvfb_pci_stub_remove,
++};
+
+ static int __init hvfb_drv_init(void)
+ {
+- return vmbus_driver_register(&hvfb_drv);
++ int ret;
++
++ ret = vmbus_driver_register(&hvfb_drv);
++ if (ret != 0)
++ return ret;
++
++ ret = pci_register_driver(&hvfb_pci_stub_driver);
++ if (ret != 0) {
++ vmbus_driver_unregister(&hvfb_drv);
++ return ret;
++ }
++
++ return 0;
+ }
+
+ static void __exit hvfb_drv_exit(void)
+ {
++ pci_unregister_driver(&hvfb_pci_stub_driver);
+ vmbus_driver_unregister(&hvfb_drv);
+ }
+
+diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
+index 48ec25a..5e661a9 100644
+--- a/include/net/ip6_fib.h
++++ b/include/net/ip6_fib.h
+@@ -165,6 +165,7 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
+ static inline void rt6_clean_expires(struct rt6_info *rt)
+ {
+ rt->rt6i_flags &= ~RTF_EXPIRES;
++ rt->dst.expires = 0;
+ }
+
+ static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 7974ba2..b778e96 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -843,9 +843,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+ if (isspace(ch)) {
+ parser->buffer[parser->idx] = 0;
+ parser->cont = false;
+- } else {
++ } else if (parser->idx < parser->size - 1) {
+ parser->cont = true;
+ parser->buffer[parser->idx++] = ch;
++ } else {
++ ret = -EINVAL;
++ goto out;
+ }
+
+ *ppos += read;
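
The trace.c hunk bounds parser->idx so an over-long token now fails with -EINVAL instead of writing one byte past the buffer. The same guard in isolation (illustrative parser_append() helper, not a kernel function):

/* Append ch to a fixed-size buffer that must stay NUL-terminatable:
 * "idx < size - 1" reserves the final byte for the trailing '\0'.
 */
static int parser_append(char *buf, unsigned int size,
                         unsigned int *idx, char ch)
{
        if (*idx < size - 1) {
                buf[(*idx)++] = ch;
                return 0;
        }
        return -EINVAL;         /* token too long: refuse to overflow */
}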
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 8d7d0dd..143b6fd 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -40,7 +40,7 @@ again:
+ struct iphdr _iph;
+ ip:
+ iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+- if (!iph)
++ if (!iph || iph->ihl < 5)
+ return false;
+
+ if (ip_is_fragment(iph))
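
The flow_dissector fix rejects IPv4 headers whose ihl is below the legal minimum: ihl counts 32-bit words, so any value under 5 (20 bytes) would make the later nhoff + iph->ihl * 4 arithmetic land inside the fixed header. The check on its own (sketch):

/* Minimal IPv4 header sanity check.  5 words == the 20-byte fixed
 * header, the smallest valid IPv4 header; options only grow it.
 */
static bool ipv4_header_ok(const struct iphdr *iph)
{
        return iph && iph->ihl >= 5;
}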
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index a16b01b..068c8fb 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2856,7 +2856,8 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
+ * left edge of the send window.
+ * See draft-ietf-tcplw-high-performance-00, section 3.3.
+ */
+- if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
++ if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
++ flag & FLAG_ACKED)
+ seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
+
+ if (seq_rtt < 0)
+@@ -2871,14 +2872,19 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
+ }
+
+ /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
+-static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
++static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ s32 seq_rtt = -1;
+
+- if (tp->lsndtime && !tp->total_retrans)
+- seq_rtt = tcp_time_stamp - tp->lsndtime;
+- tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
++ if (synack_stamp && !tp->total_retrans)
++ seq_rtt = tcp_time_stamp - synack_stamp;
++
++ /* If the ACK acks both the SYNACK and the (Fast Open'd) data packets
++ * sent in SYN_RECV, SYNACK RTT is the smooth RTT computed in tcp_ack()
++ */
++ if (!tp->srtt)
++ tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
+ }
+
+ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+@@ -2981,6 +2987,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
+ s32 seq_rtt = -1;
+ s32 ca_seq_rtt = -1;
+ ktime_t last_ackt = net_invalid_timestamp();
++ bool rtt_update;
+
+ while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
+ struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+@@ -3057,14 +3064,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
+ if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
+ flag |= FLAG_SACK_RENEGING;
+
+- if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) ||
+- (flag & FLAG_ACKED))
+- tcp_rearm_rto(sk);
++ rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt);
+
+ if (flag & FLAG_ACKED) {
+ const struct tcp_congestion_ops *ca_ops
+ = inet_csk(sk)->icsk_ca_ops;
+
++ tcp_rearm_rto(sk);
+ if (unlikely(icsk->icsk_mtup.probe_size &&
+ !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
+ tcp_mtup_probe_success(sk);
+@@ -3103,6 +3109,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
+
+ ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
+ }
++ } else if (skb && rtt_update && sack_rtt >= 0 &&
++ sack_rtt > (s32)(now - TCP_SKB_CB(skb)->when)) {
++ /* Do not re-arm RTO if the sack RTT is measured from data sent
++ * after when the head was last (re)transmitted. Otherwise the
++ * timeout may continue to extend in loss recovery.
++ */
++ tcp_rearm_rto(sk);
+ }
+
+ #if FASTRETRANS_DEBUG > 0
+@@ -5587,6 +5600,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req;
+ int queued = 0;
+ bool acceptable;
++ u32 synack_stamp;
+
+ tp->rx_opt.saw_tstamp = 0;
+
+@@ -5669,9 +5683,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ * so release it.
+ */
+ if (req) {
++ synack_stamp = tcp_rsk(req)->snt_synack;
+ tp->total_retrans = req->num_retrans;
+ reqsk_fastopen_remove(sk, req, false);
+ } else {
++ synack_stamp = tp->lsndtime;
+ /* Make sure socket is routed, for correct metrics. */
+ icsk->icsk_af_ops->rebuild_header(sk);
+ tcp_init_congestion_control(sk);
+@@ -5694,7 +5710,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
+ tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
+ tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+- tcp_synack_rtt_meas(sk, req);
++ tcp_synack_rtt_meas(sk, synack_stamp);
+
+ if (tp->rx_opt.tstamp_ok)
+ tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
+index 3a7525e..533c58a 100644
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -18,6 +18,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+ {
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
++ unsigned int sum_truesize = 0;
+ struct tcphdr *th;
+ unsigned int thlen;
+ unsigned int seq;
+@@ -102,13 +103,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+ if (copy_destructor) {
+ skb->destructor = gso_skb->destructor;
+ skb->sk = gso_skb->sk;
+- /* {tcp|sock}_wfree() use exact truesize accounting :
+- * sum(skb->truesize) MUST be exactly be gso_skb->truesize
+- * So we account mss bytes of 'true size' for each segment.
+- * The last segment will contain the remaining.
+- */
+- skb->truesize = mss;
+- gso_skb->truesize -= mss;
++ sum_truesize += skb->truesize;
+ }
+ skb = skb->next;
+ th = tcp_hdr(skb);
+@@ -125,7 +120,9 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+ if (copy_destructor) {
+ swap(gso_skb->sk, skb->sk);
+ swap(gso_skb->destructor, skb->destructor);
+- swap(gso_skb->truesize, skb->truesize);
++ sum_truesize += skb->truesize;
++ atomic_add(sum_truesize - gso_skb->truesize,
++ &skb->sk->sk_wmem_alloc);
+ }
+
+ delta = htonl(oldlen + (skb_tail_pointer(skb) -
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index f54e3a1..04e17b3 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1087,10 +1087,13 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
+ if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
+ return NULL;
+
+- if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
+- return dst;
++ if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
++ return NULL;
+
+- return NULL;
++ if (rt6_check_expired(rt))
++ return NULL;
++
++ return dst;
+ }
+
+ static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 666c668..1a6eef3 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -860,7 +860,6 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
+ (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
+ return;
+
+- BUG_ON(asoc->peer.primary_path == NULL);
+ sctp_unhash_established(asoc);
+ sctp_association_free(asoc);
+ }
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 50173d4..8d2d01b 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -740,9 +740,10 @@ static int hdmi_manual_setup_channel_mapping(struct hda_codec *codec,
+ static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
+ {
+ int i;
++ int ordered_ca = get_channel_allocation_order(ca);
+ for (i = 0; i < 8; i++) {
+- if (i < channel_allocations[ca].channels)
+- map[i] = from_cea_slot((hdmi_channel_mapping[ca][i] >> 4) & 0x0f);
++ if (i < channel_allocations[ordered_ca].channels)
++ map[i] = from_cea_slot(hdmi_channel_mapping[ca][i] & 0x0f);
+ else
+ map[i] = 0;
+ }
diff --git a/1001_linux-3.12.2.patch b/1001_linux-3.12.2.patch
new file mode 100644
index 00000000..461ca3ce
--- /dev/null
+++ b/1001_linux-3.12.2.patch
@@ -0,0 +1,3790 @@
+diff --git a/Makefile b/Makefile
+index eb29ec754a9e..e6e72b629da7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index b0de86b56c13..cb79a5dd6d96 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -307,6 +307,17 @@ out:
+ return err;
+ }
+
++static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
++{
++ if (!is_vmalloc_addr(kaddr)) {
++ BUG_ON(!virt_addr_valid(kaddr));
++ return __pa(kaddr);
++ } else {
++ return page_to_phys(vmalloc_to_page(kaddr)) +
++ offset_in_page(kaddr);
++ }
++}
++
+ /**
+ * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
+ * @from: The virtual kernel start address of the range
+@@ -318,16 +329,27 @@ out:
+ */
+ int create_hyp_mappings(void *from, void *to)
+ {
+- unsigned long phys_addr = virt_to_phys(from);
++ phys_addr_t phys_addr;
++ unsigned long virt_addr;
+ unsigned long start = KERN_TO_HYP((unsigned long)from);
+ unsigned long end = KERN_TO_HYP((unsigned long)to);
+
+- /* Check for a valid kernel memory mapping */
+- if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
+- return -EINVAL;
++ start = start & PAGE_MASK;
++ end = PAGE_ALIGN(end);
+
+- return __create_hyp_mappings(hyp_pgd, start, end,
+- __phys_to_pfn(phys_addr), PAGE_HYP);
++ for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
++ int err;
++
++ phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
++ err = __create_hyp_mappings(hyp_pgd, virt_addr,
++ virt_addr + PAGE_SIZE,
++ __phys_to_pfn(phys_addr),
++ PAGE_HYP);
++ if (err)
++ return err;
++ }
++
++ return 0;
+ }
+
+ /**
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index d9ee0ff094d4..3d5db8c83b3c 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -2361,21 +2361,23 @@ static struct device_node *of_dev_hwmod_lookup(struct device_node *np,
+ * Cache the virtual address used by the MPU to access this IP block's
+ * registers. This address is needed early so the OCP registers that
+ * are part of the device's address space can be ioremapped properly.
+- * No return value.
++ *
++ * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
++ * -ENXIO on absent or invalid register target address space.
+ */
+-static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
++static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
+ {
+ struct omap_hwmod_addr_space *mem;
+ void __iomem *va_start = NULL;
+ struct device_node *np;
+
+ if (!oh)
+- return;
++ return -EINVAL;
+
+ _save_mpu_port_index(oh);
+
+ if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
+- return;
++ return -ENXIO;
+
+ mem = _find_mpu_rt_addr_space(oh);
+ if (!mem) {
+@@ -2384,7 +2386,7 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
+
+ /* Extract the IO space from device tree blob */
+ if (!of_have_populated_dt())
+- return;
++ return -ENXIO;
+
+ np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
+ if (np)
+@@ -2395,13 +2397,14 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
+
+ if (!va_start) {
+ pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name);
+- return;
++ return -ENXIO;
+ }
+
+ pr_debug("omap_hwmod: %s: MPU register target at va %p\n",
+ oh->name, va_start);
+
+ oh->_mpu_rt_va = va_start;
++ return 0;
+ }
+
+ /**
+@@ -2414,8 +2417,8 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
+ * registered at this point. This is the first of two phases for
+ * hwmod initialization. Code called here does not touch any hardware
+ * registers, it simply prepares internal data structures. Returns 0
+- * upon success or if the hwmod isn't registered, or -EINVAL upon
+- * failure.
++ * upon success or if the hwmod isn't registered or if the hwmod's
++ * address space is not defined, or -EINVAL upon failure.
+ */
+ static int __init _init(struct omap_hwmod *oh, void *data)
+ {
+@@ -2424,8 +2427,14 @@ static int __init _init(struct omap_hwmod *oh, void *data)
+ if (oh->_state != _HWMOD_STATE_REGISTERED)
+ return 0;
+
+- if (oh->class->sysc)
+- _init_mpu_rt_base(oh, NULL);
++ if (oh->class->sysc) {
++ r = _init_mpu_rt_base(oh, NULL);
++ if (r < 0) {
++ WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
++ oh->name);
++ return 0;
++ }
++ }
+
+ r = _init_clocks(oh, NULL);
+ if (r < 0) {
+diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
+index 5d3047e5563b..4353cf239a13 100644
+--- a/arch/cris/include/asm/io.h
++++ b/arch/cris/include/asm/io.h
+@@ -3,6 +3,7 @@
+
+ #include <asm/page.h> /* for __va, __pa */
+ #include <arch/io.h>
++#include <asm-generic/iomap.h>
+ #include <linux/kernel.h>
+
+ struct cris_io_operations
+diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
+index e0a899a1a8a6..5a84b3a50741 100644
+--- a/arch/ia64/include/asm/processor.h
++++ b/arch/ia64/include/asm/processor.h
+@@ -319,7 +319,7 @@ struct thread_struct {
+ regs->loadrs = 0; \
+ regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
+ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
+- if (unlikely(!get_dumpable(current->mm))) { \
++ if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
+ /* \
+ * Zap scratch regs to avoid leaking bits between processes with different \
+ * uid/privileges. \
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index 55593ee2d5aa..c766cf575520 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -687,6 +687,15 @@ void eeh_save_bars(struct eeh_dev *edev)
+
+ for (i = 0; i < 16; i++)
+ eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]);
++
++ /*
++ * For PCI bridges including root port, we need enable bus
++ * master explicitly. Otherwise, it can't fetch IODA table
++ * entries correctly. So we cache the bit in advance so that
++ * we can restore it after reset, either PHB range or PE range.
++ */
++ if (edev->mode & EEH_DEV_BRIDGE)
++ edev->config_space[1] |= PCI_COMMAND_MASTER;
+ }
+
+ /**
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index bebdf1a1a540..36d49e6b7c4c 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -457,7 +457,15 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+ if (copy_vsx_to_user(&frame->mc_vsregs, current))
+ return 1;
+ msr |= MSR_VSX;
+- }
++ } else if (!ctx_has_vsx_region)
++ /*
++ * With a small context structure we can't hold the VSX
++ * registers, hence clear the MSR value to indicate the state
++ * was not saved.
++ */
++ msr &= ~MSR_VSX;
++
++
+ #endif /* CONFIG_VSX */
+ #ifdef CONFIG_SPE
+ /* save spe registers */
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 192b051df97e..b3b144121cc9 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -213,8 +213,6 @@ static u64 scan_dispatch_log(u64 stop_tb)
+ if (i == be64_to_cpu(vpa->dtl_idx))
+ return 0;
+ while (i < be64_to_cpu(vpa->dtl_idx)) {
+- if (dtl_consumer)
+- dtl_consumer(dtl, i);
+ dtb = be64_to_cpu(dtl->timebase);
+ tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
+ be32_to_cpu(dtl->ready_to_enqueue_time);
+@@ -227,6 +225,8 @@ static u64 scan_dispatch_log(u64 stop_tb)
+ }
+ if (dtb > stop_tb)
+ break;
++ if (dtl_consumer)
++ dtl_consumer(dtl, i);
+ stolen += tb_delta;
+ ++i;
+ ++dtl;
+diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
+index d38cc08b16c7..cb92d8204ec7 100644
+--- a/arch/powerpc/kernel/vio.c
++++ b/arch/powerpc/kernel/vio.c
+@@ -1531,12 +1531,12 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+
+ dn = dev->of_node;
+ if (!dn) {
+- strcat(buf, "\n");
++ strcpy(buf, "\n");
+ return strlen(buf);
+ }
+ cp = of_get_property(dn, "compatible", NULL);
+ if (!cp) {
+- strcat(buf, "\n");
++ strcpy(buf, "\n");
+ return strlen(buf);
+ }
+
+diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
+index 6936547018b8..c5f734e20b0f 100644
+--- a/arch/powerpc/mm/gup.c
++++ b/arch/powerpc/mm/gup.c
+@@ -123,6 +123,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct mm_struct *mm = current->mm;
+ unsigned long addr, len, end;
+ unsigned long next;
++ unsigned long flags;
+ pgd_t *pgdp;
+ int nr = 0;
+
+@@ -156,7 +157,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ * So long as we atomically load page table pointers versus teardown,
+ * we can follow the address down to the the page and take a ref on it.
+ */
+- local_irq_disable();
++ local_irq_save(flags);
+
+ pgdp = pgd_offset(mm, addr);
+ do {
+@@ -179,7 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ break;
+ } while (pgdp++, addr = next, addr != end);
+
+- local_irq_enable();
++ local_irq_restore(flags);
+
+ return nr;
+ }
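
The gup.c hunk swaps local_irq_disable()/local_irq_enable() for the save/restore variants. A sketch of why, under the assumption the function can be entered with interrupts already off:

/* local_irq_restore() puts back exactly what local_irq_save()
 * recorded, so this is safe even for callers that already run with
 * interrupts disabled; a bare local_irq_enable() would wrongly
 * re-enable them for such a caller.
 */
static void walk_tables_atomically(void)
{
        unsigned long flags;

        local_irq_save(flags);
        /* ... lockless page-table walk ... */
        local_irq_restore(flags);
}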
+diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
+index 3e99c149271a..7ce9cf3b6988 100644
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -258,7 +258,7 @@ static bool slice_scan_available(unsigned long addr,
+ slice = GET_HIGH_SLICE_INDEX(addr);
+ *boundary_addr = (slice + end) ?
+ ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
+- return !!(available.high_slices & (1u << slice));
++ return !!(available.high_slices & (1ul << slice));
+ }
+ }
+
+diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
+index a82a41b4fd91..1a7b1d0f41df 100644
+--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
++++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
+@@ -303,6 +303,9 @@ void __init mpc512x_setup_diu(void)
+ diu_ops.release_bootmem = mpc512x_release_bootmem;
+ }
+
++#else
++void __init mpc512x_setup_diu(void) { /* EMPTY */ }
++void __init mpc512x_init_diu(void) { /* EMPTY */ }
+ #endif
+
+ void __init mpc512x_init_IRQ(void)
+diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
+index 90f4496017e4..af54174801f7 100644
+--- a/arch/powerpc/platforms/52xx/Kconfig
++++ b/arch/powerpc/platforms/52xx/Kconfig
+@@ -57,5 +57,5 @@ config PPC_MPC5200_BUGFIX
+
+ config PPC_MPC5200_LPBFIFO
+ tristate "MPC5200 LocalPlus bus FIFO driver"
+- depends on PPC_MPC52xx
++ depends on PPC_MPC52xx && PPC_BESTCOMM
+ select PPC_BESTCOMM_GEN_BD
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 74a5a5773b1f..930e1fe78214 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -153,13 +153,23 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
+ rid_end = pe->rid + 1;
+ }
+
+- /* Associate PE in PELT */
++ /*
++ * Associate PE in PELT. We need add the PE into the
++ * corresponding PELT-V as well. Otherwise, the error
++ * originated from the PE might contribute to other
++ * PEs.
++ */
+ rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
+ bcomp, dcomp, fcomp, OPAL_MAP_PE);
+ if (rc) {
+ pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
+ return -ENXIO;
+ }
++
++ rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
++ pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
++ if (rc)
++ pe_warn(pe, "OPAL error %d adding self to PELTV\n", rc);
+ opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index b4dbade8ca24..2e4b5be31a1b 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -35,7 +35,6 @@ static u8 *ctrblk;
+ static char keylen_flag;
+
+ struct s390_aes_ctx {
+- u8 iv[AES_BLOCK_SIZE];
+ u8 key[AES_MAX_KEY_SIZE];
+ long enc;
+ long dec;
+@@ -441,30 +440,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ return aes_set_key(tfm, in_key, key_len);
+ }
+
+-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
++static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
+ struct blkcipher_walk *walk)
+ {
++ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ int ret = blkcipher_walk_virt(desc, walk);
+ unsigned int nbytes = walk->nbytes;
++ struct {
++ u8 iv[AES_BLOCK_SIZE];
++ u8 key[AES_MAX_KEY_SIZE];
++ } param;
+
+ if (!nbytes)
+ goto out;
+
+- memcpy(param, walk->iv, AES_BLOCK_SIZE);
++ memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
++ memcpy(param.key, sctx->key, sctx->key_len);
+ do {
+ /* only use complete blocks */
+ unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ u8 *out = walk->dst.virt.addr;
+ u8 *in = walk->src.virt.addr;
+
+- ret = crypt_s390_kmc(func, param, out, in, n);
++ ret = crypt_s390_kmc(func, &param, out, in, n);
+ if (ret < 0 || ret != n)
+ return -EIO;
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ } while ((nbytes = walk->nbytes));
+- memcpy(walk->iv, param, AES_BLOCK_SIZE);
++ memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
+
+ out:
+ return ret;
+@@ -481,7 +486,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
++ return cbc_aes_crypt(desc, sctx->enc, &walk);
+ }
+
+ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+@@ -495,7 +500,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
++ return cbc_aes_crypt(desc, sctx->dec, &walk);
+ }
+
+ static struct crypto_alg cbc_aes_alg = {
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index 819b94d22720..8beee1cceba4 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -71,9 +71,11 @@ static inline void local_tick_enable(unsigned long long comp)
+
+ typedef unsigned long long cycles_t;
+
+-static inline void get_tod_clock_ext(char *clk)
++static inline void get_tod_clock_ext(char clk[16])
+ {
+- asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
++ typedef struct { char _[sizeof(clk)]; } addrtype;
++
++ asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
+ }
+
+ static inline unsigned long long get_tod_clock(void)
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 1a4313a1b60f..93439cd04406 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -929,7 +929,7 @@ static ssize_t show_idle_count(struct device *dev,
+ idle_count = ACCESS_ONCE(idle->idle_count);
+ if (ACCESS_ONCE(idle->clock_idle_enter))
+ idle_count++;
+- } while ((sequence & 1) || (idle->sequence != sequence));
++ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
+ return sprintf(buf, "%llu\n", idle_count);
+ }
+ static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
+@@ -947,7 +947,7 @@ static ssize_t show_idle_time(struct device *dev,
+ idle_time = ACCESS_ONCE(idle->idle_time);
+ idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+ idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
+- } while ((sequence & 1) || (idle->sequence != sequence));
++ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
+ idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+ return sprintf(buf, "%llu\n", idle_time >> 12);
+ }
+diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
+index abcfab55f99b..bb06a76040bf 100644
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -191,7 +191,7 @@ cputime64_t s390_get_idle_time(int cpu)
+ sequence = ACCESS_ONCE(idle->sequence);
+ idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+ idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
+- } while ((sequence & 1) || (idle->sequence != sequence));
++ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
+ return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
+ }
+
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index e0e0841eef45..18677a90d6a3 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -127,12 +127,12 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
+ cpu_emergency_vmxoff();
+ cpu_emergency_svm_disable();
+
+- lapic_shutdown();
+ #ifdef CONFIG_X86_IO_APIC
+ /* Prevent crash_kexec() from deadlocking on ioapic_lock. */
+ ioapic_zap_locks();
+ disable_IO_APIC();
+ #endif
++ lapic_shutdown();
+ #ifdef CONFIG_HPET_TIMER
+ hpet_disable();
+ #endif
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 42a392a9fd02..d4bdd253fea7 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -248,6 +248,15 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
+ return ret;
+ }
+
++static int is_ftrace_caller(unsigned long ip)
++{
++ if (ip == (unsigned long)(&ftrace_call) ||
++ ip == (unsigned long)(&ftrace_regs_call))
++ return 1;
++
++ return 0;
++}
++
+ /*
+ * A breakpoint was added to the code address we are about to
+ * modify, and this is the handle that will just skip over it.
+@@ -257,10 +266,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
+ */
+ int ftrace_int3_handler(struct pt_regs *regs)
+ {
++ unsigned long ip;
++
+ if (WARN_ON_ONCE(!regs))
+ return 0;
+
+- if (!ftrace_location(regs->ip - 1))
++ ip = regs->ip - 1;
++ if (!ftrace_location(ip) && !is_ftrace_caller(ip))
+ return 0;
+
+ regs->ip += MCOUNT_INSN_SIZE - 1;
+diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
+index af99f71aeb7f..c3d4cc972eca 100644
+--- a/arch/x86/kernel/microcode_amd.c
++++ b/arch/x86/kernel/microcode_amd.c
+@@ -431,7 +431,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
+ snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
+
+ if (request_firmware(&fw, (const char *)fw_name, device)) {
+- pr_err("failed to load file %s\n", fw_name);
++ pr_debug("failed to load file %s\n", fw_name);
+ goto out;
+ }
+
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index c83516be1052..3fb8d95ab8b5 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -391,9 +391,9 @@ static void amd_e400_idle(void)
+ * The switch back from broadcast mode needs to be
+ * called with interrupts disabled.
+ */
+- local_irq_disable();
+- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+- local_irq_enable();
++ local_irq_disable();
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
++ local_irq_enable();
+ } else
+ default_idle();
+ }
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 7e920bff99a3..618ce264b237 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -550,6 +550,10 @@ static void native_machine_emergency_restart(void)
+ void native_machine_shutdown(void)
+ {
+ /* Stop the cpus and apics */
++#ifdef CONFIG_X86_IO_APIC
++ disable_IO_APIC();
++#endif
++
+ #ifdef CONFIG_SMP
+ /*
+ * Stop all of the others. Also disable the local irq to
+@@ -562,10 +566,6 @@ void native_machine_shutdown(void)
+
+ lapic_shutdown();
+
+-#ifdef CONFIG_X86_IO_APIC
+- disable_IO_APIC();
+-#endif
+-
+ #ifdef CONFIG_HPET_TIMER
+ hpet_disable();
+ #endif
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index ddc3f3d2afdb..92e6f4a8ba0e 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4040,7 +4040,10 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
+ case OpMem8:
+ ctxt->memop.bytes = 1;
+ if (ctxt->memop.type == OP_REG) {
+- ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, 1);
++ int highbyte_regs = ctxt->rex_prefix == 0;
++
++ ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm,
++ highbyte_regs);
+ fetch_register_operand(&ctxt->memop);
+ }
+ goto mem_common;
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 0a00e4ecf87c..5e00b5a58f6a 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2227,6 +2227,7 @@ void blk_start_request(struct request *req)
+ if (unlikely(blk_bidi_rq(req)))
+ req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
+
++ BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+ blk_add_timer(req);
+ }
+ EXPORT_SYMBOL(blk_start_request);
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index c50ecf0ea3b1..53309333c2f0 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -144,6 +144,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
+ lim->discard_zeroes_data = 1;
+ lim->max_segments = USHRT_MAX;
+ lim->max_hw_sectors = UINT_MAX;
++ lim->max_segment_size = UINT_MAX;
+ lim->max_sectors = UINT_MAX;
+ lim->max_write_same_sectors = UINT_MAX;
+ }
+diff --git a/block/blk-timeout.c b/block/blk-timeout.c
+index 65f103563969..655ba909cd6a 100644
+--- a/block/blk-timeout.c
++++ b/block/blk-timeout.c
+@@ -91,8 +91,8 @@ static void blk_rq_timed_out(struct request *req)
+ __blk_complete_request(req);
+ break;
+ case BLK_EH_RESET_TIMER:
+- blk_clear_rq_complete(req);
+ blk_add_timer(req);
++ blk_clear_rq_complete(req);
+ break;
+ case BLK_EH_NOT_HANDLED:
+ /*
+@@ -174,7 +174,6 @@ void blk_add_timer(struct request *req)
+ return;
+
+ BUG_ON(!list_empty(&req->timeout_list));
+- BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+
+ /*
+ * Some LLDs, like scsi, peek at the timeout to prevent a
+diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
+index c0bb3778f1ae..666f1962a160 100644
+--- a/crypto/ansi_cprng.c
++++ b/crypto/ansi_cprng.c
+@@ -230,11 +230,11 @@ remainder:
+ */
+ if (byte_count < DEFAULT_BLK_SZ) {
+ empty_rbuf:
+- for (; ctx->rand_data_valid < DEFAULT_BLK_SZ;
+- ctx->rand_data_valid++) {
++ while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
+ *ptr = ctx->rand_data[ctx->rand_data_valid];
+ ptr++;
+ byte_count--;
++ ctx->rand_data_valid++;
+ if (byte_count == 0)
+ goto done;
+ }
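
The ansi_cprng hunk turns the for loop into a while so rand_data_valid advances in the body, together with the byte actually being copied; the old goto out of the loop skipped the for header's post-increment, so the last byte consumed was handed out again on the next call. The corrected consume pattern in isolation (hypothetical drain() helper):

/* Copy out buffered bytes, advancing the consumed index in lock-step
 * with each byte emitted so nothing is re-used if we stop early.
 */
static unsigned int drain(u8 *ptr, unsigned int byte_count,
                          const u8 *rand_data, unsigned int *valid,
                          unsigned int blk_sz)
{
        unsigned int copied = 0;

        while (*valid < blk_sz && byte_count) {
                *ptr++ = rand_data[(*valid)++];
                byte_count--;
                copied++;
        }
        return copied;
}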
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index a06d98374705..15986f32009e 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -175,9 +175,10 @@ static void start_transaction(struct acpi_ec *ec)
+ static void advance_transaction(struct acpi_ec *ec, u8 status)
+ {
+ unsigned long flags;
+- struct transaction *t = ec->curr;
++ struct transaction *t;
+
+ spin_lock_irqsave(&ec->lock, flags);
++ t = ec->curr;
+ if (!t)
+ goto unlock;
+ if (t->wlen > t->wi) {
+diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
+index d3874f425653..d7e53ea53d6c 100644
+--- a/drivers/acpi/pci_root.c
++++ b/drivers/acpi/pci_root.c
+@@ -608,9 +608,12 @@ static void handle_root_bridge_removal(struct acpi_device *device)
+ ej_event->device = device;
+ ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
+
++ get_device(&device->dev);
+ status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
+- if (ACPI_FAILURE(status))
++ if (ACPI_FAILURE(status)) {
++ put_device(&device->dev);
+ kfree(ej_event);
++ }
+ }
+
+ static void _handle_hotplug_event_root(struct work_struct *work)
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index f98dd00b51a9..c7414a545a4f 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -119,17 +119,10 @@ static struct dmi_system_id processor_power_dmi_table[] = {
+ */
+ static void acpi_safe_halt(void)
+ {
+- current_thread_info()->status &= ~TS_POLLING;
+- /*
+- * TS_POLLING-cleared state must be visible before we
+- * test NEED_RESCHED:
+- */
+- smp_mb();
+- if (!need_resched()) {
++ if (!tif_need_resched()) {
+ safe_halt();
+ local_irq_disable();
+ }
+- current_thread_info()->status |= TS_POLLING;
+ }
+
+ #ifdef ARCH_APICTIMER_STOPS_ON_C3
+@@ -737,6 +730,11 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
+ if (unlikely(!pr))
+ return -EINVAL;
+
++ if (cx->entry_method == ACPI_CSTATE_FFH) {
++ if (current_set_polling_and_test())
++ return -EINVAL;
++ }
++
+ lapic_timer_state_broadcast(pr, cx, 1);
+ acpi_idle_do_entry(cx);
+
+@@ -790,18 +788,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
+ if (unlikely(!pr))
+ return -EINVAL;
+
+- if (cx->entry_method != ACPI_CSTATE_FFH) {
+- current_thread_info()->status &= ~TS_POLLING;
+- /*
+- * TS_POLLING-cleared state must be visible before we test
+- * NEED_RESCHED:
+- */
+- smp_mb();
+-
+- if (unlikely(need_resched())) {
+- current_thread_info()->status |= TS_POLLING;
++ if (cx->entry_method == ACPI_CSTATE_FFH) {
++ if (current_set_polling_and_test())
+ return -EINVAL;
+- }
+ }
+
+ /*
+@@ -819,9 +808,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
+
+ sched_clock_idle_wakeup_event(0);
+
+- if (cx->entry_method != ACPI_CSTATE_FFH)
+- current_thread_info()->status |= TS_POLLING;
+-
+ lapic_timer_state_broadcast(pr, cx, 0);
+ return index;
+ }
+@@ -858,18 +844,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
+ }
+ }
+
+- if (cx->entry_method != ACPI_CSTATE_FFH) {
+- current_thread_info()->status &= ~TS_POLLING;
+- /*
+- * TS_POLLING-cleared state must be visible before we test
+- * NEED_RESCHED:
+- */
+- smp_mb();
+-
+- if (unlikely(need_resched())) {
+- current_thread_info()->status |= TS_POLLING;
++ if (cx->entry_method == ACPI_CSTATE_FFH) {
++ if (current_set_polling_and_test())
+ return -EINVAL;
+- }
+ }
+
+ acpi_unlazy_tlb(smp_processor_id());
+@@ -915,9 +892,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
+
+ sched_clock_idle_wakeup_event(0);
+
+- if (cx->entry_method != ACPI_CSTATE_FFH)
+- current_thread_info()->status |= TS_POLLING;
+-
+ lapic_timer_state_broadcast(pr, cx, 0);
+ return index;
+ }
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index fee8a297c7d9..3601738ef6f4 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -331,8 +331,6 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
+ goto out;
+ }
+ }
+- acpi_evaluate_hotplug_ost(handle, ost_source,
+- ACPI_OST_SC_INSERT_IN_PROGRESS, NULL);
+ error = acpi_bus_scan(handle);
+ if (error) {
+ acpi_handle_warn(handle, "Namespace scan failure\n");
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index aebcf6355df4..f193285968f8 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -832,7 +832,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
+ for (i = 2; i < br->count; i++)
+ if (level_old == br->levels[i])
+ break;
+- if (i == br->count)
++ if (i == br->count || !level)
+ level = max_level;
+ }
+
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index 9bf4371755f2..d91f1a56e861 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -545,7 +545,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
+
+ mutex_lock(&brd_devices_mutex);
+ brd = brd_init_one(MINOR(dev) >> part_shift);
+- kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
++ kobj = brd ? get_disk(brd->brd_disk) : NULL;
+ mutex_unlock(&brd_devices_mutex);
+
+ *part = 0;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 40e715531aa6..2f036ca4b6ee 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1741,7 +1741,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ if (err < 0)
+ err = loop_add(&lo, MINOR(dev) >> part_shift);
+ if (err < 0)
+- kobj = ERR_PTR(err);
++ kobj = NULL;
+ else
+ kobj = get_disk(lo->lo_disk);
+ mutex_unlock(&loop_index_mutex);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index f3dfc0a88fdc..d593c99121c3 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1628,7 +1628,6 @@ static struct usb_driver btusb_driver = {
+ #ifdef CONFIG_PM
+ .suspend = btusb_suspend,
+ .resume = btusb_resume,
+- .reset_resume = btusb_resume,
+ #endif
+ .id_table = btusb_table,
+ .supports_autosuspend = 1,
+diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+index e5be3ee7f172..71b4283f7fad 100644
+--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
++++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+@@ -587,6 +587,7 @@ nvc1_grctx_init_unk58xx[] = {
+ { 0x405870, 4, 0x04, 0x00000001 },
+ { 0x405a00, 2, 0x04, 0x00000000 },
+ { 0x405a18, 1, 0x04, 0x00000000 },
++ {}
+ };
+
+ static struct nvc0_graph_init
+@@ -598,6 +599,7 @@ nvc1_grctx_init_rop[] = {
+ { 0x408904, 1, 0x04, 0x62000001 },
+ { 0x408908, 1, 0x04, 0x00c80929 },
+ { 0x408980, 1, 0x04, 0x0000011d },
++ {}
+ };
+
+ static struct nvc0_graph_init
+@@ -671,6 +673,7 @@ nvc1_grctx_init_gpc_0[] = {
+ { 0x419000, 1, 0x04, 0x00000780 },
+ { 0x419004, 2, 0x04, 0x00000000 },
+ { 0x419014, 1, 0x04, 0x00000004 },
++ {}
+ };
+
+ static struct nvc0_graph_init
+@@ -717,6 +720,7 @@ nvc1_grctx_init_tpc[] = {
+ { 0x419e98, 1, 0x04, 0x00000000 },
+ { 0x419ee0, 1, 0x04, 0x00011110 },
+ { 0x419f30, 11, 0x04, 0x00000000 },
++ {}
+ };
+
+ void
+diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+index 438e78410808..c4740d528532 100644
+--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
++++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+@@ -258,6 +258,7 @@ nvd7_grctx_init_hub[] = {
+ nvc0_grctx_init_unk78xx,
+ nvc0_grctx_init_unk80xx,
+ nvd9_grctx_init_rop,
++ NULL
+ };
+
+ struct nvc0_graph_init *
+diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+index 818a4751df46..a1102cbf2fdc 100644
+--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
++++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+@@ -466,6 +466,7 @@ nvd9_grctx_init_hub[] = {
+ nvc0_grctx_init_unk78xx,
+ nvc0_grctx_init_unk80xx,
+ nvd9_grctx_init_rop,
++ NULL
+ };
+
+ struct nvc0_graph_init *
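
Editorial sketch: the nouveau hunks above terminate the register-init tables with an empty entry and the hub lists with a NULL pointer. Consumers walk these tables until they hit the sentinel, so a missing terminator reads past the end of the array. A minimal, self-contained model of the pattern (struct layout and field names are assumptions modeled on the entries above, not the driver's real definitions; the kernel writes the sentinel as {}, a GCC extension, spelled { 0 } here for portability):

#include <stdio.h>

struct graph_init {
        unsigned addr;
        unsigned count;
        unsigned pitch;
        unsigned data;
};

static const struct graph_init init_list[] = {
        { 0x405870, 4, 0x04, 0x00000001 },
        { 0x405a00, 2, 0x04, 0x00000000 },
        { 0 }                   /* sentinel: addr == 0 ends the walk */
};

int main(void)
{
        for (const struct graph_init *p = init_list; p->addr; p++)
                printf("write %u words at %#x\n", p->count, p->addr);
        return 0;
}

The NULL appended to the init-function pointer arrays in ctxnvd7.c and ctxnvd9.c serves the same purpose for lists of pointers.
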
+diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
+index ca498d151a76..5240690b96c3 100644
+--- a/drivers/gpu/drm/shmobile/Kconfig
++++ b/drivers/gpu/drm/shmobile/Kconfig
+@@ -1,6 +1,7 @@
+ config DRM_SHMOBILE
+ tristate "DRM Support for SH Mobile"
+ depends on DRM && (ARM || SUPERH)
++ select BACKLIGHT_CLASS_DEVICE
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index bbff5f200bef..fa920469bf10 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -203,7 +203,8 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
+ struct vmbus_channel *primary_channel;
+ struct vmbus_channel_relid_released msg;
+
+- vmbus_device_unregister(channel->device_obj);
++ if (channel->device_obj)
++ vmbus_device_unregister(channel->device_obj);
+ memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
+ msg.child_relid = channel->offermsg.child_relid;
+ msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
+@@ -216,7 +217,7 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
+ } else {
+ primary_channel = channel->primary_channel;
+ spin_lock_irqsave(&primary_channel->sc_lock, flags);
+- list_del(&channel->listentry);
++ list_del(&channel->sc_list);
+ spin_unlock_irqrestore(&primary_channel->sc_lock, flags);
+ }
+ free_channel(channel);
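
Editorial sketch: the second hv hunk corrects which list_head the channel is unlinked from. A structure can sit on several lists at once through separate embedded list_head members, and list_del() must be given the member the object was queued with. A tiny self-contained model (the two field names are taken from the hunk; everything else is illustrative):

struct list_head { struct list_head *next, *prev; };

static void list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

struct channel {
        struct list_head listentry;     /* global channel list */
        struct list_head sc_list;       /* per-primary subchannel list */
};

/* A channel enqueued via &ch->sc_list must be removed via &ch->sc_list;
 * unlinking &ch->listentry here corrupts whichever list that member is
 * (or is not) currently on. */
static void remove_subchannel(struct channel *ch)
{
        list_del(&ch->sc_list);
}
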
+diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
+index cdff74282955..14e36c114d61 100644
+--- a/drivers/hwmon/lm90.c
++++ b/drivers/hwmon/lm90.c
+@@ -278,7 +278,7 @@ static const struct lm90_params lm90_params[] = {
+ [max6696] = {
+ .flags = LM90_HAVE_EMERGENCY
+ | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
+- .alert_alarms = 0x187c,
++ .alert_alarms = 0x1c7c,
+ .max_convrate = 6,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+ },
+@@ -1500,19 +1500,22 @@ static void lm90_alert(struct i2c_client *client, unsigned int flag)
+ if ((alarms & 0x7f) == 0 && (alarms2 & 0xfe) == 0) {
+ dev_info(&client->dev, "Everything OK\n");
+ } else {
+- if (alarms & 0x61)
++ if ((alarms & 0x61) || (alarms2 & 0x80))
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 1);
+- if (alarms & 0x1a)
++ if ((alarms & 0x1a) || (alarms2 & 0x20))
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 2);
+ if (alarms & 0x04)
+ dev_warn(&client->dev,
+ "temp%d diode open, please check!\n", 2);
+
+- if (alarms2 & 0x18)
++ if (alarms2 & 0x5a)
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 3);
++ if (alarms2 & 0x04)
++ dev_warn(&client->dev,
++ "temp%d diode open, please check!\n", 3);
+
+ /*
+ * Disable ALERT# output, because these chips don't implement
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index fa6964d8681a..f116d664b473 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -359,7 +359,7 @@ static int intel_idle(struct cpuidle_device *dev,
+ if (!(lapic_timer_reliable_states & (1 << (cstate))))
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+
+- if (!need_resched()) {
++ if (!current_set_polling_and_test()) {
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
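
Editorial sketch: the ACPI idle hunks earlier and this intel_idle hunk replace the open-coded set-flag/barrier/test sequence with current_set_polling_and_test(), defined in the include/linux/sched.h hunk near the end of this patch. The idiom: advertise that the CPU is polling, force a full barrier, and only then test for pending work, so a remote waker either sees the polling flag or the sleeper sees NEED_RESCHED. A rough userspace analogue with C11 atomics (flag names are invented):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool ts_polling;          /* "I will notice flag writes" */
static atomic_bool need_resched_flag;   /* set by a hypothetical waker */

static bool set_polling_and_test(void)
{
        atomic_store_explicit(&ts_polling, true, memory_order_relaxed);
        /* Polling state must be visible before we test NEED_RESCHED;
         * pairs with the barrier on the waker side. */
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load(&need_resched_flag); /* true: skip idle entry */
}
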
+diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
+index 08e70232062f..9188ef5d677e 100644
+--- a/drivers/memstick/core/ms_block.c
++++ b/drivers/memstick/core/ms_block.c
+@@ -401,7 +401,7 @@ again:
+ sizeof(struct ms_status_register)))
+ return 0;
+
+- msb->state = MSB_RP_RECEIVE_OOB_READ;
++ msb->state = MSB_RP_RECIVE_STATUS_REG;
+ return 0;
+
+ case MSB_RP_RECIVE_STATUS_REG:
+diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
+index 2fc0586ce3bb..9cbd0370ca44 100644
+--- a/drivers/misc/lkdtm.c
++++ b/drivers/misc/lkdtm.c
+@@ -297,6 +297,14 @@ static void do_nothing(void)
+ return;
+ }
+
++static noinline void corrupt_stack(void)
++{
++ /* Use default char array length that triggers stack protection. */
++ char data[8];
++
++ memset((void *)data, 0, 64);
++}
++
+ static void execute_location(void *dst)
+ {
+ void (*func)(void) = dst;
+@@ -327,13 +335,9 @@ static void lkdtm_do_action(enum ctype which)
+ case CT_OVERFLOW:
+ (void) recursive_loop(0);
+ break;
+- case CT_CORRUPT_STACK: {
+- /* Make sure the compiler creates and uses an 8 char array. */
+- volatile char data[8];
+-
+- memset((void *)data, 0, 64);
++ case CT_CORRUPT_STACK:
++ corrupt_stack();
+ break;
+- }
+ case CT_UNALIGNED_LOAD_STORE_WRITE: {
+ static u8 data[5] __attribute__((aligned(4))) = {1, 2,
+ 3, 4, 5};
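
Editorial sketch: the lkdtm hunk moves the deliberate overflow out of the big switch statement into its own noinline function, likely so the smash happens in a frame of its own and the stack-protector canary check fires when that frame returns, instead of the buffer being rearranged or the memset elided inside the larger handler. GCC emits canaries per function, and an 8-byte char array is exactly the default ssp-buffer-size threshold. A userspace demonstration (intentionally undefined behaviour; build with -fstack-protector):

#include <string.h>

static void __attribute__((noinline)) corrupt_stack(void)
{
        char data[8];           /* 8 bytes: default canary threshold */

        memset(data, 0, 64);    /* deliberately overflow the frame */
}

int main(void)
{
        corrupt_stack();        /* should abort: stack smashing detected */
        return 0;
}
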
+diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
+index d0c6907dfd92..994ca4aff1a3 100644
+--- a/drivers/misc/mei/nfc.c
++++ b/drivers/misc/mei/nfc.c
+@@ -485,8 +485,11 @@ int mei_nfc_host_init(struct mei_device *dev)
+ if (ndev->cl_info)
+ return 0;
+
+- cl_info = mei_cl_allocate(dev);
+- cl = mei_cl_allocate(dev);
++ ndev->cl_info = mei_cl_allocate(dev);
++ ndev->cl = mei_cl_allocate(dev);
++
++ cl = ndev->cl;
++ cl_info = ndev->cl_info;
+
+ if (!cl || !cl_info) {
+ ret = -ENOMEM;
+@@ -527,10 +530,9 @@ int mei_nfc_host_init(struct mei_device *dev)
+
+ cl->device_uuid = mei_nfc_guid;
+
++
+ list_add_tail(&cl->device_link, &dev->device_list);
+
+- ndev->cl_info = cl_info;
+- ndev->cl = cl;
+ ndev->req_id = 1;
+
+ INIT_WORK(&ndev->init_work, mei_nfc_init);
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index a668cd491cb3..e3fc07cf2f62 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -814,9 +814,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
+ msg_ctrl_save = priv->read_reg(priv,
+ C_CAN_IFACE(MSGCTRL_REG, 0));
+
+- if (msg_ctrl_save & IF_MCONT_EOB)
+- return num_rx_pkts;
+-
+ if (msg_ctrl_save & IF_MCONT_MSGLST) {
+ c_can_handle_lost_msg_obj(dev, 0, msg_obj);
+ num_rx_pkts++;
+@@ -824,6 +821,9 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
+ continue;
+ }
+
++ if (msg_ctrl_save & IF_MCONT_EOB)
++ return num_rx_pkts;
++
+ if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
+ continue;
+
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index 3b9546588240..4b2d5ed62b11 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -1544,9 +1544,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
+ return 0;
+ }
+
+-static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
+- struct usb_endpoint_descriptor **in,
+- struct usb_endpoint_descriptor **out)
++static int kvaser_usb_get_endpoints(const struct usb_interface *intf,
++ struct usb_endpoint_descriptor **in,
++ struct usb_endpoint_descriptor **out)
+ {
+ const struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+@@ -1557,12 +1557,18 @@ static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+- if (usb_endpoint_is_bulk_in(endpoint))
++ if (!*in && usb_endpoint_is_bulk_in(endpoint))
+ *in = endpoint;
+
+- if (usb_endpoint_is_bulk_out(endpoint))
++ if (!*out && usb_endpoint_is_bulk_out(endpoint))
+ *out = endpoint;
++
++ /* use first bulk endpoint for in and out */
++ if (*in && *out)
++ return 0;
+ }
++
++ return -ENODEV;
+ }
+
+ static int kvaser_usb_probe(struct usb_interface *intf,
+@@ -1576,8 +1582,8 @@ static int kvaser_usb_probe(struct usb_interface *intf,
+ if (!dev)
+ return -ENOMEM;
+
+- kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
+- if (!dev->bulk_in || !dev->bulk_out) {
++ err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
++ if (err) {
+ dev_err(&intf->dev, "Cannot get usb endpoint(s)");
+ return err;
+ }
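
Editorial sketch: before this change, kvaser_usb_get_endpoints() returned void and the probe function could return an uninitialized err when a bulk endpoint was missing; now the scan itself reports -ENODEV and the probe path propagates it directly. A simplified sketch of the resulting pattern (the endpoint type is a stand-in, not the USB core's struct):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct endpoint { bool in; bool out; };

static int get_endpoints(const struct endpoint *eps, int n,
                         const struct endpoint **in,
                         const struct endpoint **out)
{
        *in = *out = NULL;
        for (int i = 0; i < n; i++) {
                if (!*in && eps[i].in)
                        *in = &eps[i];
                if (!*out && eps[i].out)
                        *out = &eps[i];
                if (*in && *out)
                        return 0;       /* first bulk in and out found */
        }
        return -ENODEV;                 /* caller propagates this */
}
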
+diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
+index fc95b235e210..6305a5d29db2 100644
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -1389,6 +1389,9 @@ static int alx_resume(struct device *dev)
+ {
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct alx_priv *alx = pci_get_drvdata(pdev);
++ struct alx_hw *hw = &alx->hw;
++
++ alx_reset_phy(hw);
+
+ if (!netif_running(alx->dev))
+ return 0;
+diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
+index 668dd27616a0..cc6a0a586f0b 100644
+--- a/drivers/net/wireless/libertas/debugfs.c
++++ b/drivers/net/wireless/libertas/debugfs.c
+@@ -913,7 +913,10 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
+ char *p2;
+ struct debug_data *d = f->private_data;
+
+- pdata = kmalloc(cnt, GFP_KERNEL);
++ if (cnt == 0)
++ return 0;
++
++ pdata = kmalloc(cnt + 1, GFP_KERNEL);
+ if (pdata == NULL)
+ return 0;
+
+@@ -922,6 +925,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
+ kfree(pdata);
+ return 0;
+ }
++ pdata[cnt] = '\0';
+
+ p0 = pdata;
+ for (i = 0; i < num_of_items; i++) {
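
Editorial sketch: the libertas hunk fixes a missing-terminator bug: the buffer copied from userspace was handed to string parsers without a trailing NUL. The fix allocates one extra byte and terminates explicitly, and rejects zero-length writes so the cnt + 1 arithmetic and the parse both stay well defined. A userspace analogue:

#include <stdlib.h>
#include <string.h>

/* Caller frees the returned buffer. Returns NULL on empty input or
 * allocation failure. */
static char *copy_and_terminate(const char *buf, size_t cnt)
{
        char *pdata;

        if (cnt == 0)
                return NULL;
        pdata = malloc(cnt + 1);        /* +1 for the terminator */
        if (!pdata)
                return NULL;
        memcpy(pdata, buf, cnt);
        pdata[cnt] = '\0';              /* now safe for sscanf() et al. */
        return pdata;
}
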
+diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
+index 88ce656f96cd..14007870302b 100644
+--- a/drivers/net/wireless/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/rt2x00/rt2800lib.c
+@@ -4461,10 +4461,13 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
+
+ vgc = rt2800_get_default_vgc(rt2x00dev);
+
+- if (rt2x00_rt(rt2x00dev, RT5592) && qual->rssi > -65)
+- vgc += 0x20;
+- else if (qual->rssi > -80)
+- vgc += 0x10;
++ if (rt2x00_rt(rt2x00dev, RT5592)) {
++ if (qual->rssi > -65)
++ vgc += 0x20;
++ } else {
++ if (qual->rssi > -80)
++ vgc += 0x10;
++ }
+
+ rt2800_set_vgc(rt2x00dev, qual, vgc);
+ }
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index 96961b9a395c..4feb35aef990 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -148,6 +148,8 @@ static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
+ return false;
+ }
+
++#define TXSTATUS_READ_INTERVAL 1000000
++
+ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
+ int urb_status, u32 tx_status)
+ {
+@@ -176,8 +178,9 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
+
+ if (rt2800usb_txstatus_pending(rt2x00dev)) {
+- /* Read register after 250 us */
+- hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000),
++ /* Read register after 1 ms */
++ hrtimer_start(&rt2x00dev->txstatus_timer,
++ ktime_set(0, TXSTATUS_READ_INTERVAL),
+ HRTIMER_MODE_REL);
+ return false;
+ }
+@@ -202,8 +205,9 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
+ if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
+ return;
+
+- /* Read TX_STA_FIFO register after 500 us */
+- hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 500000),
++ /* Read TX_STA_FIFO register after 2 ms */
++ hrtimer_start(&rt2x00dev->txstatus_timer,
++ ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
+ HRTIMER_MODE_REL);
+ }
+
+diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
+index 712eea9d398f..f12e909cbb48 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -181,6 +181,7 @@ static void rt2x00lib_autowakeup(struct work_struct *work)
+ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+ {
++ struct ieee80211_tx_control control = {};
+ struct rt2x00_dev *rt2x00dev = data;
+ struct sk_buff *skb;
+
+@@ -195,7 +196,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
+ */
+ skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
+ while (skb) {
+- rt2x00mac_tx(rt2x00dev->hw, NULL, skb);
++ rt2x00mac_tx(rt2x00dev->hw, &control, skb);
+ skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
+ }
+ }
+diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
+index a0935987fa3a..7f40ab8e1bd8 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
++++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
+@@ -146,7 +146,7 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length);
+ * @local: frame is not from mac80211
+ */
+ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
+- bool local);
++ struct ieee80211_sta *sta, bool local);
+
+ /**
+ * rt2x00queue_update_beacon - Send new beacon from mac80211
+diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
+index f883802f3505..f8cff1f0b6b7 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -90,7 +90,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
+ frag_skb->data, data_length, tx_info,
+ (struct ieee80211_rts *)(skb->data));
+
+- retval = rt2x00queue_write_tx_frame(queue, skb, true);
++ retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
+ if (retval) {
+ dev_kfree_skb_any(skb);
+ rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n");
+@@ -151,7 +151,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
+ goto exit_fail;
+ }
+
+- if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
++ if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
+ goto exit_fail;
+
+ /*
+@@ -754,6 +754,9 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ struct data_queue *queue;
+
++ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
++ return;
++
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_flush_queue(queue, drop);
+ }
+diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
+index 6c8a33b6ee22..66a2db8c260d 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
+@@ -635,7 +635,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry)
+ }
+
+ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
+- bool local)
++ struct ieee80211_sta *sta, bool local)
+ {
+ struct ieee80211_tx_info *tx_info;
+ struct queue_entry *entry;
+@@ -649,7 +649,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
+ * after that we are free to use the skb->cb array
+ * for our information.
+ */
+- rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);
++ rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);
+
+ /*
+ * All information is retrieved from the skb->cb array,
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 03ca6c139f1a..4e86e9767ba6 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -6420,7 +6420,12 @@ static struct ibm_struct brightness_driver_data = {
+ #define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control"
+ #define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME
+
+-static int alsa_index = ~((1 << (SNDRV_CARDS - 3)) - 1); /* last three slots */
++#if SNDRV_CARDS <= 32
++#define DEFAULT_ALSA_IDX ~((1 << (SNDRV_CARDS - 3)) - 1)
++#else
++#define DEFAULT_ALSA_IDX ~((1 << (32 - 3)) - 1)
++#endif
++static int alsa_index = DEFAULT_ALSA_IDX; /* last three slots */
+ static char *alsa_id = "ThinkPadEC";
+ static bool alsa_enable = SNDRV_DEFAULT_ENABLE1;
+
+diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
+index d85ac1a9d2c0..fbcd48d0bfc3 100644
+--- a/drivers/scsi/aacraid/commctrl.c
++++ b/drivers/scsi/aacraid/commctrl.c
+@@ -511,7 +511,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+ goto cleanup;
+ }
+
+- if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
++ if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
++ (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
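
Editorial sketch: the aacraid hunk adds a lower bound next to the existing upper bound on fibsize, a size taken from userspace; without it, a too-small value lets later field accesses read past the copied buffer. The generic shape of the check (limits here are placeholders):

#include <errno.h>
#include <stddef.h>

static int check_user_size(size_t size, size_t min, size_t max)
{
        if (size < min || size > max)
                return -EINVAL; /* reject truncated and oversized input */
        return 0;
}
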
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index 8e76ddca0999..5a5e9c915c25 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -706,7 +706,7 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ .gfp_mask = GFP_KERNEL,
+ .nr_to_scan = LONG_MAX,
+ };
+-
++ ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
+ nodes_setall(sc.nodes_to_scan);
+ ashmem_shrink_scan(&ashmem_shrinker, &sc);
+ }
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 1636c7ca57e2..a3af4699eb4d 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -543,7 +543,7 @@ void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size)
+ {
+ s->private = kzalloc(size, GFP_KERNEL);
+ if (s->private)
+- comedi_set_subdevice_runflags(s, ~0, SRF_FREE_SPRIV);
++ s->runflags |= SRF_FREE_SPRIV;
+ return s->private;
+ }
+ EXPORT_SYMBOL_GPL(comedi_alloc_spriv);
+@@ -1485,7 +1485,8 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+ if (async->cmd.flags & TRIG_WAKE_EOS)
+ async->cb_mask |= COMEDI_CB_EOS;
+
+- comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
++ comedi_set_subdevice_runflags(s, SRF_USER | SRF_ERROR | SRF_RUNNING,
++ SRF_USER | SRF_RUNNING);
+
+ /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
+ * comedi_read() or comedi_write() */
+diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+index 63bc913eba6d..8b2b4a8d1f08 100644
+--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
++++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+@@ -707,6 +707,10 @@ int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname)
+ return 0;
+ }
+
++static const struct device_type wlan_type = {
++ .name = "wlan",
++};
++
+ struct net_device *rtw_init_netdev(struct adapter *old_padapter)
+ {
+ struct adapter *padapter;
+@@ -722,6 +726,7 @@ struct net_device *rtw_init_netdev(struct adapter *old_padapter)
+ if (!pnetdev)
+ return NULL;
+
++ pnetdev->dev.type = &wlan_type;
+ padapter = rtw_netdev_priv(pnetdev);
+ padapter->pnetdev = pnetdev;
+ DBG_88E("register rtw_netdev_ops to netdev_ops\n");
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index 2c4ed52ca849..012ba15ec490 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -648,6 +648,9 @@ static ssize_t reset_store(struct device *dev,
+ zram = dev_to_zram(dev);
+ bdev = bdget_disk(zram->disk, 0);
+
++ if (!bdev)
++ return -ENOMEM;
++
+ /* Do not reset an active device! */
+ if (bdev->bd_holders)
+ return -EBUSY;
+@@ -660,8 +663,7 @@ static ssize_t reset_store(struct device *dev,
+ return -EINVAL;
+
+ /* Make sure all pending I/O is finished */
+- if (bdev)
+- fsync_bdev(bdev);
++ fsync_bdev(bdev);
+
+ zram_reset_device(zram, true);
+ return len;
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index f7841d44feda..689433cdef25 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1790,6 +1790,9 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ int ret = -EPERM;
+
++ if (enable && !udev->usb2_hw_lpm_allowed)
++ return 0;
++
+ if (hcd->driver->set_usb2_hw_lpm) {
+ ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
+ if (!ret)
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 879651cb6b45..243c6729c320 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1135,6 +1135,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ usb_clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_ENABLE);
+ }
++ if (portchange & USB_PORT_STAT_C_RESET) {
++ need_debounce_delay = true;
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_RESET);
++ }
+ if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
+ hub_is_superspeed(hub->hdev)) {
+ need_debounce_delay = true;
+@@ -3954,6 +3959,32 @@ static int hub_set_address(struct usb_device *udev, int devnum)
+ return retval;
+ }
+
++/*
++ * There are reports of USB 3.0 devices that say they support USB 2.0 Link PM
++ * when they're plugged into a USB 2.0 port, but they don't work when LPM is
++ * enabled.
++ *
++ * Only enable USB 2.0 Link PM if the port is internal (hardwired), or the
++ * device says it supports the new USB 2.0 Link PM errata by setting the BESL
++ * support bit in the BOS descriptor.
++ */
++static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
++{
++ int connect_type;
++
++ if (!udev->usb2_hw_lpm_capable)
++ return;
++
++ connect_type = usb_get_hub_port_connect_type(udev->parent,
++ udev->portnum);
++
++ if ((udev->bos->ext_cap->bmAttributes & USB_BESL_SUPPORT) ||
++ connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
++ udev->usb2_hw_lpm_allowed = 1;
++ usb_set_usb2_hardware_lpm(udev, 1);
++ }
++}
++
+ /* Reset device, (re)assign address, get device descriptor.
+ * Device connection must be stable, no more debouncing needed.
+ * Returns device in USB_STATE_ADDRESS, except on error.
+@@ -4247,6 +4278,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ /* notify HCD that we have a device connected and addressed */
+ if (hcd->driver->update_device)
+ hcd->driver->update_device(hcd, udev);
++ hub_set_initial_usb2_lpm_policy(udev);
+ fail:
+ if (retval) {
+ hub_port_disable(hub, port1, 0);
+@@ -5091,6 +5123,12 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ }
+ parent_hub = usb_hub_to_struct_hub(parent_hdev);
+
++ /* Disable USB2 hardware LPM.
++ * It will be re-enabled by the enumeration process.
++ */
++ if (udev->usb2_hw_lpm_enabled == 1)
++ usb_set_usb2_hardware_lpm(udev, 0);
++
+ bos = udev->bos;
+ udev->bos = NULL;
+
+@@ -5198,6 +5236,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+
+ done:
+ /* Now that the alt settings are re-installed, enable LTM and LPM. */
++ usb_set_usb2_hardware_lpm(udev, 1);
+ usb_unlocked_enable_lpm(udev);
+ usb_enable_ltm(udev);
+ usb_release_bos_descriptor(udev);
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index 6d2c8edb1ffe..ca516ac0f234 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -449,7 +449,7 @@ static ssize_t usb2_hardware_lpm_show(struct device *dev,
+ struct usb_device *udev = to_usb_device(dev);
+ const char *p;
+
+- if (udev->usb2_hw_lpm_enabled == 1)
++ if (udev->usb2_hw_lpm_allowed == 1)
+ p = "enabled";
+ else
+ p = "disabled";
+@@ -469,8 +469,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
+
+ ret = strtobool(buf, &value);
+
+- if (!ret)
++ if (!ret) {
++ udev->usb2_hw_lpm_allowed = value;
+ ret = usb_set_usb2_hardware_lpm(udev, value);
++ }
+
+ usb_unlock_device(udev);
+
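
Editorial sketch: across the driver.c, hub.c, and sysfs.c hunks the USB core separates what the user asked for from what the hardware is doing: usb2_hw_lpm_allowed records sysfs policy and persists across resets, while usb2_hw_lpm_enabled tracks the current port state, so enumeration and reset paths can re-apply the policy. A compressed sketch of the split (field and function names abbreviated):

struct lpm_state {
        unsigned allowed:1;     /* user policy, persists across resets */
        unsigned enabled:1;     /* what the port is currently doing */
};

static int set_hw_lpm(struct lpm_state *s, int enable)
{
        if (enable && !s->allowed)
                return 0;       /* honour policy before touching hardware */
        s->enabled = enable;    /* stands in for the HCD callback */
        return 0;
}
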
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 83bcd13622c3..49b8bd063fab 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1693,9 +1693,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
+ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ {
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+- struct dev_info *dev_info, *next;
+ struct xhci_cd *cur_cd, *next_cd;
+- unsigned long flags;
+ int size;
+ int i, j, num_ports;
+
+@@ -1756,13 +1754,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+
+ scratchpad_free(xhci);
+
+- spin_lock_irqsave(&xhci->lock, flags);
+- list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
+- list_del(&dev_info->list);
+- kfree(dev_info);
+- }
+- spin_unlock_irqrestore(&xhci->lock, flags);
+-
+ if (!xhci->rh_bw)
+ goto no_bw;
+
+@@ -2231,7 +2222,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ u32 page_size, temp;
+ int i;
+
+- INIT_LIST_HEAD(&xhci->lpm_failed_devs);
+ INIT_LIST_HEAD(&xhci->cancel_cmd_list);
+
+ page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 6e0d886bcce5..ed6c186a5393 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4025,133 +4025,6 @@ static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
+ return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
+ }
+
+-static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
+- struct usb_device *udev)
+-{
+- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+- struct dev_info *dev_info;
+- __le32 __iomem **port_array;
+- __le32 __iomem *addr, *pm_addr;
+- u32 temp, dev_id;
+- unsigned int port_num;
+- unsigned long flags;
+- int hird;
+- int ret;
+-
+- if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
+- !udev->lpm_capable)
+- return -EINVAL;
+-
+- /* we only support lpm for non-hub device connected to root hub yet */
+- if (!udev->parent || udev->parent->parent ||
+- udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+- return -EINVAL;
+-
+- spin_lock_irqsave(&xhci->lock, flags);
+-
+- /* Look for devices in lpm_failed_devs list */
+- dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
+- le16_to_cpu(udev->descriptor.idProduct);
+- list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
+- if (dev_info->dev_id == dev_id) {
+- ret = -EINVAL;
+- goto finish;
+- }
+- }
+-
+- port_array = xhci->usb2_ports;
+- port_num = udev->portnum - 1;
+-
+- if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
+- xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
+- ret = -EINVAL;
+- goto finish;
+- }
+-
+- /*
+- * Test USB 2.0 software LPM.
+- * FIXME: some xHCI 1.0 hosts may implement a new register to set up
+- * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
+- * in the June 2011 errata release.
+- */
+- xhci_dbg(xhci, "test port %d software LPM\n", port_num);
+- /*
+- * Set L1 Device Slot and HIRD/BESL.
+- * Check device's USB 2.0 extension descriptor to determine whether
+- * HIRD or BESL shoule be used. See USB2.0 LPM errata.
+- */
+- pm_addr = port_array[port_num] + PORTPMSC;
+- hird = xhci_calculate_hird_besl(xhci, udev);
+- temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
+- xhci_writel(xhci, temp, pm_addr);
+-
+- /* Set port link state to U2(L1) */
+- addr = port_array[port_num];
+- xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
+-
+- /* wait for ACK */
+- spin_unlock_irqrestore(&xhci->lock, flags);
+- msleep(10);
+- spin_lock_irqsave(&xhci->lock, flags);
+-
+- /* Check L1 Status */
+- ret = xhci_handshake(xhci, pm_addr,
+- PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
+- if (ret != -ETIMEDOUT) {
+- /* enter L1 successfully */
+- temp = xhci_readl(xhci, addr);
+- xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
+- port_num, temp);
+- ret = 0;
+- } else {
+- temp = xhci_readl(xhci, pm_addr);
+- xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
+- port_num, temp & PORT_L1S_MASK);
+- ret = -EINVAL;
+- }
+-
+- /* Resume the port */
+- xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
+-
+- spin_unlock_irqrestore(&xhci->lock, flags);
+- msleep(10);
+- spin_lock_irqsave(&xhci->lock, flags);
+-
+- /* Clear PLC */
+- xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
+-
+- /* Check PORTSC to make sure the device is in the right state */
+- if (!ret) {
+- temp = xhci_readl(xhci, addr);
+- xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
+- if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
+- (temp & PORT_PLS_MASK) != XDEV_U0) {
+- xhci_dbg(xhci, "port L1 resume fail\n");
+- ret = -EINVAL;
+- }
+- }
+-
+- if (ret) {
+- /* Insert dev to lpm_failed_devs list */
+- xhci_warn(xhci, "device LPM test failed, may disconnect and "
+- "re-enumerate\n");
+- dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
+- if (!dev_info) {
+- ret = -ENOMEM;
+- goto finish;
+- }
+- dev_info->dev_id = dev_id;
+- INIT_LIST_HEAD(&dev_info->list);
+- list_add(&dev_info->list, &xhci->lpm_failed_devs);
+- } else {
+- xhci_ring_device(xhci, udev->slot_id);
+- }
+-
+-finish:
+- spin_unlock_irqrestore(&xhci->lock, flags);
+- return ret;
+-}
+-
+ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+ struct usb_device *udev, int enable)
+ {
+@@ -4228,7 +4101,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+ }
+
+ pm_val &= ~PORT_HIRD_MASK;
+- pm_val |= PORT_HIRD(hird) | PORT_RWE;
++ pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
+ xhci_writel(xhci, pm_val, pm_addr);
+ pm_val = xhci_readl(xhci, pm_addr);
+ pm_val |= PORT_HLE;
+@@ -4236,7 +4109,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+ /* flush write */
+ xhci_readl(xhci, pm_addr);
+ } else {
+- pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
++ pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
+ xhci_writel(xhci, pm_val, pm_addr);
+ /* flush write */
+ xhci_readl(xhci, pm_addr);
+@@ -4279,24 +4152,26 @@ static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
+ int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+- int ret;
+ int portnum = udev->portnum - 1;
+
+- ret = xhci_usb2_software_lpm_test(hcd, udev);
+- if (!ret) {
+- xhci_dbg(xhci, "software LPM test succeed\n");
+- if (xhci->hw_lpm_support == 1 &&
+- xhci_check_usb2_port_capability(xhci, portnum, XHCI_HLC)) {
+- udev->usb2_hw_lpm_capable = 1;
+- udev->l1_params.timeout = XHCI_L1_TIMEOUT;
+- udev->l1_params.besl = XHCI_DEFAULT_BESL;
+- if (xhci_check_usb2_port_capability(xhci, portnum,
+- XHCI_BLC))
+- udev->usb2_hw_lpm_besl_capable = 1;
+- ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
+- if (!ret)
+- udev->usb2_hw_lpm_enabled = 1;
+- }
++ if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
++ !udev->lpm_capable)
++ return 0;
++
++ /* we only support lpm for non-hub device connected to root hub yet */
++ if (!udev->parent || udev->parent->parent ||
++ udev->descriptor.bDeviceClass == USB_CLASS_HUB)
++ return 0;
++
++ if (xhci->hw_lpm_support == 1 &&
++ xhci_check_usb2_port_capability(
++ xhci, portnum, XHCI_HLC)) {
++ udev->usb2_hw_lpm_capable = 1;
++ udev->l1_params.timeout = XHCI_L1_TIMEOUT;
++ udev->l1_params.besl = XHCI_DEFAULT_BESL;
++ if (xhci_check_usb2_port_capability(xhci, portnum,
++ XHCI_BLC))
++ udev->usb2_hw_lpm_besl_capable = 1;
+ }
+
+ return 0;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 941d5f59e4dc..ed3a425de8ce 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -383,6 +383,7 @@ struct xhci_op_regs {
+ #define PORT_RWE (1 << 3)
+ #define PORT_HIRD(p) (((p) & 0xf) << 4)
+ #define PORT_HIRD_MASK (0xf << 4)
++#define PORT_L1DS_MASK (0xff << 8)
+ #define PORT_L1DS(p) (((p) & 0xff) << 8)
+ #define PORT_HLE (1 << 16)
+
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index cd70cc886171..0d0d11880968 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -1809,6 +1809,7 @@ static void musb_free(struct musb *musb)
+ disable_irq_wake(musb->nIrq);
+ free_irq(musb->nIrq, musb);
+ }
++ cancel_work_sync(&musb->irq_work);
+ if (musb->dma_controller)
+ dma_controller_destroy(musb->dma_controller);
+
+@@ -1946,6 +1947,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+ if (status < 0)
+ goto fail3;
+ status = musb_gadget_setup(musb);
++ if (status)
++ musb_host_cleanup(musb);
+ break;
+ default:
+ dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
+@@ -1972,6 +1975,7 @@ fail5:
+
+ fail4:
+ musb_gadget_cleanup(musb);
++ musb_host_cleanup(musb);
+
+ fail3:
+ if (musb->dma_controller)
+diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
+index bd4138d80a48..1edee7906b73 100644
+--- a/drivers/usb/musb/musb_dsps.c
++++ b/drivers/usb/musb/musb_dsps.c
+@@ -121,6 +121,7 @@ struct dsps_glue {
+ unsigned long last_timer; /* last timer data for each instance */
+ };
+
++static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout);
+ /**
+ * dsps_musb_enable - enable interrupts
+ */
+@@ -143,6 +144,7 @@ static void dsps_musb_enable(struct musb *musb)
+ /* Force the DRVVBUS IRQ so we can start polling for ID change. */
+ dsps_writel(reg_base, wrp->coreintr_set,
+ (1 << wrp->drvvbus) << wrp->usb_shift);
++ dsps_musb_try_idle(musb, 0);
+ }
+
+ /**
+@@ -171,6 +173,7 @@ static void otg_timer(unsigned long _musb)
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ u8 devctl;
+ unsigned long flags;
++ int skip_session = 0;
+
+ /*
+ * We poll because DSPS IP's won't expose several OTG-critical
+@@ -183,10 +186,12 @@ static void otg_timer(unsigned long _musb)
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_WAIT_BCON:
+- devctl &= ~MUSB_DEVCTL_SESSION;
+- dsps_writeb(musb->mregs, MUSB_DEVCTL, devctl);
++ dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
++ skip_session = 1;
++ /* fall through */
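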
+
+- devctl = dsps_readb(musb->mregs, MUSB_DEVCTL);
++ case OTG_STATE_A_IDLE:
++ case OTG_STATE_B_IDLE:
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+@@ -194,20 +199,15 @@ static void otg_timer(unsigned long _musb)
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
++ if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
++ dsps_writeb(mregs, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
++ mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ dsps_writel(musb->ctrl_base, wrp->coreintr_set,
+ MUSB_INTR_VBUSERROR << wrp->usb_shift);
+ break;
+- case OTG_STATE_B_IDLE:
+- devctl = dsps_readb(mregs, MUSB_DEVCTL);
+- if (devctl & MUSB_DEVCTL_BDEVICE)
+- mod_timer(&glue->timer,
+- jiffies + wrp->poll_seconds * HZ);
+- else
+- musb->xceiv->state = OTG_STATE_A_IDLE;
+- break;
+ default:
+ break;
+ }
+@@ -234,6 +234,9 @@ static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
+ if (musb->port_mode == MUSB_PORT_MODE_HOST)
+ return;
+
++ if (!musb->g.dev.driver)
++ return;
++
+ if (time_after(glue->last_timer, timeout) &&
+ timer_pending(&glue->timer)) {
+ dev_dbg(musb->controller,
+diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
+index d1d6b83aabca..9af6bba5eac9 100644
+--- a/drivers/usb/musb/musb_virthub.c
++++ b/drivers/usb/musb/musb_virthub.c
+@@ -220,6 +220,23 @@ int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
+ return retval;
+ }
+
++static int musb_has_gadget(struct musb *musb)
++{
++ /*
++ * In host-only mode we start a connection right away. In OTG mode
++ * we have to wait until a gadget has been loaded. We don't really
++ * need a gadget if we operate as a host, but we should not start a
++ * session as a device without a gadget or else we explode.
++ */
++#ifdef CONFIG_USB_MUSB_HOST
++ return 1;
++#else
++ if (musb->port_mode == MUSB_PORT_MODE_HOST)
++ return 1;
++ return musb->g.dev.driver != NULL;
++#endif
++}
++
+ int musb_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+@@ -362,7 +379,7 @@ int musb_hub_control(
+ * initialization logic, e.g. for OTG, or change any
+ * logic relating to VBUS power-up.
+ */
+- if (!hcd->self.is_b_host)
++ if (!hcd->self.is_b_host && musb_has_gadget(musb))
+ musb_start(musb);
+ break;
+ case USB_PORT_FEAT_RESET:
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index fdf953539c62..e5bdd987b9e8 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1532,7 +1532,11 @@ static int mos7840_tiocmget(struct tty_struct *tty)
+ return -ENODEV;
+
+ status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
++ if (status != 1)
++ return -EIO;
+ status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
++ if (status != 1)
++ return -EIO;
+ result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
+ | ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
+ | ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0)
+diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
+index fd4f1ce6256a..b5e4fc19dec0 100644
+--- a/drivers/usb/wusbcore/wa-rpipe.c
++++ b/drivers/usb/wusbcore/wa-rpipe.c
+@@ -333,7 +333,10 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
+ /* FIXME: compute so seg_size > ep->maxpktsize */
+ rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
+ /* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
+- rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize);
++ if (usb_endpoint_xfer_isoc(&ep->desc))
++ rpipe->descr.wMaxPacketSize = epcd->wOverTheAirPacketSize;
++ else
++ rpipe->descr.wMaxPacketSize = ep->desc.wMaxPacketSize;
+
+ rpipe->descr.hwa_bMaxBurst = max(min_t(unsigned int,
+ epcd->bMaxBurst, 16U), 1U);
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 4a355726151e..26450d850f14 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -4481,6 +4481,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ int ret;
+ u64 disk_bytenr;
++ u64 new_bytenr;
+ LIST_HEAD(list);
+
+ ordered = btrfs_lookup_ordered_extent(inode, file_pos);
+@@ -4492,13 +4493,24 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
+ if (ret)
+ goto out;
+
+- disk_bytenr = ordered->start;
+ while (!list_empty(&list)) {
+ sums = list_entry(list.next, struct btrfs_ordered_sum, list);
+ list_del_init(&sums->list);
+
+- sums->bytenr = disk_bytenr;
+- disk_bytenr += sums->len;
++ /*
++ * We need to offset the new_bytenr based on where the csum is.
++ * We need to do this because we will read in entire prealloc
++ * extents but we may have written to say the middle of the
++ * prealloc extent, so we need to make sure the csum goes with
++ * the right disk offset.
++ *
++ * We can do this because the data reloc inode refers strictly
++ * to the on disk bytes, so we don't have to worry about
++ * disk_len vs real len like with real inodes since it's all
++ * disk length.
++ */
++ new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
++ sums->bytenr = new_bytenr;
+
+ btrfs_add_ordered_sum(inode, ordered, sums);
+ }
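
Editorial sketch: the btrfs hunk stops marching a cursor across the csums and instead rebases each csum individually: every csum keeps its offset within the original extent, and that offset is applied to the new ordered extent's start. This stays correct even when only the middle of a preallocated extent was written. The arithmetic, with illustrative numbers:

#include <assert.h>
#include <stdint.h>

static uint64_t rebase(uint64_t ordered_start,
                       uint64_t old_extent_start, uint64_t sum_bytenr)
{
        return ordered_start + (sum_bytenr - old_extent_start);
}

int main(void)
{
        /* a csum 4 units into the old extent lands 4 units into the
         * new ordered extent */
        assert(rebase(1000, 500, 504) == 1004);
        return 0;
}
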
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index 277bd1be21fd..511d41546791 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -56,10 +56,19 @@ static void configfs_d_iput(struct dentry * dentry,
+ struct configfs_dirent *sd = dentry->d_fsdata;
+
+ if (sd) {
+- BUG_ON(sd->s_dentry != dentry);
+ /* Coordinate with configfs_readdir */
+ spin_lock(&configfs_dirent_lock);
+- sd->s_dentry = NULL;
++ /* Coordinate with configfs_attach_attr, which increases
++ * sd->s_count and points sd->s_dentry at a newly allocated
++ * dentry. Only set sd->s_dentry to NULL when this dentry is
++ * the only sd owner.
++ * Otherwise, configfs_d_iput may run just after
++ * configfs_attach_attr and set sd->s_dentry to NULL even
++ * though it is still in use.
++ */
++ if (atomic_read(&sd->s_count) <= 2)
++ sd->s_dentry = NULL;
++
+ spin_unlock(&configfs_dirent_lock);
+ configfs_put(sd);
+ }
+@@ -426,8 +435,11 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
+ struct configfs_attribute * attr = sd->s_element;
+ int error;
+
++ spin_lock(&configfs_dirent_lock);
+ dentry->d_fsdata = configfs_get(sd);
+ sd->s_dentry = dentry;
++ spin_unlock(&configfs_dirent_lock);
++
+ error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG,
+ configfs_init_file);
+ if (error) {
+diff --git a/fs/dcache.c b/fs/dcache.c
+index ae6ebb88ceff..89f96719a29b 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2881,9 +2881,9 @@ static int prepend_path(const struct path *path,
+ const struct path *root,
+ char **buffer, int *buflen)
+ {
+- struct dentry *dentry = path->dentry;
+- struct vfsmount *vfsmnt = path->mnt;
+- struct mount *mnt = real_mount(vfsmnt);
++ struct dentry *dentry;
++ struct vfsmount *vfsmnt;
++ struct mount *mnt;
+ int error = 0;
+ unsigned seq = 0;
+ char *bptr;
+@@ -2893,6 +2893,9 @@ static int prepend_path(const struct path *path,
+ restart:
+ bptr = *buffer;
+ blen = *buflen;
++ dentry = path->dentry;
++ vfsmnt = path->mnt;
++ mnt = real_mount(vfsmnt);
+ read_seqbegin_or_lock(&rename_lock, &seq);
+ while (dentry != root->dentry || vfsmnt != root->mnt) {
+ struct dentry * parent;
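
Editorial sketch: the dcache fix moves the reads of path->dentry and path->mnt inside the restart label. In a seqlock-style retry loop every pass must begin from fresh inputs, because values cached before the first pass may describe a tree a concurrent rename has already changed. A single-file analogue using a version counter (names invented):

#include <stdatomic.h>

struct path { void *dentry; void *mnt; };

static atomic_uint rename_seq;  /* even: stable, odd: writer active */

static void build_path(const struct path *p)
{
        unsigned seq;
        void *dentry, *mnt;

        do {
                do {
                        seq = atomic_load(&rename_seq);
                } while (seq & 1);      /* wait out an in-flight writer */
                dentry = p->dentry;     /* re-read inside the retry loop */
                mnt = p->mnt;
                /* ... walk dentry/mnt and prepend names here ... */
                (void)dentry; (void)mnt;
        } while (atomic_load(&rename_seq) != seq); /* retry if it moved */
}
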
+diff --git a/fs/exec.c b/fs/exec.c
+index 8875dd10ae7a..bb8afc1d1df4 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1668,6 +1668,12 @@ int __get_dumpable(unsigned long mm_flags)
+ return (ret > SUID_DUMP_USER) ? SUID_DUMP_ROOT : ret;
+ }
+
++/*
++ * This returns the actual value of the suid_dumpable flag. For things
++ * that are using this for checking for privilege transitions, it must
++ * test against SUID_DUMP_USER rather than treating it as a boolean
++ * value.
++ */
+ int get_dumpable(struct mm_struct *mm)
+ {
+ return __get_dumpable(mm->flags);
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index ced3257f06e8..968d4c56e5eb 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -584,17 +584,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ if (!IS_ERR(inode)) {
+ d = d_splice_alias(inode, dentry);
+ error = 0;
+- if (file && !IS_ERR(d)) {
+- if (d == NULL)
+- d = dentry;
+- if (S_ISREG(inode->i_mode))
+- error = finish_open(file, d, gfs2_open_common, opened);
+- else
++ if (file) {
++ if (S_ISREG(inode->i_mode)) {
++ WARN_ON(d != NULL);
++ error = finish_open(file, dentry, gfs2_open_common, opened);
++ } else {
+ error = finish_no_open(file, d);
++ }
++ } else {
++ dput(d);
+ }
+ gfs2_glock_dq_uninit(ghs);
+- if (IS_ERR(d))
+- return PTR_ERR(d);
+ return error;
+ } else if (error != -ENOENT) {
+ goto fail_gunlock;
+@@ -781,8 +781,10 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
+ error = finish_open(file, dentry, gfs2_open_common, opened);
+
+ gfs2_glock_dq_uninit(&gh);
+- if (error)
++ if (error) {
++ dput(d);
+ return ERR_PTR(error);
++ }
+ return d;
+ }
+
+@@ -1163,14 +1165,16 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
+ d = __gfs2_lookup(dir, dentry, file, opened);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+- if (d == NULL)
+- d = dentry;
+- if (d->d_inode) {
++ if (d != NULL)
++ dentry = d;
++ if (dentry->d_inode) {
+ if (!(*opened & FILE_OPENED))
+- return finish_no_open(file, d);
++ return finish_no_open(file, dentry);
++ dput(d);
+ return 0;
+ }
+
++ BUG_ON(d != NULL);
+ if (!(flags & O_CREAT))
+ return -ENOENT;
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d53d6785cba2..3b115653d422 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1318,21 +1318,14 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
+ int ret;
+
+ if (!data->rpc_done) {
+- ret = data->rpc_status;
+- goto err;
++ if (data->rpc_status) {
++ ret = data->rpc_status;
++ goto err;
++ }
++ /* cached opens have already been processed */
++ goto update;
+ }
+
+- ret = -ESTALE;
+- if (!(data->f_attr.valid & NFS_ATTR_FATTR_TYPE) ||
+- !(data->f_attr.valid & NFS_ATTR_FATTR_FILEID) ||
+- !(data->f_attr.valid & NFS_ATTR_FATTR_CHANGE))
+- goto err;
+-
+- ret = -ENOMEM;
+- state = nfs4_get_open_state(inode, data->owner);
+- if (state == NULL)
+- goto err;
+-
+ ret = nfs_refresh_inode(inode, &data->f_attr);
+ if (ret)
+ goto err;
+@@ -1341,8 +1334,10 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
+
+ if (data->o_res.delegation_type != 0)
+ nfs4_opendata_check_deleg(data, state);
++update:
+ update_open_stateid(state, &data->o_res.stateid, NULL,
+ data->o_arg.fmode);
++ atomic_inc(&state->count);
+
+ return state;
+ err:
+@@ -4575,7 +4570,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
+ struct nfs4_label label = {0, 0, buflen, buf};
+
+ u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
+- struct nfs4_getattr_arg args = {
++ struct nfs4_getattr_arg arg = {
+ .fh = NFS_FH(inode),
+ .bitmask = bitmask,
+ };
+@@ -4586,14 +4581,14 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
+- .rpc_argp = &args,
++ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ };
+ int ret;
+
+ nfs_fattr_init(&fattr);
+
+- ret = rpc_call_sync(server->client, &msg, 0);
++ ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
+ if (ret)
+ return ret;
+ if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
+@@ -4630,7 +4625,7 @@ static int _nfs4_do_set_security_label(struct inode *inode,
+ struct iattr sattr = {0};
+ struct nfs_server *server = NFS_SERVER(inode);
+ const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
+- struct nfs_setattrargs args = {
++ struct nfs_setattrargs arg = {
+ .fh = NFS_FH(inode),
+ .iap = &sattr,
+ .server = server,
+@@ -4644,14 +4639,14 @@ static int _nfs4_do_set_security_label(struct inode *inode,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
+- .rpc_argp = &args,
++ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ };
+ int status;
+
+- nfs4_stateid_copy(&args.stateid, &zero_stateid);
++ nfs4_stateid_copy(&arg.stateid, &zero_stateid);
+
+- status = rpc_call_sync(server->client, &msg, 0);
++ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
+ if (status)
+ dprintk("%s failed: %d\n", __func__, status);
+
+@@ -5106,6 +5101,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
+ status = 0;
+ }
+ request->fl_ops->fl_release_private(request);
++ request->fl_ops = NULL;
+ out:
+ return status;
+ }
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index cc14cbb78b73..ebced8d71157 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1422,7 +1422,7 @@ restart:
+ if (status >= 0) {
+ status = nfs4_reclaim_locks(state, ops);
+ if (status >= 0) {
+- if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) {
++ if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+ spin_lock(&state->state_lock);
+ list_for_each_entry(lock, &state->lock_states, ls_locks) {
+ if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
+@@ -1881,10 +1881,15 @@ again:
+ nfs4_root_machine_cred(clp);
+ goto again;
+ }
+- if (i > 2)
++ if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX)
+ break;
+ case -NFS4ERR_CLID_INUSE:
+ case -NFS4ERR_WRONGSEC:
++ /* No point in retrying if we already used RPC_AUTH_UNIX */
++ if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX) {
++ status = -EPERM;
++ break;
++ }
+ clnt = rpc_clone_client_set_auth(clnt, RPC_AUTH_UNIX);
+ if (IS_ERR(clnt)) {
+ status = PTR_ERR(clnt);
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 5f38ea36e266..af51cf9bf2e3 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -536,16 +536,12 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ if (err)
+ goto out3;
+ exp.ex_anon_uid= make_kuid(&init_user_ns, an_int);
+- if (!uid_valid(exp.ex_anon_uid))
+- goto out3;
+
+ /* anon gid */
+ err = get_int(&mesg, &an_int);
+ if (err)
+ goto out3;
+ exp.ex_anon_gid= make_kgid(&init_user_ns, an_int);
+- if (!gid_valid(exp.ex_anon_gid))
+- goto out3;
+
+ /* fsid */
+ err = get_int(&mesg, &an_int);
+@@ -583,6 +579,17 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ exp.ex_uuid);
+ if (err)
+ goto out4;
++ /*
++ * For some reason exportfs has been passing down an
++ * invalid (-1) uid & gid on the "dummy" export which it
++ * uses to test export support. To make sure exportfs
++ * sees errors from check_export we therefore need to
++ * delay these checks till after check_export:
++ */
++ if (!uid_valid(exp.ex_anon_uid))
++ goto out4;
++ if (!gid_valid(exp.ex_anon_gid))
++ goto out4;
+ }
+
+ expp = svc_export_lookup(&exp);
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index d9454fe5653f..ecc735e30bea 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -141,8 +141,8 @@ xdr_error: \
+
+ static void next_decode_page(struct nfsd4_compoundargs *argp)
+ {
+- argp->pagelist++;
+ argp->p = page_address(argp->pagelist[0]);
++ argp->pagelist++;
+ if (argp->pagelen < PAGE_SIZE) {
+ argp->end = argp->p + (argp->pagelen>>2);
+ argp->pagelen = 0;
+@@ -411,6 +411,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
+ label->data = kzalloc(dummy32 + 1, GFP_KERNEL);
+ if (!label->data)
+ return nfserr_jukebox;
++ label->len = dummy32;
+ defer_free(argp, kfree, label->data);
+ memcpy(label->data, buf, dummy32);
+ }
+@@ -1208,6 +1209,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
+ len -= pages * PAGE_SIZE;
+
+ argp->p = (__be32 *)page_address(argp->pagelist[0]);
++ argp->pagelist++;
+ argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
+ }
+ argp->p += XDR_QUADLEN(len);
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index c827acb0e943..72cb28e73ca0 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -298,41 +298,12 @@ commit_metadata(struct svc_fh *fhp)
+ }
+
+ /*
+- * Set various file attributes.
+- * N.B. After this call fhp needs an fh_put
++ * Go over the attributes and take care of the small differences between
++ * NFS semantics and what Linux expects.
+ */
+-__be32
+-nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+- int check_guard, time_t guardtime)
++static void
++nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
+ {
+- struct dentry *dentry;
+- struct inode *inode;
+- int accmode = NFSD_MAY_SATTR;
+- umode_t ftype = 0;
+- __be32 err;
+- int host_err;
+- int size_change = 0;
+-
+- if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
+- accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
+- if (iap->ia_valid & ATTR_SIZE)
+- ftype = S_IFREG;
+-
+- /* Get inode */
+- err = fh_verify(rqstp, fhp, ftype, accmode);
+- if (err)
+- goto out;
+-
+- dentry = fhp->fh_dentry;
+- inode = dentry->d_inode;
+-
+- /* Ignore any mode updates on symlinks */
+- if (S_ISLNK(inode->i_mode))
+- iap->ia_valid &= ~ATTR_MODE;
+-
+- if (!iap->ia_valid)
+- goto out;
+-
+ /*
+ * NFSv2 does not differentiate between "set-[ac]time-to-now"
+ * which only requires access, and "set-[ac]time-to-X" which
+@@ -342,8 +313,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ * convert to "set to now" instead of "set to explicit time"
+ *
+ * We only call inode_change_ok as the last test as technically
+- * it is not an interface that we should be using. It is only
+- * valid if the filesystem does not define it's own i_op->setattr.
++ * it is not an interface that we should be using.
+ */
+ #define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
+ #define MAX_TOUCH_TIME_ERROR (30*60)
+@@ -369,30 +339,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ iap->ia_valid &= ~BOTH_TIME_SET;
+ }
+ }
+-
+- /*
+- * The size case is special.
+- * It changes the file as well as the attributes.
+- */
+- if (iap->ia_valid & ATTR_SIZE) {
+- if (iap->ia_size < inode->i_size) {
+- err = nfsd_permission(rqstp, fhp->fh_export, dentry,
+- NFSD_MAY_TRUNC|NFSD_MAY_OWNER_OVERRIDE);
+- if (err)
+- goto out;
+- }
+-
+- host_err = get_write_access(inode);
+- if (host_err)
+- goto out_nfserr;
+-
+- size_change = 1;
+- host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
+- if (host_err) {
+- put_write_access(inode);
+- goto out_nfserr;
+- }
+- }
+
+ /* sanitize the mode change */
+ if (iap->ia_valid & ATTR_MODE) {
+@@ -415,32 +361,111 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
+ }
+ }
++}
+
+- /* Change the attributes. */
++static __be32
++nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
++ struct iattr *iap)
++{
++ struct inode *inode = fhp->fh_dentry->d_inode;
++ int host_err;
+
+- iap->ia_valid |= ATTR_CTIME;
++ if (iap->ia_size < inode->i_size) {
++ __be32 err;
+
+- err = nfserr_notsync;
+- if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
+- host_err = nfsd_break_lease(inode);
+- if (host_err)
+- goto out_nfserr;
+- fh_lock(fhp);
++ err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
++ NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
++ if (err)
++ return err;
++ }
+
+- host_err = notify_change(dentry, iap);
+- err = nfserrno(host_err);
+- fh_unlock(fhp);
++ host_err = get_write_access(inode);
++ if (host_err)
++ goto out_nfserrno;
++
++ host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
++ if (host_err)
++ goto out_put_write_access;
++ return 0;
++
++out_put_write_access:
++ put_write_access(inode);
++out_nfserrno:
++ return nfserrno(host_err);
++}
++
++/*
++ * Set various file attributes. After this call fhp needs an fh_put.
++ */
++__be32
++nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
++ int check_guard, time_t guardtime)
++{
++ struct dentry *dentry;
++ struct inode *inode;
++ int accmode = NFSD_MAY_SATTR;
++ umode_t ftype = 0;
++ __be32 err;
++ int host_err;
++ int size_change = 0;
++
++ if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
++ accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
++ if (iap->ia_valid & ATTR_SIZE)
++ ftype = S_IFREG;
++
++ /* Get inode */
++ err = fh_verify(rqstp, fhp, ftype, accmode);
++ if (err)
++ goto out;
++
++ dentry = fhp->fh_dentry;
++ inode = dentry->d_inode;
++
++ /* Ignore any mode updates on symlinks */
++ if (S_ISLNK(inode->i_mode))
++ iap->ia_valid &= ~ATTR_MODE;
++
++ if (!iap->ia_valid)
++ goto out;
++
++ nfsd_sanitize_attrs(inode, iap);
++
++ /*
++ * The size case is special, it changes the file in addition to the
++ * attributes.
++ */
++ if (iap->ia_valid & ATTR_SIZE) {
++ err = nfsd_get_write_access(rqstp, fhp, iap);
++ if (err)
++ goto out;
++ size_change = 1;
+ }
++
++ iap->ia_valid |= ATTR_CTIME;
++
++ if (check_guard && guardtime != inode->i_ctime.tv_sec) {
++ err = nfserr_notsync;
++ goto out_put_write_access;
++ }
++
++ host_err = nfsd_break_lease(inode);
++ if (host_err)
++ goto out_put_write_access_nfserror;
++
++ fh_lock(fhp);
++ host_err = notify_change(dentry, iap);
++ fh_unlock(fhp);
++
++out_put_write_access_nfserror:
++ err = nfserrno(host_err);
++out_put_write_access:
+ if (size_change)
+ put_write_access(inode);
+ if (!err)
+ commit_metadata(fhp);
+ out:
+ return err;
+-
+-out_nfserr:
+- err = nfserrno(host_err);
+- goto out;
+ }
+
+ #if defined(CONFIG_NFSD_V2_ACL) || \
+diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
+index a5b59d92eb70..039708122038 100644
+--- a/fs/xfs/xfs_sb.c
++++ b/fs/xfs/xfs_sb.c
+@@ -596,6 +596,11 @@ xfs_sb_verify(
+ * single bit error could clear the feature bit and unused parts of the
+ * superblock are supposed to be zero. Hence a non-null crc field indicates that
+ * we've potentially lost a feature bit and we should check it anyway.
++ *
++ * However, past bugs (i.e. in growfs) left non-zeroed regions beyond the
++ * last field in V4 secondary superblocks. So for secondary superblocks,
++ * we are more forgiving, and ignore CRC failures if the primary doesn't
++ * indicate that the fs version is V5.
+ */
+ static void
+ xfs_sb_read_verify(
+@@ -616,8 +621,12 @@ xfs_sb_read_verify(
+
+ if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize),
+ offsetof(struct xfs_sb, sb_crc))) {
+- error = EFSCORRUPTED;
+- goto out_error;
++ /* Only fail bad secondaries on a known V5 filesystem */
++ if (bp->b_bn != XFS_SB_DADDR &&
++ xfs_sb_version_hascrc(&mp->m_sb)) {
++ error = EFSCORRUPTED;
++ goto out_error;
++ }
+ }
+ }
+ error = xfs_sb_verify(bp, true);
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index e8112ae50531..7554fd410bcc 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -99,9 +99,6 @@ extern void setup_new_exec(struct linux_binprm * bprm);
+ extern void would_dump(struct linux_binprm *, struct file *);
+
+ extern int suid_dumpable;
+-#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
+-#define SUID_DUMP_USER 1 /* Dump as user of process */
+-#define SUID_DUMP_ROOT 2 /* Dump as root */
+
+ /* Stack area protections */
+ #define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */
+diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
+index e36dee52f224..3859ddbecb5f 100644
+--- a/include/linux/nfs4.h
++++ b/include/linux/nfs4.h
+@@ -395,7 +395,7 @@ enum lock_type4 {
+ #define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
+ #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
+ #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
+-#define FATTR4_WORD2_SECURITY_LABEL (1UL << 17)
++#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
+
+ /* MDS threshold bitmap bits */
+ #define THRESHOLD_RD (1UL << 0)
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index e27baeeda3f4..b1e963efbde8 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -322,6 +322,10 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+ extern void set_dumpable(struct mm_struct *mm, int value);
+ extern int get_dumpable(struct mm_struct *mm);
+
++#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
++#define SUID_DUMP_USER 1 /* Dump as user of process */
++#define SUID_DUMP_ROOT 2 /* Dump as root */
++
+ /* mm flags */
+ /* dumpable bits */
+ #define MMF_DUMPABLE 0 /* core dump is permitted */
+@@ -2474,34 +2478,98 @@ static inline int tsk_is_polling(struct task_struct *p)
+ {
+ return task_thread_info(p)->status & TS_POLLING;
+ }
+-static inline void current_set_polling(void)
++static inline void __current_set_polling(void)
+ {
+ current_thread_info()->status |= TS_POLLING;
+ }
+
+-static inline void current_clr_polling(void)
++static inline bool __must_check current_set_polling_and_test(void)
++{
++ __current_set_polling();
++
++ /*
++ * Polling state must be visible before we test NEED_RESCHED,
++ * paired by resched_task()
++ */
++ smp_mb();
++
++ return unlikely(tif_need_resched());
++}
++
++static inline void __current_clr_polling(void)
+ {
+ current_thread_info()->status &= ~TS_POLLING;
+- smp_mb__after_clear_bit();
++}
++
++static inline bool __must_check current_clr_polling_and_test(void)
++{
++ __current_clr_polling();
++
++ /*
++ * Polling state must be visible before we test NEED_RESCHED,
++ * paired by resched_task()
++ */
++ smp_mb();
++
++ return unlikely(tif_need_resched());
+ }
+ #elif defined(TIF_POLLING_NRFLAG)
+ static inline int tsk_is_polling(struct task_struct *p)
+ {
+ return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
+ }
+-static inline void current_set_polling(void)
++
++static inline void __current_set_polling(void)
+ {
+ set_thread_flag(TIF_POLLING_NRFLAG);
+ }
+
+-static inline void current_clr_polling(void)
++static inline bool __must_check current_set_polling_and_test(void)
++{
++ __current_set_polling();
++
++ /*
++ * Polling state must be visible before we test NEED_RESCHED,
++ * paired by resched_task()
++ *
++ * XXX: assumes set/clear bit are identical barrier wise.
++ */
++ smp_mb__after_clear_bit();
++
++ return unlikely(tif_need_resched());
++}
++
++static inline void __current_clr_polling(void)
+ {
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ }
++
++static inline bool __must_check current_clr_polling_and_test(void)
++{
++ __current_clr_polling();
++
++ /*
++ * Polling state must be visible before we test NEED_RESCHED,
++ * paired by resched_task()
++ */
++ smp_mb__after_clear_bit();
++
++ return unlikely(tif_need_resched());
++}
++
+ #else
+ static inline int tsk_is_polling(struct task_struct *p) { return 0; }
+-static inline void current_set_polling(void) { }
+-static inline void current_clr_polling(void) { }
++static inline void __current_set_polling(void) { }
++static inline void __current_clr_polling(void) { }
++
++static inline bool __must_check current_set_polling_and_test(void)
++{
++ return unlikely(tif_need_resched());
++}
++static inline bool __must_check current_clr_polling_and_test(void)
++{
++ return unlikely(tif_need_resched());
++}
+ #endif
+
+ /*
+diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
+index e7e04736802f..4ae6f32c8033 100644
+--- a/include/linux/thread_info.h
++++ b/include/linux/thread_info.h
+@@ -107,6 +107,8 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
+ #define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
+ #define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
+
++#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
++
+ #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
+ /*
+ * An arch can define its own version of set_restore_sigmask() to get the
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 001629cd1a97..39cfa0aca91f 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -475,7 +475,8 @@ struct usb3_lpm_parameters {
+ * @lpm_capable: device supports LPM
+ * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
+ * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM
+- * @usb2_hw_lpm_enabled: USB2 hardware LPM enabled
++ * @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
++ * @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled
+ * @usb3_lpm_enabled: USB3 hardware LPM enabled
+ * @string_langid: language ID for strings
+ * @product: iProduct string, if present (static)
+@@ -548,6 +549,7 @@ struct usb_device {
+ unsigned usb2_hw_lpm_capable:1;
+ unsigned usb2_hw_lpm_besl_capable:1;
+ unsigned usb2_hw_lpm_enabled:1;
++ unsigned usb2_hw_lpm_allowed:1;
+ unsigned usb3_lpm_enabled:1;
+ int string_langid;
+
+diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
+index 9031a26249b5..ae6c3b8ed2f5 100644
+--- a/include/sound/compress_driver.h
++++ b/include/sound/compress_driver.h
+@@ -171,4 +171,13 @@ static inline void snd_compr_fragment_elapsed(struct snd_compr_stream *stream)
+ wake_up(&stream->runtime->sleep);
+ }
+
++static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
++{
++ if (snd_BUG_ON(!stream))
++ return;
++
++ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
++ wake_up(&stream->runtime->sleep);
++}
++
+ #endif
+diff --git a/ipc/shm.c b/ipc/shm.c
+index d69739610fd4..7a51443a51d6 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -208,15 +208,18 @@ static void shm_open(struct vm_area_struct *vma)
+ */
+ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+ {
++ struct file *shm_file;
++
++ shm_file = shp->shm_file;
++ shp->shm_file = NULL;
+ ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ shm_rmid(ns, shp);
+ shm_unlock(shp);
+- if (!is_file_hugepages(shp->shm_file))
+- shmem_lock(shp->shm_file, 0, shp->mlock_user);
++ if (!is_file_hugepages(shm_file))
++ shmem_lock(shm_file, 0, shp->mlock_user);
+ else if (shp->mlock_user)
+- user_shm_unlock(file_inode(shp->shm_file)->i_size,
+- shp->mlock_user);
+- fput (shp->shm_file);
++ user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
++ fput(shm_file);
+ ipc_rcu_putref(shp, shm_rcu_free);
+ }
+
+@@ -974,15 +977,25 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ ipc_lock_object(&shp->shm_perm);
+ if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
+ kuid_t euid = current_euid();
+- err = -EPERM;
+ if (!uid_eq(euid, shp->shm_perm.uid) &&
+- !uid_eq(euid, shp->shm_perm.cuid))
++ !uid_eq(euid, shp->shm_perm.cuid)) {
++ err = -EPERM;
+ goto out_unlock0;
+- if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
++ }
++ if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
++ err = -EPERM;
+ goto out_unlock0;
++ }
+ }
+
+ shm_file = shp->shm_file;
++
++ /* check if shm_destroy() is tearing down shp */
++ if (shm_file == NULL) {
++ err = -EIDRM;
++ goto out_unlock0;
++ }
++
+ if (is_file_hugepages(shm_file))
+ goto out_unlock0;
+
+@@ -1101,6 +1114,14 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+ goto out_unlock;
+
+ ipc_lock_object(&shp->shm_perm);
++
++ /* check if shm_destroy() is tearing down shp */
++ if (shp->shm_file == NULL) {
++ ipc_unlock_object(&shp->shm_perm);
++ err = -EIDRM;
++ goto out_unlock;
++ }
++
+ path = shp->shm_file->f_path;
+ path_get(&path);
+ shp->shm_nattch++;
+diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
+index e695c0a0bcb5..c261409500e4 100644
+--- a/kernel/cpu/idle.c
++++ b/kernel/cpu/idle.c
+@@ -44,7 +44,7 @@ static inline int cpu_idle_poll(void)
+ rcu_idle_enter();
+ trace_cpu_idle_rcuidle(0, smp_processor_id());
+ local_irq_enable();
+- while (!need_resched())
++ while (!tif_need_resched())
+ cpu_relax();
+ trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+ rcu_idle_exit();
+@@ -92,8 +92,7 @@ static void cpu_idle_loop(void)
+ if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
+ cpu_idle_poll();
+ } else {
+- current_clr_polling();
+- if (!need_resched()) {
++ if (!current_clr_polling_and_test()) {
+ stop_critical_timings();
+ rcu_idle_enter();
+ arch_cpu_idle();
+@@ -103,7 +102,7 @@ static void cpu_idle_loop(void)
+ } else {
+ local_irq_enable();
+ }
+- current_set_polling();
++ __current_set_polling();
+ }
+ arch_cpu_idle_exit();
+ }
+@@ -129,7 +128,7 @@ void cpu_startup_entry(enum cpuhp_state state)
+ */
+ boot_init_stack_canary();
+ #endif
+- current_set_polling();
++ __current_set_polling();
+ arch_cpu_idle_prepare();
+ cpu_idle_loop();
+ }
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index dd562e9aa2c8..1f4bcb3cc21c 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -257,7 +257,8 @@ ok:
+ if (task->mm)
+ dumpable = get_dumpable(task->mm);
+ rcu_read_lock();
+- if (!dumpable && !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
++ if (dumpable != SUID_DUMP_USER &&
++ !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
+ rcu_read_unlock();
+ return -EPERM;
+ }
+diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
+index 80c36bcf66e8..78e27e3b52ac 100644
+--- a/kernel/trace/trace_event_perf.c
++++ b/kernel/trace/trace_event_perf.c
+@@ -26,7 +26,7 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
+ {
+ /* The ftrace function trace is allowed only for root. */
+ if (ftrace_event_is_function(tp_event) &&
+- perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
++ perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /* No tracing, just counting, so no obvious leak */
+diff --git a/mm/slub.c b/mm/slub.c
+index c3eb3d3ca835..96f21691b67c 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1217,8 +1217,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size,
+ /*
+ * Enable debugging if selected on the kernel commandline.
+ */
+- if (slub_debug && (!slub_debug_slabs ||
+- !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
++ if (slub_debug && (!slub_debug_slabs || (name &&
++ !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
+ flags |= slub_debug;
+
+ return flags;
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 084656671d6e..cc24323d3045 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -482,6 +482,7 @@ gss_alloc_msg(struct gss_auth *gss_auth,
+ switch (vers) {
+ case 0:
+ gss_encode_v0_msg(gss_msg);
++ break;
+ default:
+ gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
+ };
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 77479606a971..941d19f8c999 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -656,14 +656,16 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client);
+ /*
+ * Free an RPC client
+ */
+-static void
++static struct rpc_clnt *
+ rpc_free_client(struct rpc_clnt *clnt)
+ {
++ struct rpc_clnt *parent = NULL;
++
+ dprintk_rcu("RPC: destroying %s client for %s\n",
+ clnt->cl_program->name,
+ rcu_dereference(clnt->cl_xprt)->servername);
+ if (clnt->cl_parent != clnt)
+- rpc_release_client(clnt->cl_parent);
++ parent = clnt->cl_parent;
+ rpc_clnt_remove_pipedir(clnt);
+ rpc_unregister_client(clnt);
+ rpc_free_iostats(clnt->cl_metrics);
+@@ -672,18 +674,17 @@ rpc_free_client(struct rpc_clnt *clnt)
+ rpciod_down();
+ rpc_free_clid(clnt);
+ kfree(clnt);
++ return parent;
+ }
+
+ /*
+ * Free an RPC client
+ */
+-static void
++static struct rpc_clnt *
+ rpc_free_auth(struct rpc_clnt *clnt)
+ {
+- if (clnt->cl_auth == NULL) {
+- rpc_free_client(clnt);
+- return;
+- }
++ if (clnt->cl_auth == NULL)
++ return rpc_free_client(clnt);
+
+ /*
+ * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
+@@ -694,7 +695,8 @@ rpc_free_auth(struct rpc_clnt *clnt)
+ rpcauth_release(clnt->cl_auth);
+ clnt->cl_auth = NULL;
+ if (atomic_dec_and_test(&clnt->cl_count))
+- rpc_free_client(clnt);
++ return rpc_free_client(clnt);
++ return NULL;
+ }
+
+ /*
+@@ -705,10 +707,13 @@ rpc_release_client(struct rpc_clnt *clnt)
+ {
+ dprintk("RPC: rpc_release_client(%p)\n", clnt);
+
+- if (list_empty(&clnt->cl_tasks))
+- wake_up(&destroy_wait);
+- if (atomic_dec_and_test(&clnt->cl_count))
+- rpc_free_auth(clnt);
++ do {
++ if (list_empty(&clnt->cl_tasks))
++ wake_up(&destroy_wait);
++ if (!atomic_dec_and_test(&clnt->cl_count))
++ break;
++ clnt = rpc_free_auth(clnt);
++ } while (clnt != NULL);
+ }
+ EXPORT_SYMBOL_GPL(rpc_release_client);
+
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index ee03d35677d9..b752e1de2e7d 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -393,8 +393,10 @@ static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen,
+ return kernel_sendmsg(sock, &msg, NULL, 0, 0);
+ }
+
+-static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
++static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy)
+ {
++ ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
++ int offset, size_t size, int flags);
+ struct page **ppage;
+ unsigned int remainder;
+ int err, sent = 0;
+@@ -403,6 +405,9 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
+ base += xdr->page_base;
+ ppage = xdr->pages + (base >> PAGE_SHIFT);
+ base &= ~PAGE_MASK;
++ do_sendpage = sock->ops->sendpage;
++ if (!zerocopy)
++ do_sendpage = sock_no_sendpage;
+ for(;;) {
+ unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
+ int flags = XS_SENDMSG_FLAGS;
+@@ -410,7 +415,7 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
+ remainder -= len;
+ if (remainder != 0 || more)
+ flags |= MSG_MORE;
+- err = sock->ops->sendpage(sock, *ppage, base, len, flags);
++ err = do_sendpage(sock, *ppage, base, len, flags);
+ if (remainder == 0 || err != len)
+ break;
+ sent += err;
+@@ -431,9 +436,10 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
+ * @addrlen: UDP only -- length of destination address
+ * @xdr: buffer containing this request
+ * @base: starting position in the buffer
++ * @zerocopy: true if it is safe to use sendpage()
+ *
+ */
+-static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
++static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy)
+ {
+ unsigned int remainder = xdr->len - base;
+ int err, sent = 0;
+@@ -461,7 +467,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
+ if (base < xdr->page_len) {
+ unsigned int len = xdr->page_len - base;
+ remainder -= len;
+- err = xs_send_pagedata(sock, xdr, base, remainder != 0);
++ err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy);
+ if (remainder == 0 || err != len)
+ goto out;
+ sent += err;
+@@ -564,7 +570,7 @@ static int xs_local_send_request(struct rpc_task *task)
+ req->rq_svec->iov_base, req->rq_svec->iov_len);
+
+ status = xs_sendpages(transport->sock, NULL, 0,
+- xdr, req->rq_bytes_sent);
++ xdr, req->rq_bytes_sent, true);
+ dprintk("RPC: %s(%u) = %d\n",
+ __func__, xdr->len - req->rq_bytes_sent, status);
+ if (likely(status >= 0)) {
+@@ -620,7 +626,7 @@ static int xs_udp_send_request(struct rpc_task *task)
+ status = xs_sendpages(transport->sock,
+ xs_addr(xprt),
+ xprt->addrlen, xdr,
+- req->rq_bytes_sent);
++ req->rq_bytes_sent, true);
+
+ dprintk("RPC: xs_udp_send_request(%u) = %d\n",
+ xdr->len - req->rq_bytes_sent, status);
+@@ -693,6 +699,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ struct xdr_buf *xdr = &req->rq_snd_buf;
++ bool zerocopy = true;
+ int status;
+
+ xs_encode_stream_record_marker(&req->rq_snd_buf);
+@@ -700,13 +707,20 @@ static int xs_tcp_send_request(struct rpc_task *task)
+ xs_pktdump("packet data:",
+ req->rq_svec->iov_base,
+ req->rq_svec->iov_len);
++ /* Don't use zero copy if this is a resend. If the RPC call
++ * completes while the socket holds a reference to the pages,
++ * then we may end up resending corrupted data.
++ */
++ if (task->tk_flags & RPC_TASK_SENT)
++ zerocopy = false;
+
+ /* Continue transmitting the packet/record. We must be careful
+ * to cope with writespace callbacks arriving _after_ we have
+ * called sendmsg(). */
+ while (1) {
+ status = xs_sendpages(transport->sock,
+- NULL, 0, xdr, req->rq_bytes_sent);
++ NULL, 0, xdr, req->rq_bytes_sent,
++ zerocopy);
+
+ dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
+ xdr->len - req->rq_bytes_sent, status);
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index 399433ad614e..a9c3d3cd1990 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -73,7 +73,6 @@ static struct ima_rule_entry default_rules[] = {
+ {.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
+- {.action = DONT_MEASURE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
+diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
+index bea523a5d852..d9af6387f37c 100644
+--- a/sound/core/compress_offload.c
++++ b/sound/core/compress_offload.c
+@@ -680,14 +680,48 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
+ return -EPERM;
+ retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
+ if (!retval) {
+- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+- wake_up(&stream->runtime->sleep);
++ snd_compr_drain_notify(stream);
+ stream->runtime->total_bytes_available = 0;
+ stream->runtime->total_bytes_transferred = 0;
+ }
+ return retval;
+ }
+
++static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
++{
++ int ret;
++
++ /*
++	 * We are called with the lock held, so drop the lock while we
++	 * wait for the drain-complete notification from the driver.
++	 *
++	 * The driver is expected to notify us of drain completion, after
++	 * which the stream moves to the SETUP state, even if draining
++	 * resulted in an error. We can trigger the next track after this.
++ */
++ stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
++ mutex_unlock(&stream->device->lock);
++
++	/* We wait here for the drain to complete; the wait can return on
++	 * interruption, with an error, or on success.
++	 * In the first two cases we don't do anything special and simply
++	 * return after waking up.
++ */
++
++ ret = wait_event_interruptible(stream->runtime->sleep,
++ (stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
++ if (ret == -ERESTARTSYS)
++		pr_debug("wait aborted by a signal\n");
++ else if (ret)
++ pr_debug("wait for drain failed with %d\n", ret);
++
++
++ wake_up(&stream->runtime->sleep);
++ mutex_lock(&stream->device->lock);
++
++ return ret;
++}
++
+ static int snd_compr_drain(struct snd_compr_stream *stream)
+ {
+ int retval;
+@@ -695,12 +729,15 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
+ if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
+ stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+ return -EPERM;
++
+ retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
+- if (!retval) {
+- stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
++ if (retval) {
++ pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
+ wake_up(&stream->runtime->sleep);
++ return retval;
+ }
+- return retval;
++
++ return snd_compress_wait_for_drain(stream);
+ }
+
+ static int snd_compr_next_track(struct snd_compr_stream *stream)
+@@ -736,9 +773,14 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
+ return -EPERM;
+
+ retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
++ if (retval) {
++ pr_debug("Partial drain returned failure\n");
++ wake_up(&stream->runtime->sleep);
++ return retval;
++ }
+
+ stream->next_track = false;
+- return retval;
++ return snd_compress_wait_for_drain(stream);
+ }
+
+ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
+index 1c19cd7ad26e..83b8a9a9163e 100644
+--- a/sound/drivers/pcsp/pcsp.c
++++ b/sound/drivers/pcsp/pcsp.c
+@@ -187,8 +187,8 @@ static int pcsp_probe(struct platform_device *dev)
+ static int pcsp_remove(struct platform_device *dev)
+ {
+ struct snd_pcsp *chip = platform_get_drvdata(dev);
+- alsa_card_pcsp_exit(chip);
+ pcspkr_input_remove(chip->input_dev);
++ alsa_card_pcsp_exit(chip);
+ return 0;
+ }
+
+diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
+index 81aeb934261a..0a90bd6ae232 100644
+--- a/sound/isa/msnd/msnd_pinnacle.c
++++ b/sound/isa/msnd/msnd_pinnacle.c
+@@ -73,9 +73,11 @@
+ #ifdef MSND_CLASSIC
+ # include "msnd_classic.h"
+ # define LOGNAME "msnd_classic"
++# define DEV_NAME "msnd-classic"
+ #else
+ # include "msnd_pinnacle.h"
+ # define LOGNAME "snd_msnd_pinnacle"
++# define DEV_NAME "msnd-pinnacle"
+ #endif
+
+ static void set_default_audio_parameters(struct snd_msnd *chip)
+@@ -1067,8 +1069,6 @@ static int snd_msnd_isa_remove(struct device *pdev, unsigned int dev)
+ return 0;
+ }
+
+-#define DEV_NAME "msnd-pinnacle"
+-
+ static struct isa_driver snd_msnd_driver = {
+ .match = snd_msnd_isa_match,
+ .probe = snd_msnd_isa_probe,
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 748c6a941963..e938a68625ea 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2579,9 +2579,6 @@ int snd_hda_codec_reset(struct hda_codec *codec)
+ cancel_delayed_work_sync(&codec->jackpoll_work);
+ #ifdef CONFIG_PM
+ cancel_delayed_work_sync(&codec->power_work);
+- codec->power_on = 0;
+- codec->power_transition = 0;
+- codec->power_jiffies = jiffies;
+ flush_workqueue(bus->workq);
+ #endif
+ snd_hda_ctls_clear(codec);
+@@ -3991,6 +3988,10 @@ static void hda_call_codec_resume(struct hda_codec *codec)
+ * in the resume / power-save sequence
+ */
+ hda_keep_power_on(codec);
++ if (codec->pm_down_notified) {
++ codec->pm_down_notified = 0;
++ hda_call_pm_notify(codec->bus, true);
++ }
+ hda_set_power_state(codec, AC_PWRST_D0);
+ restore_shutup_pins(codec);
+ hda_exec_init_verbs(codec);
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index b7c89dff7066..3067ed4fe3b2 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -549,11 +549,15 @@ static hda_nid_t look_for_out_mute_nid(struct hda_codec *codec,
+ static hda_nid_t look_for_out_vol_nid(struct hda_codec *codec,
+ struct nid_path *path)
+ {
++ struct hda_gen_spec *spec = codec->spec;
+ int i;
+
+ for (i = path->depth - 1; i >= 0; i--) {
+- if (nid_has_volume(codec, path->path[i], HDA_OUTPUT))
+- return path->path[i];
++ hda_nid_t nid = path->path[i];
++ if ((spec->out_vol_mask >> nid) & 1)
++ continue;
++ if (nid_has_volume(codec, nid, HDA_OUTPUT))
++ return nid;
+ }
+ return 0;
+ }
+@@ -792,10 +796,10 @@ static void set_pin_eapd(struct hda_codec *codec, hda_nid_t pin, bool enable)
+ if (spec->own_eapd_ctl ||
+ !(snd_hda_query_pin_caps(codec, pin) & AC_PINCAP_EAPD))
+ return;
+- if (codec->inv_eapd)
+- enable = !enable;
+ if (spec->keep_eapd_on && !enable)
+ return;
++ if (codec->inv_eapd)
++ enable = !enable;
+ snd_hda_codec_update_cache(codec, pin, 0,
+ AC_VERB_SET_EAPD_BTLENABLE,
+ enable ? 0x02 : 0x00);
+diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
+index 48d44026705b..7e45cb44d151 100644
+--- a/sound/pci/hda/hda_generic.h
++++ b/sound/pci/hda/hda_generic.h
+@@ -242,6 +242,9 @@ struct hda_gen_spec {
+ /* additional mute flags (only effective with auto_mute_via_amp=1) */
+ u64 mute_bits;
+
++ /* bitmask for skipping volume controls */
++ u64 out_vol_mask;
++
+ /* badness tables for output path evaluations */
+ const struct badness_table *main_out_badness;
+ const struct badness_table *extra_out_badness;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 6e61a019aa5e..a63aff2ca594 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -612,6 +612,11 @@ enum {
+ #define AZX_DCAPS_INTEL_PCH \
+ (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
+
++#define AZX_DCAPS_INTEL_HASWELL \
++ (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_ALIGN_BUFSIZE | \
++ AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME | \
++ AZX_DCAPS_I915_POWERWELL)
++
+ /* quirks for ATI SB / AMD Hudson */
+ #define AZX_DCAPS_PRESET_ATI_SB \
+ (AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \
+@@ -3987,14 +3992,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ /* Haswell */
+ { PCI_DEVICE(0x8086, 0x0a0c),
+- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH |
+- AZX_DCAPS_I915_POWERWELL },
++ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
+ { PCI_DEVICE(0x8086, 0x0c0c),
+- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH |
+- AZX_DCAPS_I915_POWERWELL },
++ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
+ { PCI_DEVICE(0x8086, 0x0d0c),
+- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH |
+- AZX_DCAPS_I915_POWERWELL },
++ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
+ /* 5 Series/3400 */
+ { PCI_DEVICE(0x8086, 0x3b56),
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index 2aa2f579b4d6..a52d2a1a5e83 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -219,8 +219,12 @@ static int alloc_ad_spec(struct hda_codec *codec)
+ static void ad_fixup_inv_jack_detect(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+- if (action == HDA_FIXUP_ACT_PRE_PROBE)
++ struct ad198x_spec *spec = codec->spec;
++
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ codec->inv_jack_detect = 1;
++ spec->gen.keep_eapd_on = 1;
++ }
+ }
+
+ enum {
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 18d972501585..072755c8289c 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -597,6 +597,7 @@ static int patch_cs420x(struct hda_codec *codec)
+ * Its layout is no longer compatible with CS4206/CS4207
+ */
+ enum {
++ CS4208_MAC_AUTO,
+ CS4208_MBA6,
+ CS4208_GPIO0,
+ };
+@@ -608,7 +609,12 @@ static const struct hda_model_fixup cs4208_models[] = {
+ };
+
+ static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
+- /* codec SSID */
++ SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS4208_MAC_AUTO),
++ {} /* terminator */
++};
++
++/* codec SSID matching */
++static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+ SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
+ {} /* terminator */
+@@ -626,6 +632,20 @@ static void cs4208_fixup_gpio0(struct hda_codec *codec,
+ }
+ }
+
++static const struct hda_fixup cs4208_fixups[];
++
++/* remap the fixup from codec SSID and apply it */
++static void cs4208_fixup_mac(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ if (action != HDA_FIXUP_ACT_PRE_PROBE)
++ return;
++ snd_hda_pick_fixup(codec, NULL, cs4208_mac_fixup_tbl, cs4208_fixups);
++ if (codec->fixup_id < 0 || codec->fixup_id == CS4208_MAC_AUTO)
++ codec->fixup_id = CS4208_GPIO0; /* default fixup */
++ snd_hda_apply_fixup(codec, action);
++}
++
+ static const struct hda_fixup cs4208_fixups[] = {
+ [CS4208_MBA6] = {
+ .type = HDA_FIXUP_PINS,
+@@ -637,6 +657,10 @@ static const struct hda_fixup cs4208_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cs4208_fixup_gpio0,
+ },
++ [CS4208_MAC_AUTO] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = cs4208_fixup_mac,
++ },
+ };
+
+ /* correct the 0dB offset of input pins */
+@@ -660,6 +684,8 @@ static int patch_cs4208(struct hda_codec *codec)
+ return -ENOMEM;
+
+ spec->gen.automute_hook = cs_automute;
++ /* exclude NID 0x10 (HP) from output volumes due to different steps */
++ spec->gen.out_vol_mask = 1ULL << 0x10;
+
+ snd_hda_pick_fixup(codec, cs4208_models, cs4208_fixup_tbl,
+ cs4208_fixups);
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index ec68eaea0336..96f07ce56603 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3568,6 +3568,8 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
+ .patch = patch_conexant_auto },
+ { .id = 0x14f15115, .name = "CX20757",
+ .patch = patch_conexant_auto },
++ { .id = 0x14f151d7, .name = "CX20952",
++ .patch = patch_conexant_auto },
+ {} /* terminator */
+ };
+
+@@ -3594,6 +3596,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15111");
+ MODULE_ALIAS("snd-hda-codec-id:14f15113");
+ MODULE_ALIAS("snd-hda-codec-id:14f15114");
+ MODULE_ALIAS("snd-hda-codec-id:14f15115");
++MODULE_ALIAS("snd-hda-codec-id:14f151d7");
+
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Conexant HD-audio codec");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 8ad554312b69..2f39631f54c8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1043,6 +1043,7 @@ enum {
+ ALC880_FIXUP_UNIWILL,
+ ALC880_FIXUP_UNIWILL_DIG,
+ ALC880_FIXUP_Z71V,
++ ALC880_FIXUP_ASUS_W5A,
+ ALC880_FIXUP_3ST_BASE,
+ ALC880_FIXUP_3ST,
+ ALC880_FIXUP_3ST_DIG,
+@@ -1213,6 +1214,26 @@ static const struct hda_fixup alc880_fixups[] = {
+ { }
+ }
+ },
++ [ALC880_FIXUP_ASUS_W5A] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++			/* set up all of the pins as the BIOS is utterly broken */
++ { 0x14, 0x0121411f }, /* HP */
++ { 0x15, 0x411111f0 }, /* N/A */
++ { 0x16, 0x411111f0 }, /* N/A */
++ { 0x17, 0x411111f0 }, /* N/A */
++ { 0x18, 0x90a60160 }, /* mic */
++ { 0x19, 0x411111f0 }, /* N/A */
++ { 0x1a, 0x411111f0 }, /* N/A */
++ { 0x1b, 0x411111f0 }, /* N/A */
++ { 0x1c, 0x411111f0 }, /* N/A */
++ { 0x1d, 0x411111f0 }, /* N/A */
++ { 0x1e, 0xb743111e }, /* SPDIF out */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC880_FIXUP_GPIO1,
++ },
+ [ALC880_FIXUP_3ST_BASE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -1334,6 +1355,7 @@ static const struct hda_fixup alc880_fixups[] = {
+
+ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1019, 0x0f69, "Coeus G610P", ALC880_FIXUP_W810),
++ SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS W5A", ALC880_FIXUP_ASUS_W5A),
+ SND_PCI_QUIRK(0x1043, 0x1964, "ASUS Z71V", ALC880_FIXUP_Z71V),
+ SND_PCI_QUIRK_VENDOR(0x1043, "ASUS", ALC880_FIXUP_GPIO1),
+ SND_PCI_QUIRK(0x1558, 0x5401, "Clevo GPIO2", ALC880_FIXUP_GPIO2),
+@@ -1479,6 +1501,7 @@ enum {
+ ALC260_FIXUP_KN1,
+ ALC260_FIXUP_FSC_S7020,
+ ALC260_FIXUP_FSC_S7020_JWSE,
++ ALC260_FIXUP_VAIO_PINS,
+ };
+
+ static void alc260_gpio1_automute(struct hda_codec *codec)
+@@ -1619,6 +1642,24 @@ static const struct hda_fixup alc260_fixups[] = {
+ .chained = true,
+ .chain_id = ALC260_FIXUP_FSC_S7020,
+ },
++ [ALC260_FIXUP_VAIO_PINS] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ /* Pin configs are missing completely on some VAIOs */
++ { 0x0f, 0x01211020 },
++ { 0x10, 0x0001003f },
++ { 0x11, 0x411111f0 },
++ { 0x12, 0x01a15930 },
++ { 0x13, 0x411111f0 },
++ { 0x14, 0x411111f0 },
++ { 0x15, 0x411111f0 },
++ { 0x16, 0x411111f0 },
++ { 0x17, 0x411111f0 },
++ { 0x18, 0x411111f0 },
++ { 0x19, 0x411111f0 },
++ { }
++ }
++ },
+ };
+
+ static const struct snd_pci_quirk alc260_fixup_tbl[] = {
+@@ -1627,6 +1668,8 @@ static const struct snd_pci_quirk alc260_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_FIXUP_GPIO1),
+ SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", ALC260_FIXUP_HP_DC5750),
+ SND_PCI_QUIRK(0x103c, 0x30ba, "HP Presario B1900", ALC260_FIXUP_HP_B1900),
++ SND_PCI_QUIRK(0x104d, 0x81bb, "Sony VAIO", ALC260_FIXUP_VAIO_PINS),
++ SND_PCI_QUIRK(0x104d, 0x81e2, "Sony VAIO TX", ALC260_FIXUP_HP_PIN_0F),
+ SND_PCI_QUIRK(0x10cf, 0x1326, "FSC LifeBook S7020", ALC260_FIXUP_FSC_S7020),
+ SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FIXUP_GPIO1),
+ SND_PCI_QUIRK(0x152d, 0x0729, "Quanta KN1", ALC260_FIXUP_KN1),
+@@ -2388,6 +2431,7 @@ static const struct hda_verb alc268_beep_init_verbs[] = {
+ enum {
+ ALC268_FIXUP_INV_DMIC,
+ ALC268_FIXUP_HP_EAPD,
++ ALC268_FIXUP_SPDIF,
+ };
+
+ static const struct hda_fixup alc268_fixups[] = {
+@@ -2402,6 +2446,13 @@ static const struct hda_fixup alc268_fixups[] = {
+ {}
+ }
+ },
++ [ALC268_FIXUP_SPDIF] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x1e, 0x014b1180 }, /* enable SPDIF out */
++ {}
++ }
++ },
+ };
+
+ static const struct hda_model_fixup alc268_fixup_models[] = {
+@@ -2411,6 +2462,7 @@ static const struct hda_model_fixup alc268_fixup_models[] = {
+ };
+
+ static const struct snd_pci_quirk alc268_fixup_tbl[] = {
++ SND_PCI_QUIRK(0x1025, 0x0139, "Acer TravelMate 6293", ALC268_FIXUP_SPDIF),
+ SND_PCI_QUIRK(0x1025, 0x015b, "Acer AOA 150 (ZG5)", ALC268_FIXUP_INV_DMIC),
+ /* below is codec SSID since multiple Toshiba laptops have the
+ * same PCI SSID 1179:ff00
+@@ -2540,6 +2592,7 @@ enum {
+ ALC269_TYPE_ALC283,
+ ALC269_TYPE_ALC284,
+ ALC269_TYPE_ALC286,
++ ALC269_TYPE_ALC255,
+ };
+
+ /*
+@@ -2565,6 +2618,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
+ case ALC269_TYPE_ALC282:
+ case ALC269_TYPE_ALC283:
+ case ALC269_TYPE_ALC286:
++ case ALC269_TYPE_ALC255:
+ ssids = alc269_ssids;
+ break;
+ default:
+@@ -2944,6 +2998,23 @@ static void alc269_fixup_mic_mute_hook(void *private_data, int enabled)
+ snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval);
+ }
+
++/* Make sure the led works even in runtime suspend */
++static unsigned int led_power_filter(struct hda_codec *codec,
++ hda_nid_t nid,
++ unsigned int power_state)
++{
++ struct alc_spec *spec = codec->spec;
++
++ if (power_state != AC_PWRST_D3 || nid != spec->mute_led_nid)
++ return power_state;
++
++ /* Set pin ctl again, it might have just been set to 0 */
++ snd_hda_set_pin_ctl(codec, nid,
++ snd_hda_codec_get_pin_target(codec, nid));
++
++ return AC_PWRST_D0;
++}
++
+ static void alc269_fixup_hp_mute_led(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+@@ -2963,6 +3034,7 @@ static void alc269_fixup_hp_mute_led(struct hda_codec *codec,
+ spec->mute_led_nid = pin - 0x0a + 0x18;
+ spec->gen.vmaster_mute.hook = alc269_fixup_mic_mute_hook;
+ spec->gen.vmaster_mute_enum = 1;
++ codec->power_filter = led_power_filter;
+ snd_printd("Detected mute LED for %x:%d\n", spec->mute_led_nid,
+ spec->mute_led_polarity);
+ break;
+@@ -2978,6 +3050,7 @@ static void alc269_fixup_hp_mute_led_mic1(struct hda_codec *codec,
+ spec->mute_led_nid = 0x18;
+ spec->gen.vmaster_mute.hook = alc269_fixup_mic_mute_hook;
+ spec->gen.vmaster_mute_enum = 1;
++ codec->power_filter = led_power_filter;
+ }
+ }
+
+@@ -2990,6 +3063,7 @@ static void alc269_fixup_hp_mute_led_mic2(struct hda_codec *codec,
+ spec->mute_led_nid = 0x19;
+ spec->gen.vmaster_mute.hook = alc269_fixup_mic_mute_hook;
+ spec->gen.vmaster_mute_enum = 1;
++ codec->power_filter = led_power_filter;
+ }
+ }
+
+@@ -3230,8 +3304,10 @@ static void alc_update_headset_mode(struct hda_codec *codec)
+ else
+ new_headset_mode = ALC_HEADSET_MODE_HEADPHONE;
+
+- if (new_headset_mode == spec->current_headset_mode)
++ if (new_headset_mode == spec->current_headset_mode) {
++ snd_hda_gen_update_outputs(codec);
+ return;
++ }
+
+ switch (new_headset_mode) {
+ case ALC_HEADSET_MODE_UNPLUGGED:
+@@ -3895,6 +3971,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
+ SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+@@ -4128,6 +4205,9 @@ static int patch_alc269(struct hda_codec *codec)
+ case 0x10ec0286:
+ spec->codec_variant = ALC269_TYPE_ALC286;
+ break;
++ case 0x10ec0255:
++ spec->codec_variant = ALC269_TYPE_ALC255;
++ break;
+ }
+
+ if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
+@@ -4842,6 +4922,7 @@ static int patch_alc680(struct hda_codec *codec)
+ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 },
+ { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
++ { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 },
+ { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
+ { .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 },
+ { .id = 0x10ec0267, .name = "ALC267", .patch = patch_alc268 },
+diff --git a/sound/usb/6fire/chip.c b/sound/usb/6fire/chip.c
+index c39c77978468..66edc4a7917f 100644
+--- a/sound/usb/6fire/chip.c
++++ b/sound/usb/6fire/chip.c
+@@ -101,7 +101,7 @@ static int usb6fire_chip_probe(struct usb_interface *intf,
+ usb_set_intfdata(intf, chips[i]);
+ mutex_unlock(&register_mutex);
+ return 0;
+- } else if (regidx < 0)
++ } else if (!devices[i] && regidx < 0)
+ regidx = i;
+ }
+ if (regidx < 0) {
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index 72a130bc448a..c329c8fc57f4 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -103,6 +103,10 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ while ((gfn << PAGE_SHIFT) & (page_size - 1))
+ page_size >>= 1;
+
++ /* Make sure hva is aligned to the page size we want to map */
++ while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
++ page_size >>= 1;
++
+ /*
+ * Pin all pages we are about to map in memory. This is
+ * important because we unmap and unpin in 4kb steps later.
diff --git a/1002_linux-3.12.3.patch b/1002_linux-3.12.3.patch
new file mode 100644
index 00000000..54560970
--- /dev/null
+++ b/1002_linux-3.12.3.patch
@@ -0,0 +1,8639 @@
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index 9d4c1d18..fb78e60a 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -290,13 +290,24 @@ Default value is "/sbin/hotplug".
+ kptr_restrict:
+
+ This toggle indicates whether restrictions are placed on
+-exposing kernel addresses via /proc and other interfaces. When
+-kptr_restrict is set to (0), there are no restrictions. When
+-kptr_restrict is set to (1), the default, kernel pointers
+-printed using the %pK format specifier will be replaced with 0's
+-unless the user has CAP_SYSLOG. When kptr_restrict is set to
+-(2), kernel pointers printed using %pK will be replaced with 0's
+-regardless of privileges.
++exposing kernel addresses via /proc and other interfaces.
++
++When kptr_restrict is set to (0), the default, there are no restrictions.
++
++When kptr_restrict is set to (1), kernel pointers printed using the %pK
++format specifier will be replaced with 0's unless the user has CAP_SYSLOG
++and effective user and group ids are equal to the real ids. This is
++because %pK checks are done at read() time rather than open() time, so
++if permissions are elevated between the open() and the read() (e.g via
++a setuid binary) then %pK will not leak kernel pointers to unprivileged
++users. Note, this is a temporary solution only. The correct long-term
++solution is to do the permission checks at open() time. Consider removing
++world read permissions from files that use %pK, and using dmesg_restrict
++to protect against uses of %pK in dmesg(8) if leaking kernel pointer
++values to unprivileged users is a concern.
++
++When kptr_restrict is set to (2), kernel pointers printed using
++%pK will be replaced with 0's regardless of privileges.
+
+ ==============================================================
+
+diff --git a/Makefile b/Makefile
+index e6e72b62..b28bc57d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
+index 1e12aeff..aa537ed1 100644
+--- a/arch/arm/boot/dts/bcm2835.dtsi
++++ b/arch/arm/boot/dts/bcm2835.dtsi
+@@ -85,6 +85,8 @@
+ reg = <0x7e205000 0x1000>;
+ interrupts = <2 21>;
+ clocks = <&clk_i2c>;
++ #address-cells = <1>;
++ #size-cells = <0>;
+ status = "disabled";
+ };
+
+@@ -93,6 +95,8 @@
+ reg = <0x7e804000 0x1000>;
+ interrupts = <2 21>;
+ clocks = <&clk_i2c>;
++ #address-cells = <1>;
++ #size-cells = <0>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi
+index dc259e8b..9b186ac0 100644
+--- a/arch/arm/boot/dts/cros5250-common.dtsi
++++ b/arch/arm/boot/dts/cros5250-common.dtsi
+@@ -27,6 +27,13 @@
+ i2c2_bus: i2c2-bus {
+ samsung,pin-pud = <0>;
+ };
++
++ max77686_irq: max77686-irq {
++ samsung,pins = "gpx3-2";
++ samsung,pin-function = <0>;
++ samsung,pin-pud = <0>;
++ samsung,pin-drv = <0>;
++ };
+ };
+
+ i2c@12C60000 {
+@@ -35,6 +42,11 @@
+
+ max77686@09 {
+ compatible = "maxim,max77686";
++ interrupt-parent = <&gpx3>;
++ interrupts = <2 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&max77686_irq>;
++ wakeup-source;
+ reg = <0x09>;
+
+ voltage-regulators {
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 9cbe70c8..ec3e5cf4 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -192,6 +192,7 @@ __dabt_svc:
+ svc_entry
+ mov r2, sp
+ dabt_helper
++ THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
+ svc_exit r5 @ return from exception
+ UNWIND(.fnend )
+ ENDPROC(__dabt_svc)
+diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
+index 3b0a9538..e0fda04e 100644
+--- a/arch/arm/mach-at91/Makefile
++++ b/arch/arm/mach-at91/Makefile
+@@ -2,7 +2,7 @@
+ # Makefile for the linux kernel.
+ #
+
+-obj-y := irq.o gpio.o setup.o
++obj-y := irq.o gpio.o setup.o sysirq_mask.o
+ obj-m :=
+ obj-n :=
+ obj- :=
+diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
+index 5de6074b..ae10d14d 100644
+--- a/arch/arm/mach-at91/at91sam9260.c
++++ b/arch/arm/mach-at91/at91sam9260.c
+@@ -349,6 +349,8 @@ static void __init at91sam9260_initialize(void)
+ arm_pm_idle = at91sam9_idle;
+ arm_pm_restart = at91sam9_alt_restart;
+
++ at91_sysirq_mask_rtt(AT91SAM9260_BASE_RTT);
++
+ /* Register GPIO subsystem */
+ at91_gpio_init(at91sam9260_gpio, 3);
+ }
+diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
+index 0e079324..e761e74a 100644
+--- a/arch/arm/mach-at91/at91sam9261.c
++++ b/arch/arm/mach-at91/at91sam9261.c
+@@ -291,6 +291,8 @@ static void __init at91sam9261_initialize(void)
+ arm_pm_idle = at91sam9_idle;
+ arm_pm_restart = at91sam9_alt_restart;
+
++ at91_sysirq_mask_rtt(AT91SAM9261_BASE_RTT);
++
+ /* Register GPIO subsystem */
+ at91_gpio_init(at91sam9261_gpio, 3);
+ }
+diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
+index 6ce7d185..e6fed625 100644
+--- a/arch/arm/mach-at91/at91sam9263.c
++++ b/arch/arm/mach-at91/at91sam9263.c
+@@ -328,6 +328,9 @@ static void __init at91sam9263_initialize(void)
+ arm_pm_idle = at91sam9_idle;
+ arm_pm_restart = at91sam9_alt_restart;
+
++ at91_sysirq_mask_rtt(AT91SAM9263_BASE_RTT0);
++ at91_sysirq_mask_rtt(AT91SAM9263_BASE_RTT1);
++
+ /* Register GPIO subsystem */
+ at91_gpio_init(at91sam9263_gpio, 5);
+ }
+diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
+index 474ee04d..9f7a97c8 100644
+--- a/arch/arm/mach-at91/at91sam9g45.c
++++ b/arch/arm/mach-at91/at91sam9g45.c
+@@ -377,6 +377,9 @@ static void __init at91sam9g45_initialize(void)
+ arm_pm_idle = at91sam9_idle;
+ arm_pm_restart = at91sam9g45_restart;
+
++ at91_sysirq_mask_rtc(AT91SAM9G45_BASE_RTC);
++ at91_sysirq_mask_rtt(AT91SAM9G45_BASE_RTT);
++
+ /* Register GPIO subsystem */
+ at91_gpio_init(at91sam9g45_gpio, 5);
+ }
+diff --git a/arch/arm/mach-at91/at91sam9n12.c b/arch/arm/mach-at91/at91sam9n12.c
+index c7d670d1..4d6001c3 100644
+--- a/arch/arm/mach-at91/at91sam9n12.c
++++ b/arch/arm/mach-at91/at91sam9n12.c
+@@ -223,7 +223,13 @@ static void __init at91sam9n12_map_io(void)
+ at91_init_sram(0, AT91SAM9N12_SRAM_BASE, AT91SAM9N12_SRAM_SIZE);
+ }
+
++static void __init at91sam9n12_initialize(void)
++{
++ at91_sysirq_mask_rtc(AT91SAM9N12_BASE_RTC);
++}
++
+ AT91_SOC_START(at91sam9n12)
+ .map_io = at91sam9n12_map_io,
+ .register_clocks = at91sam9n12_register_clocks,
++ .init = at91sam9n12_initialize,
+ AT91_SOC_END
+diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
+index d4ec0d9a..301e1728 100644
+--- a/arch/arm/mach-at91/at91sam9rl.c
++++ b/arch/arm/mach-at91/at91sam9rl.c
+@@ -294,6 +294,9 @@ static void __init at91sam9rl_initialize(void)
+ arm_pm_idle = at91sam9_idle;
+ arm_pm_restart = at91sam9_alt_restart;
+
++ at91_sysirq_mask_rtc(AT91SAM9RL_BASE_RTC);
++ at91_sysirq_mask_rtt(AT91SAM9RL_BASE_RTT);
++
+ /* Register GPIO subsystem */
+ at91_gpio_init(at91sam9rl_gpio, 4);
+ }
+diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
+index 916e5a14..e8a2e075 100644
+--- a/arch/arm/mach-at91/at91sam9x5.c
++++ b/arch/arm/mach-at91/at91sam9x5.c
+@@ -322,6 +322,11 @@ static void __init at91sam9x5_map_io(void)
+ at91_init_sram(0, AT91SAM9X5_SRAM_BASE, AT91SAM9X5_SRAM_SIZE);
+ }
+
++static void __init at91sam9x5_initialize(void)
++{
++ at91_sysirq_mask_rtc(AT91SAM9X5_BASE_RTC);
++}
++
+ /* --------------------------------------------------------------------
+ * Interrupt initialization
+ * -------------------------------------------------------------------- */
+@@ -329,4 +334,5 @@ static void __init at91sam9x5_map_io(void)
+ AT91_SOC_START(at91sam9x5)
+ .map_io = at91sam9x5_map_io,
+ .register_clocks = at91sam9x5_register_clocks,
++ .init = at91sam9x5_initialize,
+ AT91_SOC_END
+diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
+index dc6e2f5f..26dee3ce 100644
+--- a/arch/arm/mach-at91/generic.h
++++ b/arch/arm/mach-at91/generic.h
+@@ -34,6 +34,8 @@ extern int __init at91_aic_of_init(struct device_node *node,
+ struct device_node *parent);
+ extern int __init at91_aic5_of_init(struct device_node *node,
+ struct device_node *parent);
++extern void __init at91_sysirq_mask_rtc(u32 rtc_base);
++extern void __init at91_sysirq_mask_rtt(u32 rtt_base);
+
+
+ /* Timer */
+diff --git a/arch/arm/mach-at91/include/mach/at91sam9n12.h b/arch/arm/mach-at91/include/mach/at91sam9n12.h
+index d374b87c..0151bcf6 100644
+--- a/arch/arm/mach-at91/include/mach/at91sam9n12.h
++++ b/arch/arm/mach-at91/include/mach/at91sam9n12.h
+@@ -49,6 +49,11 @@
+ #define AT91SAM9N12_BASE_USART3 0xf8028000
+
+ /*
++ * System Peripherals
++ */
++#define AT91SAM9N12_BASE_RTC 0xfffffeb0
++
++/*
+ * Internal Memory.
+ */
+ #define AT91SAM9N12_SRAM_BASE 0x00300000 /* Internal SRAM base address */
+diff --git a/arch/arm/mach-at91/include/mach/at91sam9x5.h b/arch/arm/mach-at91/include/mach/at91sam9x5.h
+index c75ee19b..2fc76c49 100644
+--- a/arch/arm/mach-at91/include/mach/at91sam9x5.h
++++ b/arch/arm/mach-at91/include/mach/at91sam9x5.h
+@@ -55,6 +55,11 @@
+ #define AT91SAM9X5_BASE_USART2 0xf8024000
+
+ /*
++ * System Peripherals
++ */
++#define AT91SAM9X5_BASE_RTC 0xfffffeb0
++
++/*
+ * Internal Memory.
+ */
+ #define AT91SAM9X5_SRAM_BASE 0x00300000 /* Internal SRAM base address */
+diff --git a/arch/arm/mach-at91/include/mach/sama5d3.h b/arch/arm/mach-at91/include/mach/sama5d3.h
+index 31096a8a..25613d8c 100644
+--- a/arch/arm/mach-at91/include/mach/sama5d3.h
++++ b/arch/arm/mach-at91/include/mach/sama5d3.h
+@@ -73,6 +73,11 @@
+ #define SAMA5D3_BASE_USART3 0xf8024000
+
+ /*
++ * System Peripherals
++ */
++#define SAMA5D3_BASE_RTC 0xfffffeb0
++
++/*
+ * Internal Memory
+ */
+ #define SAMA5D3_SRAM_BASE 0x00300000 /* Internal SRAM base address */
+diff --git a/arch/arm/mach-at91/sama5d3.c b/arch/arm/mach-at91/sama5d3.c
+index 40127971..3ea86428 100644
+--- a/arch/arm/mach-at91/sama5d3.c
++++ b/arch/arm/mach-at91/sama5d3.c
+@@ -371,7 +371,13 @@ static void __init sama5d3_map_io(void)
+ at91_init_sram(0, SAMA5D3_SRAM_BASE, SAMA5D3_SRAM_SIZE);
+ }
+
++static void __init sama5d3_initialize(void)
++{
++ at91_sysirq_mask_rtc(SAMA5D3_BASE_RTC);
++}
++
+ AT91_SOC_START(sama5d3)
+ .map_io = sama5d3_map_io,
+ .register_clocks = sama5d3_register_clocks,
++ .init = sama5d3_initialize,
+ AT91_SOC_END
+diff --git a/arch/arm/mach-at91/sysirq_mask.c b/arch/arm/mach-at91/sysirq_mask.c
+new file mode 100644
+index 00000000..2ba694f9
+--- /dev/null
++++ b/arch/arm/mach-at91/sysirq_mask.c
+@@ -0,0 +1,71 @@
++/*
++ * sysirq_mask.c - System-interrupt masking
++ *
++ * Copyright (C) 2013 Johan Hovold <jhovold@gmail.com>
++ *
++ * Functions to disable system interrupts from backup-powered peripherals.
++ *
++ * The RTC and RTT peripherals are generally powered by backup power (VDDBU)
++ * and are not reset on wake-up, user, watchdog or software reset. This means
++ * that their interrupts may be enabled during early boot (e.g. after a user
++ * reset).
++ *
++ * As the RTC and RTT share the system-interrupt line with the PIT, an
++ * interrupt occurring before a handler has been installed would lead to the
++ * system interrupt being disabled and prevent the system from booting.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/io.h>
++#include <mach/at91_rtt.h>
++
++#include "generic.h"
++
++#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
++#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
++
++void __init at91_sysirq_mask_rtc(u32 rtc_base)
++{
++ void __iomem *base;
++ u32 mask;
++
++ base = ioremap(rtc_base, 64);
++ if (!base)
++ return;
++
++ mask = readl_relaxed(base + AT91_RTC_IMR);
++ if (mask) {
++ pr_info("AT91: Disabling rtc irq\n");
++ writel_relaxed(mask, base + AT91_RTC_IDR);
++ (void)readl_relaxed(base + AT91_RTC_IMR); /* flush */
++ }
++
++ iounmap(base);
++}
++
++void __init at91_sysirq_mask_rtt(u32 rtt_base)
++{
++ void __iomem *base;
++ void __iomem *reg;
++ u32 mode;
++
++ base = ioremap(rtt_base, 16);
++ if (!base)
++ return;
++
++ reg = base + AT91_RTT_MR;
++
++ mode = readl_relaxed(reg);
++ if (mode & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)) {
++ pr_info("AT91: Disabling rtt irq\n");
++ mode &= ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
++ writel_relaxed(mode, reg);
++ (void)readl_relaxed(reg); /* flush */
++ }
++
++ iounmap(base);
++}
+diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
+index 9181a241..ef85ac49 100644
+--- a/arch/arm/mach-imx/clk-imx6q.c
++++ b/arch/arm/mach-imx/clk-imx6q.c
+@@ -428,7 +428,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ clk[asrc_podf] = imx_clk_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3);
+ clk[spdif_pred] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
+ clk[spdif_podf] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
+- clk[can_root] = imx_clk_divider("can_root", "pll3_usb_otg", base + 0x20, 2, 6);
++ clk[can_root] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
+ clk[ecspi_root] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
+ clk[gpu2d_core_podf] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3);
+ clk[gpu3d_core_podf] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3);
+diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
+index 8c60fcb0..2f4c9248 100644
+--- a/arch/arm/mach-integrator/integrator_cp.c
++++ b/arch/arm/mach-integrator/integrator_cp.c
+@@ -199,7 +199,8 @@ static struct mmci_platform_data mmc_data = {
+ static void cp_clcd_enable(struct clcd_fb *fb)
+ {
+ struct fb_var_screeninfo *var = &fb->fb.var;
+- u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2;
++ u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2
++ | CM_CTRL_LCDEN0 | CM_CTRL_LCDEN1;
+
+ if (var->bits_per_pixel <= 8 ||
+ (var->bits_per_pixel == 16 && var->green.length == 5))
+diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
+index 3926f370..e022a869 100644
+--- a/arch/arm/mach-omap2/irq.c
++++ b/arch/arm/mach-omap2/irq.c
+@@ -233,7 +233,7 @@ static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs
+ goto out;
+
+ irqnr = readl_relaxed(base_addr + 0xd8);
+-#ifdef CONFIG_SOC_TI81XX
++#if IS_ENABLED(CONFIG_SOC_TI81XX) || IS_ENABLED(CONFIG_SOC_AM33XX)
+ if (irqnr)
+ goto out;
+ irqnr = readl_relaxed(base_addr + 0xf8);
+diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
+index b69dd9ab..53f07358 100644
+--- a/arch/arm/mach-omap2/omap_device.c
++++ b/arch/arm/mach-omap2/omap_device.c
+@@ -621,6 +621,7 @@ static int _od_suspend_noirq(struct device *dev)
+
+ if (!ret && !pm_runtime_status_suspended(dev)) {
+ if (pm_generic_runtime_suspend(dev) == 0) {
++ pm_runtime_set_suspended(dev);
+ omap_device_idle(pdev);
+ od->flags |= OMAP_DEVICE_SUSPENDED;
+ }
+@@ -634,10 +635,18 @@ static int _od_resume_noirq(struct device *dev)
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_device *od = to_omap_device(pdev);
+
+- if ((od->flags & OMAP_DEVICE_SUSPENDED) &&
+- !pm_runtime_status_suspended(dev)) {
++ if (od->flags & OMAP_DEVICE_SUSPENDED) {
+ od->flags &= ~OMAP_DEVICE_SUSPENDED;
+ omap_device_enable(pdev);
++ /*
++ * XXX: we run before core runtime pm has resumed itself. At
++ * this point in time, we just restore the runtime pm state and
++	 * considering symmetric operations in resume, we do not expect
++ * to fail. If we failed, something changed in core runtime_pm
++ * framework OR some device driver messed things up, hence, WARN
++ */
++ WARN(pm_runtime_set_active(dev),
++ "Could not set %s runtime state active\n", dev_name(dev));
+ pm_generic_runtime_resume(dev);
+ }
+
+diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
+index e838ba27..c9808c68 100644
+--- a/arch/arm/mach-sa1100/assabet.c
++++ b/arch/arm/mach-sa1100/assabet.c
+@@ -512,6 +512,9 @@ static void __init assabet_map_io(void)
+ * Its called GPCLKR0 in my SA1110 manual.
+ */
+ Ser1SDCR0 |= SDCR0_SUS;
++ MSC1 = (MSC1 & ~0xffff) |
++ MSC_NonBrst | MSC_32BitStMem |
++ MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0);
+
+ if (!machine_has_neponset())
+ sa1100_register_uart_fns(&assabet_port_fns);
+diff --git a/arch/arm/mm/extable.c b/arch/arm/mm/extable.c
+index 9d285626..312e15e6 100644
+--- a/arch/arm/mm/extable.c
++++ b/arch/arm/mm/extable.c
+@@ -9,8 +9,13 @@ int fixup_exception(struct pt_regs *regs)
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(instruction_pointer(regs));
+- if (fixup)
++ if (fixup) {
+ regs->ARM_pc = fixup->fixup;
++#ifdef CONFIG_THUMB2_KERNEL
++ /* Clear the IT state to avoid nasty surprises in the fixup */
++ regs->ARM_cpsr &= ~PSR_IT_MASK;
++#endif
++ }
+
+ return fixup != NULL;
+ }
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index f0bebc5e..0b27b657 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -25,10 +25,11 @@
+ * Software defined PTE bits definition.
+ */
+ #define PTE_VALID (_AT(pteval_t, 1) << 0)
+-#define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */
+-#define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */
++#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
+ #define PTE_DIRTY (_AT(pteval_t, 1) << 55)
+ #define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
++ /* bit 57 for PMD_SECT_SPLITTING */
++#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
+
+ /*
+ * VMALLOC and SPARSEMEM_VMEMMAP ranges.
+@@ -357,18 +358,20 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+
+ /*
+ * Encode and decode a swap entry:
+- * bits 0, 2: present (must both be zero)
+- * bit 3: PTE_FILE
+- * bits 4-8: swap type
+- * bits 9-63: swap offset
++ * bits 0-1: present (must be zero)
++ * bit 2: PTE_FILE
++ * bits 3-8: swap type
++ * bits 9-57: swap offset
+ */
+-#define __SWP_TYPE_SHIFT 4
++#define __SWP_TYPE_SHIFT 3
+ #define __SWP_TYPE_BITS 6
++#define __SWP_OFFSET_BITS 49
+ #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
+ #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
++#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
+
+ #define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+-#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
++#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
+ #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+@@ -382,15 +385,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+
+ /*
+ * Encode and decode a file entry:
+- * bits 0, 2: present (must both be zero)
+- * bit 3: PTE_FILE
+- * bits 4-63: file offset / PAGE_SIZE
++ * bits 0-1: present (must be zero)
++ * bit 2: PTE_FILE
++ * bits 3-57: file offset / PAGE_SIZE
+ */
+ #define pte_file(pte) (pte_val(pte) & PTE_FILE)
+-#define pte_to_pgoff(x) (pte_val(x) >> 4)
+-#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE)
++#define pte_to_pgoff(x) (pte_val(x) >> 3)
++#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
+
+-#define PTE_FILE_MAX_BITS 60
++#define PTE_FILE_MAX_BITS 55
+
+ extern int kern_addr_valid(unsigned long addr);
+
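
The new arm64 layout keeps bits 0-1 free for the present check, puts PTE_FILE at bit 2, the swap type in bits 3-8, and explicitly masks the offset to 49 bits, while PTE_PROT_NONE moves up to bit 58. A standalone userspace sketch of the encode/decode round trip through the macros above (values illustrative, not from the tree):

#include <assert.h>

#define SWP_TYPE_SHIFT   3
#define SWP_TYPE_BITS    6
#define SWP_OFFSET_BITS  49
#define SWP_TYPE_MASK    ((1UL << SWP_TYPE_BITS) - 1)
#define SWP_OFFSET_SHIFT (SWP_TYPE_BITS + SWP_TYPE_SHIFT)
#define SWP_OFFSET_MASK  ((1UL << SWP_OFFSET_BITS) - 1)

int main(void)
{
	unsigned long type = 5, offset = 0x12345;

	/* encode as in __swp_entry() */
	unsigned long val = (type << SWP_TYPE_SHIFT) |
			    (offset << SWP_OFFSET_SHIFT);

	/* decode as in __swp_type() / __swp_offset() */
	assert(((val >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == type);
	assert(((val >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK) == offset);
	return 0;
}
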
+diff --git a/arch/avr32/boot/u-boot/head.S b/arch/avr32/boot/u-boot/head.S
+index 4488fa27..2ffc298f 100644
+--- a/arch/avr32/boot/u-boot/head.S
++++ b/arch/avr32/boot/u-boot/head.S
+@@ -8,6 +8,8 @@
+ * published by the Free Software Foundation.
+ */
+ #include <asm/setup.h>
++#include <asm/thread_info.h>
++#include <asm/sysreg.h>
+
+ /*
+ * The kernel is loaded where we want it to be and all caches
+@@ -20,11 +22,6 @@
+ .section .init.text,"ax"
+ .global _start
+ _start:
+- /* Check if the boot loader actually provided a tag table */
+- lddpc r0, magic_number
+- cp.w r12, r0
+- brne no_tag_table
+-
+ /* Initialize .bss */
+ lddpc r2, bss_start_addr
+ lddpc r3, end_addr
+@@ -34,6 +31,25 @@ _start:
+ cp r2, r3
+ brlo 1b
+
++ /* Initialize status register */
++ lddpc r0, init_sr
++ mtsr SYSREG_SR, r0
++
++ /* Set initial stack pointer */
++ lddpc sp, stack_addr
++ sub sp, -THREAD_SIZE
++
++#ifdef CONFIG_FRAME_POINTER
++ /* Mark last stack frame */
++ mov lr, 0
++ mov r7, 0
++#endif
++
++ /* Check if the boot loader actually provided a tag table */
++ lddpc r0, magic_number
++ cp.w r12, r0
++ brne no_tag_table
++
+ /*
+ * Save the tag table address for later use. This must be done
+ * _after_ .bss has been initialized...
+@@ -53,8 +69,15 @@ bss_start_addr:
+ .long __bss_start
+ end_addr:
+ .long _end
++init_sr:
++ .long 0x007f0000 /* Supervisor mode, everything masked */
++stack_addr:
++ .long init_thread_union
++panic_addr:
++ .long panic
+
+ no_tag_table:
+ sub r12, pc, (. - 2f)
+- bral panic
++	/* branch to panic(), which may be too far away for a relative branch */
++ lddpc pc, panic_addr
+ 2: .asciz "Boot loader didn't provide correct magic number\n"
+diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
+index 9899d3cc..7301f480 100644
+--- a/arch/avr32/kernel/entry-avr32b.S
++++ b/arch/avr32/kernel/entry-avr32b.S
+@@ -401,9 +401,10 @@ handle_critical:
+ /* We should never get here... */
+ bad_return:
+ sub r12, pc, (. - 1f)
+- bral panic
++ lddpc pc, 2f
+ .align 2
+ 1: .asciz "Return from critical exception!"
++2: .long panic
+
+ .align 1
+ do_bus_error_write:
+diff --git a/arch/avr32/kernel/head.S b/arch/avr32/kernel/head.S
+index 6163bd0a..59eae6df 100644
+--- a/arch/avr32/kernel/head.S
++++ b/arch/avr32/kernel/head.S
+@@ -10,33 +10,13 @@
+ #include <linux/linkage.h>
+
+ #include <asm/page.h>
+-#include <asm/thread_info.h>
+-#include <asm/sysreg.h>
+
+ .section .init.text,"ax"
+ .global kernel_entry
+ kernel_entry:
+- /* Initialize status register */
+- lddpc r0, init_sr
+- mtsr SYSREG_SR, r0
+-
+- /* Set initial stack pointer */
+- lddpc sp, stack_addr
+- sub sp, -THREAD_SIZE
+-
+-#ifdef CONFIG_FRAME_POINTER
+- /* Mark last stack frame */
+- mov lr, 0
+- mov r7, 0
+-#endif
+-
+ /* Start the show */
+ lddpc pc, kernel_start_addr
+
+ .align 2
+-init_sr:
+- .long 0x007f0000 /* Supervisor mode, everything masked */
+-stack_addr:
+- .long init_thread_union
+ kernel_start_addr:
+ .long start_kernel
+diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h
+new file mode 100644
+index 00000000..748016cb
+--- /dev/null
++++ b/arch/parisc/include/asm/socket.h
+@@ -0,0 +1,11 @@
++#ifndef _ASM_SOCKET_H
++#define _ASM_SOCKET_H
++
++#include <uapi/asm/socket.h>
++
++/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
++ * have to define SOCK_NONBLOCK to a different value here.
++ */
++#define SOCK_NONBLOCK 0x40000000
++
++#endif /* _ASM_SOCKET_H */
+diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
+index 71700e63..9f2174f4 100644
+--- a/arch/parisc/include/uapi/asm/socket.h
++++ b/arch/parisc/include/uapi/asm/socket.h
+@@ -1,5 +1,5 @@
+-#ifndef _ASM_SOCKET_H
+-#define _ASM_SOCKET_H
++#ifndef _UAPI_ASM_SOCKET_H
++#define _UAPI_ASM_SOCKET_H
+
+ #include <asm/sockios.h>
+
+@@ -75,9 +75,4 @@
+
+ #define SO_BUSY_POLL 0x4027
+
+-/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
+- * have to define SOCK_NONBLOCK to a different value here.
+- */
+-#define SOCK_NONBLOCK 0x40000000
+-
+-#endif /* _ASM_SOCKET_H */
++#endif /* _UAPI_ASM_SOCKET_H */
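
Splitting the header keeps the kernel-internal SOCK_NONBLOCK override out of the exported UAPI file while in-kernel users of <asm/socket.h> still see it through the wrapper. The clash being worked around: parisc's O_NONBLOCK (assumed here to be octal 000200004, as in the arch headers of this era) overlaps the low nibble where the socket type lives, so SOCK_NONBLOCK cannot simply equal O_NONBLOCK as it does on most architectures. A small illustrative sketch:

#include <stdio.h>

#define PARISC_O_NONBLOCK 000200004 /* assumed parisc value, for illustration */
#define SOCK_TYPE_MASK    0xf       /* SOCK_STREAM..SOCK_PACKET live in bits 0-3 */

int main(void)
{
	/* bit 2 of O_NONBLOCK lands inside the socket-type field */
	printf("overlap = %#lx\n",
	       (unsigned long)(PARISC_O_NONBLOCK & SOCK_TYPE_MASK));

	/* 0x40000000 stays clear of the type field entirely */
	printf("SOCK_NONBLOCK & mask = %#x\n", 0x40000000 & SOCK_TYPE_MASK);
	return 0;
}
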
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 36d49e6b..fea2dba1 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -445,6 +445,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+ #endif /* CONFIG_ALTIVEC */
+ if (copy_fpr_to_user(&frame->mc_fregs, current))
+ return 1;
++
++ /*
++ * Clear the MSR VSX bit to indicate there is no valid state attached
++ * to this context, except in the specific case below where we set it.
++ */
++ msr &= ~MSR_VSX;
+ #ifdef CONFIG_VSX
+ /*
+ * Copy VSR 0-31 upper half from thread_struct to local
+@@ -457,15 +463,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+ if (copy_vsx_to_user(&frame->mc_vsregs, current))
+ return 1;
+ msr |= MSR_VSX;
+- } else if (!ctx_has_vsx_region)
+- /*
+- * With a small context structure we can't hold the VSX
+- * registers, hence clear the MSR value to indicate the state
+- * was not saved.
+- */
+- msr &= ~MSR_VSX;
+-
+-
++ }
+ #endif /* CONFIG_VSX */
+ #ifdef CONFIG_SPE
+ /* save spe registers */
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index f93ec283..1e7ba881 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -121,6 +121,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+ flush_fp_to_thread(current);
+ /* copy fpr regs and fpscr */
+ err |= copy_fpr_to_user(&sc->fp_regs, current);
++
++ /*
++ * Clear the MSR VSX bit to indicate there is no valid state attached
++ * to this context, except in the specific case below where we set it.
++ */
++ msr &= ~MSR_VSX;
+ #ifdef CONFIG_VSX
+ /*
+ * Copy VSX low doubleword to local buffer for formatting,
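
Both signal hunks enforce the same invariant: MSR_VSX in the saved context must default to clear and may only be set on the one path that actually copied VSX state out, instead of being cleared on some paths and left stale on others. A minimal userspace sketch of that flag discipline (bit position assumed, for illustration only):

#include <stdio.h>
#include <stdbool.h>

#define MSR_VSX (1UL << 23) /* assumed bit position, illustration only */

static unsigned long saved_msr(unsigned long msr, bool copied_vsx_state)
{
	msr &= ~MSR_VSX;        /* assume "no VSX state attached" ... */
	if (copied_vsx_state)
		msr |= MSR_VSX; /* ... until the save path proves otherwise */
	return msr;
}

int main(void)
{
	/* a stale MSR_VSX from the live registers no longer leaks through */
	printf("%#lx\n", saved_msr(MSR_VSX | 0x8000, false)); /* 0x8000 */
	printf("%#lx\n", saved_msr(0x8000, true));            /* 0x808000 */
	return 0;
}
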
+diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
+index 1694d738..26588fdc 100644
+--- a/arch/s390/lib/uaccess_pt.c
++++ b/arch/s390/lib/uaccess_pt.c
+@@ -78,11 +78,14 @@ static size_t copy_in_kernel(size_t count, void __user *to,
+ * contains the (negative) exception code.
+ */
+ #ifdef CONFIG_64BIT
++
+ static unsigned long follow_table(struct mm_struct *mm,
+ unsigned long address, int write)
+ {
+ unsigned long *table = (unsigned long *)__pa(mm->pgd);
+
++ if (unlikely(address > mm->context.asce_limit - 1))
++ return -0x38UL;
+ switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
+ case _ASCE_TYPE_REGION1:
+ table = table + ((address >> 53) & 0x7ff);
+diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
+index 1fa8be40..122f737a 100644
+--- a/arch/sh/boards/mach-ecovec24/setup.c
++++ b/arch/sh/boards/mach-ecovec24/setup.c
+@@ -15,6 +15,7 @@
+ #include <linux/mmc/sh_mmcif.h>
+ #include <linux/mmc/sh_mobile_sdhi.h>
+ #include <linux/mtd/physmap.h>
++#include <linux/mfd/tmio.h>
+ #include <linux/gpio.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 5e00b5a5..0c611d89 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -645,10 +645,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+ __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+ if (blkcg_init_queue(q))
+- goto fail_id;
++ goto fail_bdi;
+
+ return q;
+
++fail_bdi:
++ bdi_destroy(&q->backing_dev_info);
+ fail_id:
+ ida_simple_remove(&blk_queue_ida, q->id);
+ fail_q:
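
The fix adds a fail_bdi label so that a blkcg_init_queue() failure also tears down the backing_dev_info registered earlier, restoring the usual kernel unwind pattern: one label per acquired resource, jumped to in reverse order of setup. A generic userspace sketch of that shape (names illustrative):

#include <stdlib.h>

/* each failure point unwinds exactly what was set up before it,
 * mirroring the fail_bdi/fail_id/fail_q chain above */
static int setup(void)
{
	void *a, *b;

	a = malloc(16);
	if (!a)
		goto fail;
	b = malloc(16);
	if (!b)
		goto fail_a;	/* must undo 'a', like fail_bdi undoes the bdi */

	free(b);
	free(a);
	return 0;

fail_a:
	free(a);
fail:
	return -1;
}

int main(void) { return setup() ? 1 : 0; }
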
+diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
+index 06007f0e..52222a2f 100644
+--- a/crypto/asymmetric_keys/x509_public_key.c
++++ b/crypto/asymmetric_keys/x509_public_key.c
+@@ -106,7 +106,6 @@ error_no_sig:
+ static int x509_key_preparse(struct key_preparsed_payload *prep)
+ {
+ struct x509_certificate *cert;
+- struct tm now;
+ size_t srlen, sulen;
+ char *desc = NULL;
+ int ret;
+@@ -137,43 +136,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
+ goto error_free_cert;
+ }
+
+- time_to_tm(CURRENT_TIME.tv_sec, 0, &now);
+- pr_devel("Now: %04ld-%02d-%02d %02d:%02d:%02d\n",
+- now.tm_year + 1900, now.tm_mon + 1, now.tm_mday,
+- now.tm_hour, now.tm_min, now.tm_sec);
+- if (now.tm_year < cert->valid_from.tm_year ||
+- (now.tm_year == cert->valid_from.tm_year &&
+- (now.tm_mon < cert->valid_from.tm_mon ||
+- (now.tm_mon == cert->valid_from.tm_mon &&
+- (now.tm_mday < cert->valid_from.tm_mday ||
+- (now.tm_mday == cert->valid_from.tm_mday &&
+- (now.tm_hour < cert->valid_from.tm_hour ||
+- (now.tm_hour == cert->valid_from.tm_hour &&
+- (now.tm_min < cert->valid_from.tm_min ||
+- (now.tm_min == cert->valid_from.tm_min &&
+- (now.tm_sec < cert->valid_from.tm_sec
+- ))))))))))) {
+- pr_warn("Cert %s is not yet valid\n", cert->fingerprint);
+- ret = -EKEYREJECTED;
+- goto error_free_cert;
+- }
+- if (now.tm_year > cert->valid_to.tm_year ||
+- (now.tm_year == cert->valid_to.tm_year &&
+- (now.tm_mon > cert->valid_to.tm_mon ||
+- (now.tm_mon == cert->valid_to.tm_mon &&
+- (now.tm_mday > cert->valid_to.tm_mday ||
+- (now.tm_mday == cert->valid_to.tm_mday &&
+- (now.tm_hour > cert->valid_to.tm_hour ||
+- (now.tm_hour == cert->valid_to.tm_hour &&
+- (now.tm_min > cert->valid_to.tm_min ||
+- (now.tm_min == cert->valid_to.tm_min &&
+- (now.tm_sec > cert->valid_to.tm_sec
+- ))))))))))) {
+- pr_warn("Cert %s has expired\n", cert->fingerprint);
+- ret = -EKEYEXPIRED;
+- goto error_free_cert;
+- }
+-
+ cert->pub->algo = x509_public_key_algorithms[cert->pkey_algo];
+ cert->pub->id_type = PKEY_ID_X509;
+
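
The deleted hunk open-coded a lexicographic comparison of (year, mon, mday, hour, min, sec) tuples through ten levels of nested ternaries; the validity-time check it implemented is dropped here rather than rewritten. For reference, the same ordering can be expressed flatly (illustrative sketch, not the kernel's code):

#include <stdio.h>

/* field order: year, mon, mday, hour, min, sec */
struct tm6 { int f[6]; };

/* <0 if a is before b, 0 if equal, >0 if after: the earliest
 * differing field decides, exactly as the nested ternaries did */
static int tm6_cmp(const struct tm6 *a, const struct tm6 *b)
{
	for (int i = 0; i < 6; i++)
		if (a->f[i] != b->f[i])
			return a->f[i] < b->f[i] ? -1 : 1;
	return 0;
}

int main(void)
{
	struct tm6 now        = { { 113, 10, 20, 12, 0, 0 } };
	struct tm6 valid_from = { { 113, 10, 19,  0, 0, 0 } };

	printf("%d\n", tm6_cmp(&now, &valid_from)); /* 1: already valid */
	return 0;
}
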
+diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
+index d7e53ea5..79de26ed 100644
+--- a/drivers/acpi/pci_root.c
++++ b/drivers/acpi/pci_root.c
+@@ -63,6 +63,9 @@ static struct acpi_scan_handler pci_root_handler = {
+ .ids = root_device_ids,
+ .attach = acpi_pci_root_add,
+ .detach = acpi_pci_root_remove,
++ .hotplug = {
++ .ignore = true,
++ },
+ };
+
+ static DEFINE_MUTEX(osc_lock);
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 3601738e..d047771c 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1812,7 +1812,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
+ */
+ list_for_each_entry(hwid, &pnp.ids, list) {
+ handler = acpi_scan_match_handler(hwid->id, NULL);
+- if (handler) {
++ if (handler && !handler->hotplug.ignore) {
+ acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ acpi_hotplug_notify_cb, handler);
+ break;
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 8e28f923..14f1e950 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -292,6 +292,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+@@ -431,6 +435,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
+ .driver_data = board_ahci_yes_fbs },
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
++ .driver_data = board_ahci_yes_fbs },
+
+ /* Promise */
+ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index aaac4fb0..cfb74470 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1275,9 +1275,11 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ {
+ struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
++ struct ahci_port_priv *pp = ap->private_data;
+ const char *reason = NULL;
+ unsigned long now, msecs;
+ struct ata_taskfile tf;
++ bool fbs_disabled = false;
+ int rc;
+
+ DPRINTK("ENTER\n");
+@@ -1287,6 +1289,16 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ if (rc && rc != -EOPNOTSUPP)
+ ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);
+
++ /*
++	 * According to AHCI-1.2 9.3.9: if FBS is enabled, software shall
++	 * clear PxFBS.EN to '0' prior to issuing software reset to devices
++	 * that are attached to a port multiplier.
++ */
++ if (!ata_is_host_link(link) && pp->fbs_enabled) {
++ ahci_disable_fbs(ap);
++ fbs_disabled = true;
++ }
++
+ ata_tf_init(link->device, &tf);
+
+ /* issue the first D2H Register FIS */
+@@ -1327,6 +1339,10 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ } else
+ *class = ahci_dev_classify(ap);
+
++ /* re-enable FBS if disabled before */
++ if (fbs_disabled)
++ ahci_enable_fbs(ap);
++
+ DPRINTK("EXIT, class=%u\n", *class);
+ return 0;
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 83b1a9fb..81a94a39 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4126,6 +4126,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
+ { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
+ { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
++ { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+
+ /* Devices we expect to fail diagnostics */
+
+diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
+index 150a917f..e37413228 100644
+--- a/drivers/ata/libata-transport.c
++++ b/drivers/ata/libata-transport.c
+@@ -321,25 +321,25 @@ int ata_tport_add(struct device *parent,
+ /*
+ * ATA link attributes
+ */
++static int noop(int x) { return x; }
+
+-
+-#define ata_link_show_linkspeed(field) \
++#define ata_link_show_linkspeed(field, format) \
+ static ssize_t \
+ show_ata_link_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+ { \
+ struct ata_link *link = transport_class_to_link(dev); \
+ \
+- return sprintf(buf,"%s\n", sata_spd_string(fls(link->field))); \
++ return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
+ }
+
+-#define ata_link_linkspeed_attr(field) \
+- ata_link_show_linkspeed(field) \
++#define ata_link_linkspeed_attr(field, format) \
++ ata_link_show_linkspeed(field, format) \
+ static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
+
+-ata_link_linkspeed_attr(hw_sata_spd_limit);
+-ata_link_linkspeed_attr(sata_spd_limit);
+-ata_link_linkspeed_attr(sata_spd);
++ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
++ata_link_linkspeed_attr(sata_spd_limit, fls);
++ata_link_linkspeed_attr(sata_spd, noop);
+
+
+ static DECLARE_TRANSPORT_CLASS(ata_link_class,
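
sata_spd already holds an index into the speed table, while the two *_spd_limit fields hold bit masks where fls() picks the highest supported generation; decoding all three with fls() misreported the current speed of 6.0 Gbps links. A userspace sketch of the difference (speed values illustrative):

#include <stdio.h>

static int noop(int x) { return x; }

static int my_fls(int x) /* highest set bit, 1-based; 0 for x == 0 */
{
	int r = 0;

	while (x) { r++; x >>= 1; }
	return r;
}

int main(void)
{
	int sata_spd = 3;	/* index: 3 would mean 6.0 Gbps */
	int spd_limit = 0x7;	/* mask: gen 1, 2 and 3 supported */

	printf("limit -> %d\n", my_fls(spd_limit));	/* 3: highest gen */
	printf("spd   -> %d, fls gave %d\n",
	       noop(sata_spd), my_fls(sata_spd));	/* 3 vs 2: the bug */
	return 0;
}
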
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 35fa3689..06051767 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -499,7 +499,7 @@ static void __device_release_driver(struct device *dev)
+ BUS_NOTIFY_UNBIND_DRIVER,
+ dev);
+
+- pm_runtime_put(dev);
++ pm_runtime_put_sync(dev);
+
+ if (dev->bus && dev->bus->remove)
+ dev->bus->remove(dev);
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 2f036ca4..c8dac730 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -894,13 +894,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+
+ bio_list_init(&lo->lo_bio_list);
+
+- /*
+- * set queue make_request_fn, and add limits based on lower level
+- * device
+- */
+- blk_queue_make_request(lo->lo_queue, loop_make_request);
+- lo->lo_queue->queuedata = lo;
+-
+ if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
+ blk_queue_flush(lo->lo_queue, REQ_FLUSH);
+
+@@ -1618,6 +1611,8 @@ static int loop_add(struct loop_device **l, int i)
+ if (!lo)
+ goto out;
+
++ lo->lo_state = Lo_unbound;
++
+ /* allocate id, if @id >= 0, we're requesting that specific id */
+ if (i >= 0) {
+ err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
+@@ -1633,7 +1628,13 @@ static int loop_add(struct loop_device **l, int i)
+ err = -ENOMEM;
+ lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
+ if (!lo->lo_queue)
+- goto out_free_dev;
++ goto out_free_idr;
++
++ /*
++ * set queue make_request_fn
++ */
++ blk_queue_make_request(lo->lo_queue, loop_make_request);
++ lo->lo_queue->queuedata = lo;
+
+ disk = lo->lo_disk = alloc_disk(1 << part_shift);
+ if (!disk)
+@@ -1678,6 +1679,8 @@ static int loop_add(struct loop_device **l, int i)
+
+ out_free_queue:
+ blk_cleanup_queue(lo->lo_queue);
++out_free_idr:
++ idr_remove(&loop_index_idr, i);
+ out_free_dev:
+ kfree(lo);
+ out:
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index bf4b9d28..6620b73d 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -887,6 +887,8 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
+ unsigned long secure;
+ struct phys_req preq;
+
++ xen_blkif_get(blkif);
++
+ preq.sector_number = req->u.discard.sector_number;
+ preq.nr_sects = req->u.discard.nr_sectors;
+
+@@ -899,7 +901,6 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
+ }
+ blkif->st_ds_req++;
+
+- xen_blkif_get(blkif);
+ secure = (blkif->vbd.discard_secure &&
+ (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
+ BLKDEV_DISCARD_SECURE : 0;
+diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
+index 794123fc..bf8902a0 100644
+--- a/drivers/cpufreq/highbank-cpufreq.c
++++ b/drivers/cpufreq/highbank-cpufreq.c
+@@ -66,7 +66,8 @@ static int hb_cpufreq_driver_init(void)
+ struct device_node *np;
+ int ret;
+
+- if (!of_machine_is_compatible("calxeda,highbank"))
++ if ((!of_machine_is_compatible("calxeda,highbank")) &&
++ (!of_machine_is_compatible("calxeda,ecx-2000")))
+ return -ENODEV;
+
+ cpu_dev = get_cpu_device(0);
+diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
+index d8ececaf..3efc4dcf 100644
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -87,13 +87,6 @@ static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
+ static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6 };
+
+-/*
+- * technically sources 1 and 2 do not require SED, but the op will have
+- * at least 9 descriptors so that's irrelevant.
+- */
+-static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+- 1, 1, 1, 1, 1, 1, 1 };
+-
+ static void ioat3_eh(struct ioat2_dma_chan *ioat);
+
+ static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
+@@ -135,12 +128,6 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2],
+ pq->coef[idx] = coef;
+ }
+
+-static int sed_get_pq16_pool_idx(int src_cnt)
+-{
+-
+- return pq16_idx_to_sed[src_cnt];
+-}
+-
+ static bool is_jf_ioat(struct pci_dev *pdev)
+ {
+ switch (pdev->device) {
+@@ -1129,9 +1116,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
+ u8 op;
+ int i, s, idx, num_descs;
+
+- /* this function only handles src_cnt 9 - 16 */
+- BUG_ON(src_cnt < 9);
+-
+ /* this function is only called with 9-16 sources */
+ op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
+
+@@ -1159,8 +1143,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
+
+ descs[0] = (struct ioat_raw_descriptor *) pq;
+
+- desc->sed = ioat3_alloc_sed(device,
+- sed_get_pq16_pool_idx(src_cnt));
++ desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
+ if (!desc->sed) {
+ dev_err(to_dev(chan),
+ "%s: no free sed entries\n", __func__);
+@@ -1218,13 +1201,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
+ return &desc->txd;
+ }
+
++static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
++{
++ if (dmaf_p_disabled_continue(flags))
++ return src_cnt + 1;
++ else if (dmaf_continue(flags))
++ return src_cnt + 3;
++ else
++ return src_cnt;
++}
++
+ static struct dma_async_tx_descriptor *
+ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ unsigned long flags)
+ {
+- struct dma_device *dma = chan->device;
+-
+ /* specify valid address for disabled result */
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ dst[0] = dst[1];
+@@ -1244,7 +1235,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ single_source_coef[0] = scf[0];
+ single_source_coef[1] = 0;
+
+- return (src_cnt > 8) && (dma->max_pq > 8) ?
++ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
+ 2, single_source_coef, len,
+ flags) :
+@@ -1252,7 +1243,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ single_source_coef, len, flags);
+
+ } else {
+- return (src_cnt > 8) && (dma->max_pq > 8) ?
++ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
+ scf, len, flags) :
+ __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
+@@ -1265,8 +1256,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ enum sum_check_flags *pqres, unsigned long flags)
+ {
+- struct dma_device *dma = chan->device;
+-
+ /* specify valid address for disabled result */
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ pq[0] = pq[1];
+@@ -1278,7 +1267,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ */
+ *pqres = 0;
+
+- return (src_cnt > 8) && (dma->max_pq > 8) ?
++ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
+@@ -1289,7 +1278,6 @@ static struct dma_async_tx_descriptor *
+ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags)
+ {
+- struct dma_device *dma = chan->device;
+ unsigned char scf[src_cnt];
+ dma_addr_t pq[2];
+
+@@ -1298,7 +1286,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+ flags |= DMA_PREP_PQ_DISABLE_Q;
+ pq[1] = dst; /* specify valid address for disabled result */
+
+- return (src_cnt > 8) && (dma->max_pq > 8) ?
++ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
+@@ -1310,7 +1298,6 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
+ unsigned int src_cnt, size_t len,
+ enum sum_check_flags *result, unsigned long flags)
+ {
+- struct dma_device *dma = chan->device;
+ unsigned char scf[src_cnt];
+ dma_addr_t pq[2];
+
+@@ -1324,8 +1311,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
+ flags |= DMA_PREP_PQ_DISABLE_Q;
+ pq[1] = pq[0]; /* specify valid address for disabled result */
+
+-
+- return (src_cnt > 8) && (dma->max_pq > 8) ?
++ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
+ scf, len, flags) :
+ __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
+@@ -1545,6 +1531,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
+ goto free_resources;
+ }
+
++ memset(page_address(dest), 0, PAGE_SIZE);
++
+ /* test for non-zero parity sum */
+ op = IOAT_OP_XOR_VAL;
+
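
src_cnt_flags() folds the sources implied by an in-flight RAID continuation into the count, so the 8-source cutoff between the regular and the 16-source descriptor format is taken on the effective number of inputs rather than the explicit one. A userspace sketch (the two flag tests stand in for dmaf_p_disabled_continue()/dmaf_continue(); the bit values are made up):

#include <stdio.h>

#define DMA_PREP_CONTINUE     (1UL << 0) /* illustrative bit values */
#define DMA_PREP_PQ_DISABLE_P (1UL << 1)

static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
{
	if ((flags & DMA_PREP_CONTINUE) && (flags & DMA_PREP_PQ_DISABLE_P))
		return src_cnt + 1; /* continuation, P disabled: one implied source */
	else if (flags & DMA_PREP_CONTINUE)
		return src_cnt + 3; /* full continuation: three implied sources */
	else
		return src_cnt;
}

int main(void)
{
	/* 7 explicit sources, but the continuation pushes it past 8 */
	printf("%d\n", src_cnt_flags(7, DMA_PREP_CONTINUE)); /* 10 */
	return 0;
}
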
+diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
+index c2bd8c6a..10d3d298 100644
+--- a/drivers/edac/highbank_l2_edac.c
++++ b/drivers/edac/highbank_l2_edac.c
+@@ -90,28 +90,30 @@ static int highbank_l2_err_probe(struct platform_device *pdev)
+ goto err;
+ }
+
++ dci->mod_name = dev_name(&pdev->dev);
++ dci->dev_name = dev_name(&pdev->dev);
++
++ if (edac_device_add_device(dci))
++ goto err;
++
+ drvdata->db_irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev, drvdata->db_irq,
+ highbank_l2_err_handler,
+ 0, dev_name(&pdev->dev), dci);
+ if (res < 0)
+- goto err;
++ goto err2;
+
+ drvdata->sb_irq = platform_get_irq(pdev, 1);
+ res = devm_request_irq(&pdev->dev, drvdata->sb_irq,
+ highbank_l2_err_handler,
+ 0, dev_name(&pdev->dev), dci);
+ if (res < 0)
+- goto err;
+-
+- dci->mod_name = dev_name(&pdev->dev);
+- dci->dev_name = dev_name(&pdev->dev);
+-
+- if (edac_device_add_device(dci))
+- goto err;
++ goto err2;
+
+ devres_close_group(&pdev->dev, NULL);
+ return 0;
++err2:
++ edac_device_del_device(&pdev->dev);
+ err:
+ devres_release_group(&pdev->dev, NULL);
+ edac_device_free_ctl_info(dci);
+diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
+index 4695dd2d..7a783075 100644
+--- a/drivers/edac/highbank_mc_edac.c
++++ b/drivers/edac/highbank_mc_edac.c
+@@ -189,14 +189,6 @@ static int highbank_mc_probe(struct platform_device *pdev)
+ goto err;
+ }
+
+- irq = platform_get_irq(pdev, 0);
+- res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
+- 0, dev_name(&pdev->dev), mci);
+- if (res < 0) {
+- dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+- goto err;
+- }
+-
+ mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+@@ -217,10 +209,20 @@ static int highbank_mc_probe(struct platform_device *pdev)
+ if (res < 0)
+ goto err;
+
++ irq = platform_get_irq(pdev, 0);
++ res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
++ 0, dev_name(&pdev->dev), mci);
++ if (res < 0) {
++ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
++ goto err2;
++ }
++
+ highbank_mc_create_debugfs_nodes(mci);
+
+ devres_close_group(&pdev->dev, NULL);
+ return 0;
++err2:
++ edac_mc_del_mc(&pdev->dev);
+ err:
+ devres_release_group(&pdev->dev, NULL);
+ edac_mc_free(mci);
+diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
+index f7a0cc4d..7b373009 100644
+--- a/drivers/gpio/gpio-msm-v2.c
++++ b/drivers/gpio/gpio-msm-v2.c
+@@ -102,7 +102,7 @@ struct msm_gpio_dev {
+ DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
+ struct irq_domain *domain;
+- unsigned int summary_irq;
++ int summary_irq;
+ void __iomem *msm_tlmm_base;
+ };
+
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index 3c3321f9..db312904 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -79,7 +79,7 @@ struct mvebu_gpio_chip {
+ spinlock_t lock;
+ void __iomem *membase;
+ void __iomem *percpu_membase;
+- unsigned int irqbase;
++ int irqbase;
+ struct irq_domain *domain;
+ int soc_variant;
+ };
+diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
+index 4274e2e7..e925ca2e 100644
+--- a/drivers/gpio/gpio-pl061.c
++++ b/drivers/gpio/gpio-pl061.c
+@@ -286,11 +286,6 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
+ if (!chip->base)
+ return -ENOMEM;
+
+- chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR,
+- irq_base, &pl061_domain_ops, chip);
+- if (!chip->domain)
+- return -ENODEV;
+-
+ spin_lock_init(&chip->lock);
+
+ chip->gc.request = pl061_gpio_request;
+@@ -320,6 +315,11 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
+ irq_set_chained_handler(irq, pl061_irq_handler);
+ irq_set_handler_data(irq, chip);
+
++ chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR,
++ irq_base, &pl061_domain_ops, chip);
++ if (!chip->domain)
++ return -ENODEV;
++
+ for (i = 0; i < PL061_GPIO_NR; i++) {
+ if (pdata) {
+ if (pdata->directions & (1 << i))
+diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
+index 6038966a..8f64b670 100644
+--- a/drivers/gpio/gpio-rcar.c
++++ b/drivers/gpio/gpio-rcar.c
+@@ -380,7 +380,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
+ if (!p->irq_domain) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "cannot initialize irq domain\n");
+- goto err1;
++ goto err0;
+ }
+
+ if (devm_request_irq(&pdev->dev, irq->start,
+diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
+index d8e4f6ef..db2de1a2 100644
+--- a/drivers/gpio/gpio-twl4030.c
++++ b/drivers/gpio/gpio-twl4030.c
+@@ -354,17 +354,18 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
+ static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
+ {
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
++ int ret = -EINVAL;
+
+ mutex_lock(&priv->mutex);
+ if (offset < TWL4030_GPIO_MAX)
+- twl4030_set_gpio_dataout(offset, value);
++ ret = twl4030_set_gpio_direction(offset, 0);
+
+ priv->direction |= BIT(offset);
+ mutex_unlock(&priv->mutex);
+
+ twl_set(chip, offset, value);
+
+- return 0;
++ return ret;
+ }
+
+ static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index b53fff84..a069b5e2 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -1285,6 +1285,26 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
+ default:
+ break;
+ }
++
++ if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
++ pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
++ /*
++ * This is a big fat ugly hack.
++ *
++ * Some machines in UEFI boot mode provide us a VBT that has 18
++ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
++ * unknown we fail to light up. Yet the same BIOS boots up with
++ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
++ * max, not what it tells us to use.
++ *
++ * Note: This will still be broken if the eDP panel is not lit
++ * up by the BIOS, and thus we can't get the mode at module
++ * load.
++ */
++ DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
++ pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
++ dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
++ }
+ }
+
+ static void intel_ddi_destroy(struct drm_encoder *encoder)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index d78d33f9..333aa1bc 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -6770,7 +6770,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
++ POSTING_READ(CURCNTR(pipe));
+ I915_WRITE(CURBASE(pipe), base);
++ POSTING_READ(CURBASE(pipe));
+ }
+
+ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+@@ -6799,7 +6801,9 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
++ POSTING_READ(CURCNTR_IVB(pipe));
+ I915_WRITE(CURBASE_IVB(pipe), base);
++ POSTING_READ(CURBASE_IVB(pipe));
+ }
+
+ /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
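
The added POSTING_READ() calls are the usual MMIO flush idiom: on a write-posted bus the CURCNTR/CURBASE writes may sit in a buffer, and reading the register back forces them to complete in order before the vblank latches them. A bare-bones sketch of the idiom (fake register, for illustration):

#include <stdio.h>

static volatile unsigned int fake_reg; /* stands in for an MMIO register */

static void write_and_flush(volatile unsigned int *reg, unsigned int val)
{
	*reg = val;
	(void)*reg; /* "posting read": the read cannot complete until the
		     * posted write has, so the write is now at the device */
}

int main(void)
{
	write_and_flush(&fake_reg, 0x1234);
	printf("%#x\n", fake_reg);
	return 0;
}
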
+diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
+index 7fa7df54..922cacc1 100644
+--- a/drivers/gpu/drm/i915/intel_dvo.c
++++ b/drivers/gpu/drm/i915/intel_dvo.c
+@@ -171,11 +171,16 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
+ {
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
++ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = I915_READ(dvo_reg);
+
+ I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+ I915_READ(dvo_reg);
++ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
++ &crtc->config.requested_mode,
++ &crtc->config.adjusted_mode);
++
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+ }
+
+@@ -184,6 +189,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
+ {
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_crtc *crtc;
++ struct intel_crtc_config *config;
+
+ /* dvo supports only 2 dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+@@ -204,10 +210,16 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
+ /* We call connector dpms manually below in case pipe dpms doesn't
+ * change due to cloning. */
+ if (mode == DRM_MODE_DPMS_ON) {
++ config = &to_intel_crtc(crtc)->config;
++
+ intel_dvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
+
++ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
++ &config->requested_mode,
++ &config->adjusted_mode);
++
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+ } else {
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+@@ -299,10 +311,6 @@ static void intel_dvo_mode_set(struct intel_encoder *encoder)
+ break;
+ }
+
+- intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+- &crtc->config.requested_mode,
+- adjusted_mode);
+-
+ /* Save the data order, since I don't know what it should be set to. */
+ dvo_val = I915_READ(dvo_reg) &
+ (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
+diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
+index 8649f1c3..ee7d6491 100644
+--- a/drivers/gpu/drm/i915/intel_uncore.c
++++ b/drivers/gpu/drm/i915/intel_uncore.c
+@@ -204,6 +204,19 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+ gen6_gt_check_fifodbg(dev_priv);
+ }
+
++static void intel_uncore_forcewake_reset(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ if (IS_VALLEYVIEW(dev)) {
++ vlv_force_wake_reset(dev_priv);
++ } else if (INTEL_INFO(dev)->gen >= 6) {
++ __gen6_gt_force_wake_reset(dev_priv);
++ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
++ __gen6_gt_force_wake_mt_reset(dev_priv);
++ }
++}
++
+ void intel_uncore_early_sanitize(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -259,19 +272,8 @@ void intel_uncore_init(struct drm_device *dev)
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+-}
+
+-static void intel_uncore_forcewake_reset(struct drm_device *dev)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+-
+- if (IS_VALLEYVIEW(dev)) {
+- vlv_force_wake_reset(dev_priv);
+- } else if (INTEL_INFO(dev)->gen >= 6) {
+- __gen6_gt_force_wake_reset(dev_priv);
+- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+- __gen6_gt_force_wake_mt_reset(dev_priv);
+- }
++ intel_uncore_forcewake_reset(dev);
+ }
+
+ void intel_uncore_sanitize(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index f32b7123..d2dfdf76 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -317,7 +317,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
+ list_for_each_safe(entry, tmp, list) {
+ nvbo = list_entry(entry, struct nouveau_bo, entry);
+
+- nouveau_bo_fence(nvbo, fence);
++ if (likely(fence))
++ nouveau_bo_fence(nvbo, fence);
+
+ if (unlikely(nvbo->validate_mapped)) {
+ ttm_bo_kunmap(&nvbo->kmap);
+diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
+index 88722f23..f437b30c 100644
+--- a/drivers/gpu/drm/qxl/qxl_fb.c
++++ b/drivers/gpu/drm/qxl/qxl_fb.c
+@@ -108,7 +108,7 @@ static void qxl_fb_dirty_flush(struct fb_info *info)
+ u32 x1, x2, y1, y2;
+
+ /* TODO: hard coding 32 bpp */
+- int stride = qfbdev->qfb.base.pitches[0] * 4;
++ int stride = qfbdev->qfb.base.pitches[0];
+
+ x1 = qfbdev->dirty.x1;
+ x2 = qfbdev->dirty.x2;
+diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
+index 0109a964..821ab7b9 100644
+--- a/drivers/gpu/drm/qxl/qxl_release.c
++++ b/drivers/gpu/drm/qxl/qxl_release.c
+@@ -92,6 +92,7 @@ qxl_release_free(struct qxl_device *qdev,
+ - DRM_FILE_OFFSET);
+ qxl_fence_remove_release(&bo->fence, release->id);
+ qxl_bo_unref(&bo);
++ kfree(entry);
+ }
+ spin_lock(&qdev->release_idr_lock);
+ idr_remove(&qdev->release_idr, release->id);
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index bf87f6d4..86d9ee08 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1753,7 +1753,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ }
+- } else {
++ } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
+ /* use the same PPLL for all monitors with the same clock */
+ pll = radeon_get_shared_nondp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
+index deaf98cd..0652ee0a 100644
+--- a/drivers/gpu/drm/radeon/atombios_i2c.c
++++ b/drivers/gpu/drm/radeon/atombios_i2c.c
+@@ -56,8 +56,10 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
+ return -EINVAL;
+ }
+ args.ucRegIndex = buf[0];
+- if (num > 1)
+- memcpy(&out, &buf[1], num - 1);
++ if (num > 1) {
++ num--;
++ memcpy(&out, &buf[1], num);
++ }
+ args.lpI2CDataOut = cpu_to_le16(out);
+ } else {
+ if (num > ATOM_MAX_HW_I2C_READ) {
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
+index 8f7e0453..5720e66d 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.c
++++ b/drivers/gpu/drm/radeon/radeon_asic.c
+@@ -2019,6 +2019,8 @@ static struct radeon_asic ci_asic = {
+ .bandwidth_update = &dce8_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
++ .set_backlight_level = &atombios_set_backlight_level,
++ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
+ },
+@@ -2119,6 +2121,8 @@ static struct radeon_asic kv_asic = {
+ .bandwidth_update = &dce8_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
++ .set_backlight_level = &atombios_set_backlight_level,
++ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
+ },
+diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
+index b990b1a2..3b1de72b 100644
+--- a/drivers/gpu/drm/radeon/radeon_gart.c
++++ b/drivers/gpu/drm/radeon/radeon_gart.c
+@@ -1156,6 +1156,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+ return -ENOMEM;
+
+ r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
++ if (r)
++ return r;
+ ib.length_dw = 0;
+
+ r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+index 7cb178a3..bc73021d 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+@@ -422,6 +422,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+ /* Pin framebuffer & get tilling informations */
+ obj = radeon_fb->obj;
+ rbo = gem_to_radeon_bo(obj);
++retry:
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+@@ -430,6 +431,33 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+ &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
++
++		/* On old GPUs like RN50 with little VRAM, pinning can fail
++		 * because the current fb is taking all the space needed. So
++		 * instead of unpinning the old buffer after pinning the new
++		 * one, first unpin the old one and then retry pinning the new.
++		 *
++		 * As only the master can set the mode, only the master can
++		 * pin, and it is unlikely the master client will race with
++		 * itself, especially on those old GPUs with a single CRTC.
++		 *
++		 * We don't shut down the display controller because the new
++		 * buffer will end up in the same spot.
++ */
++ if (!atomic && fb && fb != crtc->fb) {
++ struct radeon_bo *old_rbo;
++ unsigned long nsize, osize;
++
++ old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
++ osize = radeon_bo_size(old_rbo);
++ nsize = radeon_bo_size(rbo);
++ if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
++ radeon_bo_unpin(old_rbo);
++ radeon_bo_unreserve(old_rbo);
++ fb = NULL;
++ goto retry;
++ }
++ }
+ return -EINVAL;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index 308eff5b..ab0a1724 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -240,6 +240,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
+ if (handle != 0 && rdev->uvd.filp[i] == filp) {
+ struct radeon_fence *fence;
+
++ radeon_uvd_note_usage(rdev);
++
+ r = radeon_uvd_get_destroy_msg(rdev,
+ R600_RING_TYPE_UVD_INDEX, handle, &fence);
+ if (r) {
+@@ -620,7 +622,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
+ if (r)
+ goto err;
+
+- r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
++ r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
+ if (r)
+ goto err;
+
+diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
+index 7e2e0ea6..6e23019c 100644
+--- a/drivers/gpu/drm/radeon/sid.h
++++ b/drivers/gpu/drm/radeon/sid.h
+@@ -478,7 +478,7 @@
+ #define STATE3_MASK (0x1f << 15)
+ #define STATE3_SHIFT 15
+
+-#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x2808
++#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
+ #define TRAIN_DONE_D0 (1 << 30)
+ #define TRAIN_DONE_D1 (1 << 31)
+
+diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
+index 9364129b..d700698a 100644
+--- a/drivers/gpu/drm/radeon/trinity_dpm.c
++++ b/drivers/gpu/drm/radeon/trinity_dpm.c
+@@ -1873,9 +1873,9 @@ int trinity_dpm_init(struct radeon_device *rdev)
+ pi->enable_sclk_ds = true;
+ pi->enable_gfx_power_gating = true;
+ pi->enable_gfx_clock_gating = true;
+- pi->enable_mg_clock_gating = true;
+- pi->enable_gfx_dynamic_mgpg = true; /* ??? */
+- pi->override_dynamic_mgpg = true;
++ pi->enable_mg_clock_gating = false;
++ pi->enable_gfx_dynamic_mgpg = false;
++ pi->override_dynamic_mgpg = false;
+ pi->enable_auto_thermal_throttling = true;
+ pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
+ pi->uvd_dpm = true; /* ??? */
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index f1a857ec..72980532 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -986,24 +986,32 @@ out_unlock:
+ return ret;
+ }
+
+-static int ttm_bo_mem_compat(struct ttm_placement *placement,
+- struct ttm_mem_reg *mem)
++static bool ttm_bo_mem_compat(struct ttm_placement *placement,
++ struct ttm_mem_reg *mem,
++ uint32_t *new_flags)
+ {
+ int i;
+
+ if (mem->mm_node && placement->lpfn != 0 &&
+ (mem->start < placement->fpfn ||
+ mem->start + mem->num_pages > placement->lpfn))
+- return -1;
++ return false;
+
+ for (i = 0; i < placement->num_placement; i++) {
+- if ((placement->placement[i] & mem->placement &
+- TTM_PL_MASK_CACHING) &&
+- (placement->placement[i] & mem->placement &
+- TTM_PL_MASK_MEM))
+- return i;
++ *new_flags = placement->placement[i];
++ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
++ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
++ return true;
++ }
++
++ for (i = 0; i < placement->num_busy_placement; i++) {
++ *new_flags = placement->busy_placement[i];
++ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
++ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
++ return true;
+ }
+- return -1;
++
++ return false;
+ }
+
+ int ttm_bo_validate(struct ttm_buffer_object *bo,
+@@ -1012,6 +1020,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ bool no_wait_gpu)
+ {
+ int ret;
++ uint32_t new_flags;
+
+ lockdep_assert_held(&bo->resv->lock.base);
+ /* Check that range is valid */
+@@ -1022,8 +1031,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ /*
+ * Check whether we need to move buffer.
+ */
+- ret = ttm_bo_mem_compat(placement, &bo->mem);
+- if (ret < 0) {
++ if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
+ ret = ttm_bo_move_buffer(bo, placement, interruptible,
+ no_wait_gpu);
+ if (ret)
+@@ -1033,7 +1041,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ * Use the access and other non-mapping-related flag bits from
+ * the compatible memory placement flags to the active flags
+ */
+- ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
++ ttm_flag_masked(&bo->mem.placement, new_flags,
+ ~TTM_PL_MASK_MEMTYPE);
+ }
+ /*
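
Besides also scanning busy_placement, the rework changes the contract: instead of returning an index into placement->placement (with -1 for no match), ttm_bo_mem_compat() now returns a bool and hands the matched flags back through *new_flags, so the caller never indexes into the wrong array. A generic userspace sketch of that API shape:

#include <stdbool.h>
#include <stdio.h>

/* return true and fill *new_flags for the first entry compatible with
 * 'current_flags'; the caller never touches the array itself */
static bool mem_compat(const unsigned int *placements, int n,
		       unsigned int current_flags, unsigned int *new_flags)
{
	for (int i = 0; i < n; i++) {
		if (placements[i] & current_flags) {
			*new_flags = placements[i];
			return true;
		}
	}
	return false;
}

int main(void)
{
	unsigned int placements[] = { 0x1, 0x6 };
	unsigned int flags;

	if (mem_compat(placements, 2, 0x4, &flags))
		printf("compatible, flags %#x\n", flags); /* 0x6 */
	return 0;
}
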
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
+index 7cc904d3..4834c463 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
+@@ -343,19 +343,25 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ if (ret)
+ goto out;
+
++ /*
++ * Single TTM move. NOP.
++ */
+ if (old_iomap == NULL && new_iomap == NULL)
+ goto out2;
++
++ /*
++ * Move nonexistent data. NOP.
++ */
+ if (old_iomap == NULL && ttm == NULL)
+ goto out2;
+
+- if (ttm->state == tt_unpopulated) {
++ /*
++ * TTM might be null for moves within the same region.
++ */
++ if (ttm && ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+- if (ret) {
+- /* if we fail here don't nuke the mm node
+- * as the bo still owns it */
+- old_copy.mm_node = NULL;
++ if (ret)
+ goto out1;
+- }
+ }
+
+ add = 0;
+@@ -381,11 +387,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ prot);
+ } else
+ ret = ttm_copy_io_page(new_iomap, old_iomap, page);
+- if (ret) {
+- /* failing here, means keep old copy as-is */
+- old_copy.mm_node = NULL;
++ if (ret)
+ goto out1;
+- }
+ }
+ mb();
+ out2:
+@@ -403,7 +406,12 @@ out1:
+ ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
+ out:
+ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+- ttm_bo_mem_put(bo, &old_copy);
++
++ /*
++ * On error, keep the mm node!
++ */
++ if (!ret)
++ ttm_bo_mem_put(bo, &old_copy);
+ return ret;
+ }
+ EXPORT_SYMBOL(ttm_bo_move_memcpy);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+index 37fb4bef..252501a5 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -32,6 +32,8 @@
+ #include <drm/drmP.h>
+ #include "vmwgfx_resource_priv.h"
+
++#define VMW_RES_EVICT_ERR_COUNT 10
++
+ struct vmw_user_dma_buffer {
+ struct ttm_base_object base;
+ struct vmw_dma_buffer dma;
+@@ -1091,8 +1093,9 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
+ * to a backup buffer.
+ *
+ * @res: The resource to evict.
++ * @interruptible: Whether to wait interruptibly.
+ */
+-int vmw_resource_do_evict(struct vmw_resource *res)
++int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+ {
+ struct ttm_validate_buffer val_buf;
+ const struct vmw_res_func *func = res->func;
+@@ -1102,7 +1105,8 @@ int vmw_resource_do_evict(struct vmw_resource *res)
+ BUG_ON(!func->may_evict);
+
+ val_buf.bo = NULL;
+- ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
++ ret = vmw_resource_check_buffer(res, &ticket, interruptible,
++ &val_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+@@ -1141,6 +1145,7 @@ int vmw_resource_validate(struct vmw_resource *res)
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
+ struct ttm_validate_buffer val_buf;
++ unsigned err_count = 0;
+
+ if (likely(!res->func->may_evict))
+ return 0;
+@@ -1155,7 +1160,7 @@ int vmw_resource_validate(struct vmw_resource *res)
+
+ write_lock(&dev_priv->resource_lock);
+ if (list_empty(lru_list) || !res->func->may_evict) {
+- DRM_ERROR("Out of device device id entries "
++			DRM_ERROR("Out of device resources "
+ "for %s.\n", res->func->type_name);
+ ret = -EBUSY;
+ write_unlock(&dev_priv->resource_lock);
+@@ -1168,7 +1173,19 @@ int vmw_resource_validate(struct vmw_resource *res)
+ list_del_init(&evict_res->lru_head);
+
+ write_unlock(&dev_priv->resource_lock);
+- vmw_resource_do_evict(evict_res);
++
++ ret = vmw_resource_do_evict(evict_res, true);
++ if (unlikely(ret != 0)) {
++ write_lock(&dev_priv->resource_lock);
++ list_add_tail(&evict_res->lru_head, lru_list);
++ write_unlock(&dev_priv->resource_lock);
++ if (ret == -ERESTARTSYS ||
++ ++err_count > VMW_RES_EVICT_ERR_COUNT) {
++ vmw_resource_unreference(&evict_res);
++ goto out_no_validate;
++ }
++ }
++
+ vmw_resource_unreference(&evict_res);
+ } while (1);
+
+@@ -1253,13 +1270,15 @@ bool vmw_resource_needs_backup(const struct vmw_resource *res)
+ * @type: The resource type to evict
+ *
+ * To avoid thrashing starvation or as part of the hibernation sequence,
+- * evict all evictable resources of a specific type.
++ * try to evict all evictable resources of a specific type.
+ */
+ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+ enum vmw_res_type type)
+ {
+ struct list_head *lru_list = &dev_priv->res_lru[type];
+ struct vmw_resource *evict_res;
++ unsigned err_count = 0;
++ int ret;
+
+ do {
+ write_lock(&dev_priv->resource_lock);
+@@ -1272,7 +1291,18 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+ lru_head));
+ list_del_init(&evict_res->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+- vmw_resource_do_evict(evict_res);
++
++ ret = vmw_resource_do_evict(evict_res, false);
++ if (unlikely(ret != 0)) {
++ write_lock(&dev_priv->resource_lock);
++ list_add_tail(&evict_res->lru_head, lru_list);
++ write_unlock(&dev_priv->resource_lock);
++ if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
++ vmw_resource_unreference(&evict_res);
++ return;
++ }
++ }
++
+ vmw_resource_unreference(&evict_res);
+ } while (1);
+
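
Both eviction loops now tolerate transient failures: a resource that could not be evicted goes back on the LRU and the loop retries, giving up only after VMW_RES_EVICT_ERR_COUNT consecutive errors (or immediately on -ERESTARTSYS in the validate path). The control-flow shape, as a userspace sketch:

#include <stdio.h>

#define EVICT_ERR_COUNT 10

/* pretend the first two attempts hit a transient failure */
static int do_evict(int attempt) { return attempt < 2 ? -1 : 0; }

int main(void)
{
	unsigned int err_count = 0;

	for (int attempt = 0; ; attempt++) {
		if (do_evict(attempt) == 0) {
			printf("evicted on attempt %d\n", attempt + 1);
			break;
		}
		/* failure: the resource would go back on the LRU here */
		if (++err_count > EVICT_ERR_COUNT) {
			printf("giving up\n");
			break;
		}
	}
	return 0;
}
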
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index c91d5471..1fc15699 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -242,6 +242,7 @@ config HID_HOLTEK
+ - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 /
+ Zalman ZM-GM1
+ - SHARKOON DarkGlider Gaming mouse
++ - LEETGION Hellion Gaming Mouse
+
+ config HOLTEK_FF
+ bool "Holtek On Line Grip force feedback support"
+@@ -369,12 +370,14 @@ config LOGITECH_FF
+ force feedback.
+
+ config LOGIRUMBLEPAD2_FF
+- bool "Logitech RumblePad/Rumblepad 2 force feedback support"
++ bool "Logitech force feedback support (variant 2)"
+ depends on HID_LOGITECH
+ select INPUT_FF_MEMLESS
+ help
+- Say Y here if you want to enable force feedback support for Logitech
+- RumblePad and Rumblepad 2 devices.
++ Say Y here if you want to enable force feedback support for:
++ - Logitech RumblePad
++ - Logitech Rumblepad 2
++ - Logitech Formula Vibration Feedback Wheel
+
+ config LOGIG940_FF
+ bool "Logitech Flight System G940 force feedback support"
+@@ -453,9 +456,11 @@ config HID_MULTITOUCH
+ - Pixcir dual touch panels
+ - Quanta panels
+ - eGalax dual-touch panels, including the Joojoo and Wetab tablets
++ - SiS multitouch panels
+ - Stantum multitouch panels
+ - Touch International Panels
+ - Unitec Panels
++ - Wistron optical touch panels
+ - XAT optical touch panels
+ - Xiroku optical touch panels
+ - Zytronic touch panels
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 881cf7b4..3b219b95 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -46,6 +46,12 @@ module_param(iso_layout, uint, 0644);
+ MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. "
+ "(0 = disabled, [1] = enabled)");
+
++static unsigned int swap_opt_cmd = 0;
++module_param(swap_opt_cmd, uint, 0644);
++MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") keys. "
++ "(For people who want to keep Windows PC keyboard muscle memory. "
++ "[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
++
+ struct apple_sc {
+ unsigned long quirks;
+ unsigned int fn_on;
+@@ -150,6 +156,14 @@ static const struct apple_key_translation apple_iso_keyboard[] = {
+ { }
+ };
+
++static const struct apple_key_translation swapped_option_cmd_keys[] = {
++ { KEY_LEFTALT, KEY_LEFTMETA },
++ { KEY_LEFTMETA, KEY_LEFTALT },
++ { KEY_RIGHTALT, KEY_RIGHTMETA },
++	{ KEY_RIGHTMETA, KEY_RIGHTALT },
++ { }
++};
++
+ static const struct apple_key_translation *apple_find_translation(
+ const struct apple_key_translation *table, u16 from)
+ {
+@@ -242,6 +256,14 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ }
+ }
+
++ if (swap_opt_cmd) {
++ trans = apple_find_translation(swapped_option_cmd_keys, usage->code);
++ if (trans) {
++ input_event(input, usage->type, trans->to, value);
++ return 1;
++ }
++ }
++
+ return 0;
+ }
+
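
apple_find_translation() walks a zero-terminated {from, to} table, so the new swap is just one more table consulted before the default path; since the parameter is registered with mode 0644, it can presumably also be flipped at runtime through /sys/module/hid_apple/parameters/swap_opt_cmd. A sketch of the table walk (key codes as in the input-event headers; the rest is illustrative):

#include <stdio.h>

#define KEY_LEFTALT  56
#define KEY_LEFTMETA 125

struct key_translation { unsigned short from, to; };

/* zero-terminated table, scanned linearly as in apple_find_translation() */
static const struct key_translation swapped_option_cmd[] = {
	{ KEY_LEFTALT,  KEY_LEFTMETA },
	{ KEY_LEFTMETA, KEY_LEFTALT  },
	{ }
};

static const struct key_translation *
find_translation(const struct key_translation *t, unsigned short from)
{
	for (; t->from; t++)
		if (t->from == from)
			return t;
	return NULL;
}

int main(void)
{
	const struct key_translation *tr =
		find_translation(swapped_option_cmd, KEY_LEFTALT);

	printf("%u -> %u\n", KEY_LEFTALT, tr ? tr->to : KEY_LEFTALT);
	return 0;
}
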
+diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
+index 64ab94a5..a594e478 100644
+--- a/drivers/hid/hid-axff.c
++++ b/drivers/hid/hid-axff.c
+@@ -95,7 +95,7 @@ static int axff_init(struct hid_device *hid)
+ }
+ }
+
+- if (field_count < 4) {
++ if (field_count < 4 && hid->product != 0xf705) {
+ hid_err(hid, "not enough fields in the report: %d\n",
+ field_count);
+ return -ENODEV;
+@@ -180,6 +180,7 @@ static void ax_remove(struct hid_device *hdev)
+
+ static const struct hid_device_id ax_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802), },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705), },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, ax_devices);
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index e80da623..c08b5c14 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1605,6 +1605,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
+@@ -1716,6 +1717,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+@@ -1754,6 +1756,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
+@@ -1801,12 +1804,14 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
+ #if IS_ENABLED(CONFIG_HID_ROCCAT)
+- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
+@@ -1816,6 +1821,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
+@@ -2376,15 +2383,6 @@ bool hid_ignore(struct hid_device *hdev)
+ hdev->type == HID_TYPE_USBNONE)
+ return true;
+ break;
+- case USB_VENDOR_ID_DWAV:
+- /* These are handled by usbtouchscreen. hdev->type is probably
+- * HID_TYPE_USBNONE, but we say !HID_TYPE_USBMOUSE to match
+- * usbtouchscreen. */
+- if ((hdev->product == USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER ||
+- hdev->product == USB_DEVICE_ID_DWAV_TOUCHCONTROLLER) &&
+- hdev->type != HID_TYPE_USBMOUSE)
+- return true;
+- break;
+ case USB_VENDOR_ID_VELLEMAN:
+ /* These are not HID devices. They are handled by comedi. */
+ if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
+diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
+index e696566c..0caa676d 100644
+--- a/drivers/hid/hid-holtek-mouse.c
++++ b/drivers/hid/hid-holtek-mouse.c
+@@ -28,6 +28,7 @@
+ * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200
+ * and Zalman ZM-GM1
+ * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse
++ * - USB ID 04d9:a072, sold as LEETGION Hellion Gaming Mouse
+ */
+
+ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+@@ -40,6 +41,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ * 0x2fff, so they don't exceed HID_MAX_USAGES */
+ switch (hdev->product) {
+ case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067:
++ case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072:
+ if (*rsize >= 122 && rdesc[115] == 0xff && rdesc[116] == 0x7f
+ && rdesc[120] == 0xff && rdesc[121] == 0x7f) {
+ hid_info(hdev, "Fixing up report descriptor\n");
+@@ -66,6 +68,8 @@ static const struct hid_device_id holtek_mouse_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
++ USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
+ { }
+ };
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index f0296a50..9480b425 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -332,6 +332,11 @@
+ #define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
+ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
+ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100
++#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101 0x0101
++#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102 0x0102
++#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106 0x0106
++#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
++#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
+
+ #define USB_VENDOR_ID_GLAB 0x06c2
+ #define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
+@@ -448,8 +453,9 @@
+
+ #define USB_VENDOR_ID_HOLTEK_ALT 0x04d9
+ #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055
+-#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067
+ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a
++#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067
++#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072 0xa072
+ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081
+
+ #define USB_VENDOR_ID_IMATION 0x0718
+@@ -571,6 +577,7 @@
+ #define USB_DEVICE_ID_DINOVO_EDGE 0xc714
+ #define USB_DEVICE_ID_DINOVO_MINI 0xc71f
+ #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2 0xca03
++#define USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL 0xca04
+
+ #define USB_VENDOR_ID_LUMIO 0x202e
+ #define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
+@@ -745,6 +752,10 @@
+ #define USB_VENDOR_ID_SIGMATEL 0x066F
+ #define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
+
++#define USB_VENDOR_ID_SIS2_TOUCH 0x0457
++#define USB_DEVICE_ID_SIS9200_TOUCH 0x9200
++#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
++
+ #define USB_VENDOR_ID_SKYCABLE 0x1223
+ #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
+
+@@ -888,6 +899,9 @@
+ #define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802
+ #define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804
+
++#define USB_VENDOR_ID_WISTRON 0x0fb8
++#define USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH 0x1109
++
+ #define USB_VENDOR_ID_X_TENSIONS 0x1ae7
+ #define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001
+
+diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
+index 6f12ecd3..1bfd292c 100644
+--- a/drivers/hid/hid-lg.c
++++ b/drivers/hid/hid-lg.c
+@@ -45,6 +45,7 @@
+ /* Size of the original descriptors of the Driving Force (and Pro) wheels */
+ #define DF_RDESC_ORIG_SIZE 130
+ #define DFP_RDESC_ORIG_SIZE 97
++#define FV_RDESC_ORIG_SIZE 130
+ #define MOMO_RDESC_ORIG_SIZE 87
+
+ /* Fixed report descriptors for Logitech Driving Force (and Pro)
+@@ -170,6 +171,73 @@ static __u8 dfp_rdesc_fixed[] = {
+ 0xC0 /* End Collection */
+ };
+
++static __u8 fv_rdesc_fixed[] = {
++0x05, 0x01, /* Usage Page (Desktop), */
++0x09, 0x04, /* Usage (Joystick), */
++0xA1, 0x01, /* Collection (Application), */
++0xA1, 0x02, /* Collection (Logical), */
++0x95, 0x01, /* Report Count (1), */
++0x75, 0x0A, /* Report Size (10), */
++0x15, 0x00, /* Logical Minimum (0), */
++0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
++0x35, 0x00, /* Physical Minimum (0), */
++0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
++0x09, 0x30, /* Usage (X), */
++0x81, 0x02, /* Input (Variable), */
++0x95, 0x0C, /* Report Count (12), */
++0x75, 0x01, /* Report Size (1), */
++0x25, 0x01, /* Logical Maximum (1), */
++0x45, 0x01, /* Physical Maximum (1), */
++0x05, 0x09, /* Usage Page (Button), */
++0x19, 0x01, /* Usage Minimum (01h), */
++0x29, 0x0C, /* Usage Maximum (0Ch), */
++0x81, 0x02, /* Input (Variable), */
++0x95, 0x02, /* Report Count (2), */
++0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
++0x09, 0x01, /* Usage (01h), */
++0x81, 0x02, /* Input (Variable), */
++0x09, 0x02, /* Usage (02h), */
++0x26, 0xFF, 0x00, /* Logical Maximum (255), */
++0x46, 0xFF, 0x00, /* Physical Maximum (255), */
++0x95, 0x01, /* Report Count (1), */
++0x75, 0x08, /* Report Size (8), */
++0x81, 0x02, /* Input (Variable), */
++0x05, 0x01, /* Usage Page (Desktop), */
++0x25, 0x07, /* Logical Maximum (7), */
++0x46, 0x3B, 0x01, /* Physical Maximum (315), */
++0x75, 0x04, /* Report Size (4), */
++0x65, 0x14, /* Unit (Degrees), */
++0x09, 0x39, /* Usage (Hat Switch), */
++0x81, 0x42, /* Input (Variable, Null State), */
++0x75, 0x01, /* Report Size (1), */
++0x95, 0x04, /* Report Count (4), */
++0x65, 0x00, /* Unit, */
++0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
++0x09, 0x01, /* Usage (01h), */
++0x25, 0x01, /* Logical Maximum (1), */
++0x45, 0x01, /* Physical Maximum (1), */
++0x81, 0x02, /* Input (Variable), */
++0x05, 0x01, /* Usage Page (Desktop), */
++0x95, 0x01, /* Report Count (1), */
++0x75, 0x08, /* Report Size (8), */
++0x26, 0xFF, 0x00, /* Logical Maximum (255), */
++0x46, 0xFF, 0x00, /* Physical Maximum (255), */
++0x09, 0x31, /* Usage (Y), */
++0x81, 0x02, /* Input (Variable), */
++0x09, 0x32, /* Usage (Z), */
++0x81, 0x02, /* Input (Variable), */
++0xC0, /* End Collection, */
++0xA1, 0x02, /* Collection (Logical), */
++0x26, 0xFF, 0x00, /* Logical Maximum (255), */
++0x46, 0xFF, 0x00, /* Physical Maximum (255), */
++0x95, 0x07, /* Report Count (7), */
++0x75, 0x08, /* Report Size (8), */
++0x09, 0x03, /* Usage (03h), */
++0x91, 0x02, /* Output (Variable), */
++0xC0, /* End Collection, */
++0xC0 /* End Collection */
++};
++
+ static __u8 momo_rdesc_fixed[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystick), */
+@@ -275,6 +343,15 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ }
+ break;
+
++ case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
++ if (*rsize == FV_RDESC_ORIG_SIZE) {
++ hid_info(hdev,
++ "fixing up Logitech Formula Vibration report descriptor\n");
++ rdesc = fv_rdesc_fixed;
++ *rsize = sizeof(fv_rdesc_fixed);
++ }
++ break;
++
+ case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
+ if (*rsize == DFP_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+@@ -492,6 +569,7 @@ static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
++ case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
+ field->application = HID_GD_MULTIAXIS;
+ break;
+ default:
+@@ -639,6 +717,8 @@ static const struct hid_device_id lg_devices[] = {
+ .driver_data = LG_NOGET | LG_FF4 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
+ .driver_data = LG_FF4 },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL),
++ .driver_data = LG_FF2 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
+ .driver_data = LG_FF4 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL),
+diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
+index 1a42eaa6..0e3fb1a7 100644
+--- a/drivers/hid/hid-lg2ff.c
++++ b/drivers/hid/hid-lg2ff.c
+@@ -95,7 +95,7 @@ int lg2ff_init(struct hid_device *hid)
+
+ hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+
+- hid_info(hid, "Force feedback for Logitech RumblePad/Rumblepad 2 by Anssi Hannula <anssi.hannula@gmail.com>\n");
++ hid_info(hid, "Force feedback for Logitech variant 2 rumble devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
+
+ return 0;
+ }
+diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
+index 8782fe1a..49f6cc0f 100644
+--- a/drivers/hid/hid-lg4ff.c
++++ b/drivers/hid/hid-lg4ff.c
+@@ -218,12 +218,46 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ __s32 *value = report->field[0]->value;
++ __u32 expand_a, expand_b;
++
++ /* De-activate Auto-Center */
++ if (magnitude == 0) {
++ value[0] = 0xf5;
++ value[1] = 0x00;
++ value[2] = 0x00;
++ value[3] = 0x00;
++ value[4] = 0x00;
++ value[5] = 0x00;
++ value[6] = 0x00;
++
++ hid_hw_request(hid, report, HID_REQ_SET_REPORT);
++ return;
++ }
++
++ if (magnitude <= 0xaaaa) {
++ expand_a = 0x0c * magnitude;
++ expand_b = 0x80 * magnitude;
++ } else {
++ expand_a = (0x0c * 0xaaaa) + 0x06 * (magnitude - 0xaaaa);
++ expand_b = (0x80 * 0xaaaa) + 0xff * (magnitude - 0xaaaa);
++ }
+
+ value[0] = 0xfe;
+ value[1] = 0x0d;
+- value[2] = magnitude >> 13;
+- value[3] = magnitude >> 13;
+- value[4] = magnitude >> 8;
++ value[2] = expand_a / 0xaaaa;
++ value[3] = expand_a / 0xaaaa;
++ value[4] = expand_b / 0xaaaa;
++ value[5] = 0x00;
++ value[6] = 0x00;
++
++ hid_hw_request(hid, report, HID_REQ_SET_REPORT);
++
++ /* Activate Auto-Center */
++ value[0] = 0x14;
++ value[1] = 0x00;
++ value[2] = 0x00;
++ value[3] = 0x00;
++ value[4] = 0x00;
+ value[5] = 0x00;
+ value[6] = 0x00;
+
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 5e5fe1b8..d83b1e8b 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -250,12 +250,12 @@ static struct mt_class mt_classes[] = {
+ { .name = MT_CLS_GENERALTOUCH_TWOFINGERS,
+ .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
+ MT_QUIRK_VALID_IS_INRANGE |
+- MT_QUIRK_SLOT_IS_CONTACTNUMBER,
++ MT_QUIRK_SLOT_IS_CONTACTID,
+ .maxcontacts = 2
+ },
+ { .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+ .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
+- MT_QUIRK_SLOT_IS_CONTACTNUMBER
++ MT_QUIRK_SLOT_IS_CONTACTID
+ },
+
+ { .name = MT_CLS_FLATFROG,
+@@ -1173,6 +1173,21 @@ static const struct hid_device_id mt_devices[] = {
+ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) },
++ { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
++ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
++ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101) },
++ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
++ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
++ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102) },
++ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
++ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
++ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106) },
++ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
++ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
++ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A) },
++ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
++ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
++ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100) },
+
+ /* Gametel game controller */
+ { .driver_data = MT_CLS_NSMU,
+@@ -1284,6 +1299,14 @@ static const struct hid_device_id mt_devices[] = {
+ MT_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008) },
+
++ /* SiS panels */
++ { .driver_data = MT_CLS_DEFAULT,
++ HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
++ USB_DEVICE_ID_SIS9200_TOUCH) },
++ { .driver_data = MT_CLS_DEFAULT,
++ HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
++ USB_DEVICE_ID_SIS817_TOUCH) },
++
+ /* Stantum panels */
+ { .driver_data = MT_CLS_CONFIDENCE,
+ MT_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+@@ -1312,6 +1335,12 @@ static const struct hid_device_id mt_devices[] = {
+ { .driver_data = MT_CLS_NSMU,
+ MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
+ USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
++
++ /* Wistron panels */
++ { .driver_data = MT_CLS_NSMU,
++ MT_USB_DEVICE(USB_VENDOR_ID_WISTRON,
++ USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH) },
++
+ /* XAT */
+ { .driver_data = MT_CLS_NSMU,
+ MT_USB_DEVICE(USB_VENDOR_ID_XAT,
+diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
+index 74f70403..e8408999 100644
+--- a/drivers/hid/hid-roccat-common.c
++++ b/drivers/hid/hid-roccat-common.c
+@@ -65,10 +65,11 @@ int roccat_common2_send(struct usb_device *usb_dev, uint report_id,
+ EXPORT_SYMBOL_GPL(roccat_common2_send);
+
+ enum roccat_common2_control_states {
+- ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD = 0,
++ ROCCAT_COMMON_CONTROL_STATUS_CRITICAL = 0,
+ ROCCAT_COMMON_CONTROL_STATUS_OK = 1,
+ ROCCAT_COMMON_CONTROL_STATUS_INVALID = 2,
+- ROCCAT_COMMON_CONTROL_STATUS_WAIT = 3,
++ ROCCAT_COMMON_CONTROL_STATUS_BUSY = 3,
++ ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW = 4,
+ };
+
+ static int roccat_common2_receive_control_status(struct usb_device *usb_dev)
+@@ -88,13 +89,12 @@ static int roccat_common2_receive_control_status(struct usb_device *usb_dev)
+ switch (control.value) {
+ case ROCCAT_COMMON_CONTROL_STATUS_OK:
+ return 0;
+- case ROCCAT_COMMON_CONTROL_STATUS_WAIT:
++ case ROCCAT_COMMON_CONTROL_STATUS_BUSY:
+ msleep(500);
+ continue;
+ case ROCCAT_COMMON_CONTROL_STATUS_INVALID:
+-
+- case ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD:
+- /* seems to be critical - replug necessary */
++ case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL:
++ case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW:
+ return -EINVAL;
+ default:
+ dev_err(&usb_dev->dev,
+diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
+index 0c8e1ef0..96604771 100644
+--- a/drivers/hid/hid-roccat-kovaplus.c
++++ b/drivers/hid/hid-roccat-kovaplus.c
+@@ -554,9 +554,13 @@ static void kovaplus_keep_values_up_to_date(struct kovaplus_device *kovaplus,
+ break;
+ case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI:
+ kovaplus->actual_cpi = kovaplus_convert_event_cpi(button_report->data1);
++ break;
+ case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY:
+ kovaplus->actual_x_sensitivity = button_report->data1;
+ kovaplus->actual_y_sensitivity = button_report->data2;
++ break;
++ default:
++ break;
+ }
+ }
+
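The kovaplus hunk above is a classic missing-break fix: without it, a CPI button report fell through and also rewrote the sensitivity fields from bytes that belong to a different report type. The shape of the bug, reduced to an illustrative sketch (names assumed, not the driver's):

	switch (report_type) {
	case REPORT_CPI:
		state->cpi = data1;
		break;			/* previously missing: fell through */
	case REPORT_SENSITIVITY:
		state->x_sens = data1;
		state->y_sens = data2;
		break;
	default:			/* also added: ignore unknown types */
		break;
	}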
+diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
+index 10e15810..9e4cdca5 100644
+--- a/drivers/hid/hid-sensor-hub.c
++++ b/drivers/hid/hid-sensor-hub.c
+@@ -326,7 +326,8 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
+ field->logical == attr_usage_id) {
+ sensor_hub_fill_attr_info(info, i, report->id,
+ field->unit, field->unit_exponent,
+- field->report_size);
++ field->report_size *
++ field->report_count);
+ ret = 0;
+ } else {
+ for (j = 0; j < field->maxusage; ++j) {
+@@ -338,7 +339,8 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
+ i, report->id,
+ field->unit,
+ field->unit_exponent,
+- field->report_size);
++ field->report_size *
++ field->report_count);
+ ret = 0;
+ break;
+ }
+@@ -425,9 +427,10 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
+ hid_dbg(hdev, "%d collection_index:%x hid:%x sz:%x\n",
+ i, report->field[i]->usage->collection_index,
+ report->field[i]->usage->hid,
+- report->field[i]->report_size/8);
+-
+- sz = report->field[i]->report_size/8;
++ (report->field[i]->report_size *
++ report->field[i]->report_count)/8);
++ sz = (report->field[i]->report_size *
++ report->field[i]->report_count)/8;
+ if (pdata->pending.status && pdata->pending.attr_usage_id ==
+ report->field[i]->usage->hid) {
+ hid_dbg(hdev, "data was pending ...\n");
+diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
+index 71adf9e6..e30567af 100644
+--- a/drivers/hid/hid-wiimote-modules.c
++++ b/drivers/hid/hid-wiimote-modules.c
+@@ -1656,9 +1656,9 @@ static void wiimod_pro_in_ext(struct wiimote_data *wdata, const __u8 *ext)
+ ry = (ext[6] & 0xff) | ((ext[7] & 0x0f) << 8);
+
+ input_report_abs(wdata->extension.input, ABS_X, lx - 0x800);
+- input_report_abs(wdata->extension.input, ABS_Y, ly - 0x800);
++ input_report_abs(wdata->extension.input, ABS_Y, 0x800 - ly);
+ input_report_abs(wdata->extension.input, ABS_RX, rx - 0x800);
+- input_report_abs(wdata->extension.input, ABS_RY, ry - 0x800);
++ input_report_abs(wdata->extension.input, ABS_RY, 0x800 - ry);
+
+ input_report_key(wdata->extension.input,
+ wiimod_pro_map[WIIMOD_PRO_KEY_RIGHT],
+diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
+index 93b00d76..cedc6da9 100644
+--- a/drivers/hid/uhid.c
++++ b/drivers/hid/uhid.c
+@@ -287,7 +287,7 @@ static int uhid_event_from_user(const char __user *buffer, size_t len,
+ */
+ struct uhid_create_req_compat *compat;
+
+- compat = kmalloc(sizeof(*compat), GFP_KERNEL);
++ compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+ if (!compat)
+ return -ENOMEM;
+
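The one-word uhid change closes an information leak: the compat request is only partially filled from the user buffer before being consumed as a whole, so kmalloc()'s stale heap bytes in the unwritten fields would have been treated as data. A self-contained userspace analogue of the same hazard, where calloc() plays the role of kzalloc():

	#include <stdlib.h>
	#include <string.h>

	struct req {
		char name[64];
		unsigned int flags;
	};

	/* Every byte the caller does not write is a deterministic zero
	 * rather than whatever the allocator left behind. */
	static struct req *make_req(const char *name)
	{
		struct req *r = calloc(1, sizeof(*r));

		if (!r)
			return NULL;
		strncpy(r->name, name, sizeof(r->name) - 1);
		return r;
	}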
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 3fca3be0..0db9a672 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -84,6 +84,8 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
+diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
+index c65da3d9..4bf9507c 100644
+--- a/drivers/i2c/busses/i2c-wmt.c
++++ b/drivers/i2c/busses/i2c-wmt.c
+@@ -349,6 +349,7 @@ static int wmt_i2c_reset_hardware(struct wmt_i2c_dev *i2c_dev)
+ err = clk_set_rate(i2c_dev->clk, 20000000);
+ if (err) {
+ dev_err(i2c_dev->dev, "failed to set clock = 20Mhz\n");
++ clk_disable_unprepare(i2c_dev->clk);
+ return err;
+ }
+
+diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
+index a764da77..f821facb 100644
+--- a/drivers/i2c/muxes/i2c-mux-gpio.c
++++ b/drivers/i2c/muxes/i2c-mux-gpio.c
+@@ -30,15 +30,15 @@ static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
+ int i;
+
+ for (i = 0; i < mux->data.n_gpios; i++)
+- gpio_set_value(mux->gpio_base + mux->data.gpios[i],
+- val & (1 << i));
++ gpio_set_value_cansleep(mux->gpio_base + mux->data.gpios[i],
++ val & (1 << i));
+ }
+
+ static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan)
+ {
+ struct gpiomux *mux = data;
+
+- i2c_mux_gpio_set(mux, mux->data.values[chan]);
++ i2c_mux_gpio_set(mux, chan);
+
+ return 0;
+ }
+@@ -228,7 +228,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
+ unsigned int class = mux->data.classes ? mux->data.classes[i] : 0;
+
+ mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr,
+- i, class,
++ mux->data.values[i], class,
+ i2c_mux_gpio_select, deselect);
+ if (!mux->adap[i]) {
+ ret = -ENODEV;
+diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
+index 709c1325..34277153 100644
+--- a/drivers/iio/accel/kxsd9.c
++++ b/drivers/iio/accel/kxsd9.c
+@@ -112,9 +112,10 @@ static int kxsd9_read(struct iio_dev *indio_dev, u8 address)
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = KXSD9_READ(address);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
+- if (ret)
+- return ret;
+- return (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
++ if (!ret)
++ ret = (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
++ mutex_unlock(&st->buf_lock);
++ return ret;
+ }
+
+ static IIO_CONST_ATTR(accel_scale_available,
+diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+index f5cb13b2..cc04b7ba 100644
+--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
++++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+@@ -280,9 +280,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
+ int j;
+ int ret;
+
+- ret = get_user_pages(current, current->mm, addr,
+- npages, 0, 1, pages, NULL);
+-
++ ret = get_user_pages_fast(addr, npages, 0, pages);
+ if (ret != npages) {
+ int i;
+
+@@ -811,10 +809,7 @@ int ipath_user_sdma_writev(struct ipath_devdata *dd,
+ while (dim) {
+ const int mxp = 8;
+
+- down_write(&current->mm->mmap_sem);
+ ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+- up_write(&current->mm->mmap_sem);
+-
+ if (ret <= 0)
+ goto done_unlock;
+ else {
+diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
+index 016e7429..5bfc02f4 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7322.c
++++ b/drivers/infiniband/hw/qib/qib_iba7322.c
+@@ -6190,21 +6190,20 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
+ {
+ struct qib_devdata *dd;
+ unsigned long val;
+- int ret;
+-
++ char *n;
+ if (strlen(str) >= MAX_ATTEN_LEN) {
+ pr_info("txselect_values string too long\n");
+ return -ENOSPC;
+ }
+- ret = kstrtoul(str, 0, &val);
+- if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
++ val = simple_strtoul(str, &n, 0);
++ if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+ TXDDS_MFG_SZ)) {
+ pr_info("txselect_values must start with a number < %d\n",
+ TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
+- return ret ? ret : -EINVAL;
++ return -EINVAL;
+ }
+-
+ strcpy(txselect_list, str);
++
+ list_for_each_entry(dd, &qib_dev_list, list)
+ if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
+ set_no_qsfp_atten(dd, 1);
+diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
+index d0a0ea0c..165aee2c 100644
+--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
++++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
+@@ -594,8 +594,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
+ else
+ j = npages;
+
+- ret = get_user_pages(current, current->mm, addr,
+- j, 0, 1, pages, NULL);
++ ret = get_user_pages_fast(addr, j, 0, pages);
+ if (ret != j) {
+ i = 0;
+ j = ret;
+@@ -1294,11 +1293,8 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
+ int mxp = 8;
+ int ndesc = 0;
+
+- down_write(&current->mm->mmap_sem);
+ ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
+ iov, dim, &list, &mxp, &ndesc);
+- up_write(&current->mm->mmap_sem);
+-
+ if (ret < 0)
+ goto done_unlock;
+ else {
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 6df23502..db5d0a31 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -1991,8 +1991,6 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+
+ if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ data_left = se_cmd->data_length;
+- iscsit_increment_maxcmdsn(cmd, conn->sess);
+- cmd->stat_sn = conn->stat_sn++;
+ } else {
+ sg_off = cmd->write_data_done / PAGE_SIZE;
+ data_left = se_cmd->data_length - cmd->write_data_done;
+@@ -2204,8 +2202,6 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+
+ if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ data_left = se_cmd->data_length;
+- iscsit_increment_maxcmdsn(cmd, conn->sess);
+- cmd->stat_sn = conn->stat_sn++;
+ } else {
+ sg_off = cmd->write_data_done / PAGE_SIZE;
+ data_left = se_cmd->data_length - cmd->write_data_done;
+@@ -2259,18 +2255,26 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ data_len = min(data_left, rdma_write_max);
+ wr->cur_rdma_length = data_len;
+
+- spin_lock_irqsave(&isert_conn->conn_lock, flags);
+- fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+- struct fast_reg_descriptor, list);
+- list_del(&fr_desc->list);
+- spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+- wr->fr_desc = fr_desc;
++ /* if there is a single dma entry, dma mr is sufficient */
++ if (count == 1) {
++ ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
++ ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
++ ib_sge->lkey = isert_conn->conn_mr->lkey;
++ wr->fr_desc = NULL;
++ } else {
++ spin_lock_irqsave(&isert_conn->conn_lock, flags);
++ fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
++ struct fast_reg_descriptor, list);
++ list_del(&fr_desc->list);
++ spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
++ wr->fr_desc = fr_desc;
+
+- ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
+- ib_sge, offset, data_len);
+- if (ret) {
+- list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+- goto unmap_sg;
++ ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
++ ib_sge, offset, data_len);
++ if (ret) {
++ list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
++ goto unmap_sg;
++ }
+ }
+
+ return 0;
+@@ -2306,7 +2310,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+ * Build isert_conn->tx_desc for iSCSI response PDU and attach
+ */
+ isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+- iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
++ iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
+ &isert_cmd->tx_desc.iscsi_header);
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+ isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index f93baf82..17b58f4f 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -534,6 +534,11 @@ static void srp_remove_target(struct srp_target_port *target)
+ ib_destroy_cm_id(target->cm_id);
+ srp_free_target_ib(target);
+ srp_free_req_data(target);
++
++ spin_lock(&target->srp_host->target_lock);
++ list_del(&target->list);
++ spin_unlock(&target->srp_host->target_lock);
++
+ scsi_host_put(target->scsi_host);
+ }
+
+@@ -545,10 +550,6 @@ static void srp_remove_work(struct work_struct *work)
+ WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
+
+ srp_remove_target(target);
+-
+- spin_lock(&target->srp_host->target_lock);
+- list_del(&target->list);
+- spin_unlock(&target->srp_host->target_lock);
+ }
+
+ static void srp_rport_delete(struct srp_rport *rport)
+@@ -1302,14 +1303,13 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
+ PFX "Recv failed with error code %d\n", res);
+ }
+
+-static void srp_handle_qp_err(enum ib_wc_status wc_status,
+- enum ib_wc_opcode wc_opcode,
++static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err,
+ struct srp_target_port *target)
+ {
+ if (target->connected && !target->qp_in_error) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "failed %s status %d\n",
+- wc_opcode & IB_WC_RECV ? "receive" : "send",
++ send_err ? "send" : "receive",
+ wc_status);
+ }
+ target->qp_in_error = true;
+@@ -1325,7 +1325,7 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
+ if (likely(wc.status == IB_WC_SUCCESS)) {
+ srp_handle_recv(target, &wc);
+ } else {
+- srp_handle_qp_err(wc.status, wc.opcode, target);
++ srp_handle_qp_err(wc.status, false, target);
+ }
+ }
+ }
+@@ -1341,7 +1341,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
+ iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
+ list_add(&iu->list, &target->free_tx);
+ } else {
+- srp_handle_qp_err(wc.status, wc.opcode, target);
++ srp_handle_qp_err(wc.status, true, target);
+ }
+ }
+ }
+@@ -1751,7 +1751,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
+
+ if (!req || !srp_claim_req(target, req, scmnd))
+- return FAILED;
++ return SUCCESS;
+ if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+ SRP_TSK_ABORT_TASK) == 0)
+ ret = SUCCESS;
+diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
+index b6ded17b..a06e1255 100644
+--- a/drivers/input/evdev.c
++++ b/drivers/input/evdev.c
+@@ -18,6 +18,8 @@
+ #include <linux/poll.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/mm.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/input/mt.h>
+@@ -369,7 +371,11 @@ static int evdev_release(struct inode *inode, struct file *file)
+ mutex_unlock(&evdev->mutex);
+
+ evdev_detach_client(evdev, client);
+- kfree(client);
++
++ if (is_vmalloc_addr(client))
++ vfree(client);
++ else
++ kfree(client);
+
+ evdev_close_device(evdev);
+
+@@ -389,12 +395,14 @@ static int evdev_open(struct inode *inode, struct file *file)
+ {
+ struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
+ unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
++ unsigned int size = sizeof(struct evdev_client) +
++ bufsize * sizeof(struct input_event);
+ struct evdev_client *client;
+ int error;
+
+- client = kzalloc(sizeof(struct evdev_client) +
+- bufsize * sizeof(struct input_event),
+- GFP_KERNEL);
++ client = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
++ if (!client)
++ client = vzalloc(size);
+ if (!client)
+ return -ENOMEM;
+
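The evdev allocation change is the try-slab-then-vmalloc fallback: a large per-client buffer may not find physically contiguous pages, so the kzalloc() attempt is made non-warning and vzalloc() covers the failure, with the free side dispatching on is_vmalloc_addr(). As a generic pair of helpers using the headers the hunk itself adds (<linux/vmalloc.h>, <linux/mm.h>, plus <linux/slab.h>); helper names are assumed, and later kernels provide kvzalloc()/kvfree() for exactly this:

	static void *big_zalloc(size_t size)
	{
		/* __GFP_NOWARN: a failed contiguous attempt is expected */
		void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

		if (!p)
			p = vzalloc(size);	/* virtually contiguous suffices */
		return p;
	}

	static void big_free(void *p)
	{
		if (is_vmalloc_addr(p))
			vfree(p);
		else
			kfree(p);
	}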
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 83658472..7c5d72a6 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -103,7 +103,6 @@ static const struct alps_model_info alps_model_data[] = {
+ /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
+ { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
+ ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
+- { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT }, /* Dell XT2 */
+ { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
+ { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
+ ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
+diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
+index f51765ff..888a81a7 100644
+--- a/drivers/input/mouse/cypress_ps2.c
++++ b/drivers/input/mouse/cypress_ps2.c
+@@ -439,7 +439,7 @@ static int cypress_get_finger_count(unsigned char header_byte)
+ case 2: return 5;
+ default:
+ /* Invalid contact (e.g. palm). Ignore it. */
+- return -1;
++ return 0;
+ }
+ }
+
+@@ -452,17 +452,10 @@ static int cypress_parse_packet(struct psmouse *psmouse,
+ {
+ unsigned char *packet = psmouse->packet;
+ unsigned char header_byte = packet[0];
+- int contact_cnt;
+
+ memset(report_data, 0, sizeof(struct cytp_report_data));
+
+- contact_cnt = cypress_get_finger_count(header_byte);
+-
+- if (contact_cnt < 0) /* e.g. palm detect */
+- return -EINVAL;
+-
+- report_data->contact_cnt = contact_cnt;
+-
++ report_data->contact_cnt = cypress_get_finger_count(header_byte);
+ report_data->tap = (header_byte & ABS_MULTIFINGER_TAP) ? 1 : 0;
+
+ if (report_data->contact_cnt == 1) {
+@@ -535,11 +528,9 @@ static void cypress_process_packet(struct psmouse *psmouse, bool zero_pkt)
+ int slots[CYTP_MAX_MT_SLOTS];
+ int n;
+
+- if (cypress_parse_packet(psmouse, cytp, &report_data))
+- return;
++ cypress_parse_packet(psmouse, cytp, &report_data);
+
+ n = report_data.contact_cnt;
+-
+ if (n > CYTP_MAX_MT_SLOTS)
+ n = CYTP_MAX_MT_SLOTS;
+
+@@ -605,10 +596,6 @@ static psmouse_ret_t cypress_validate_byte(struct psmouse *psmouse)
+ return PSMOUSE_BAD_DATA;
+
+ contact_cnt = cypress_get_finger_count(packet[0]);
+-
+- if (contact_cnt < 0)
+- return PSMOUSE_BAD_DATA;
+-
+ if (cytp->mode & CYTP_BIT_ABS_NO_PRESSURE)
+ cypress_set_packet_size(psmouse, contact_cnt == 2 ? 7 : 4);
+ else
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 5f306f79..0ec9abbe 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -765,6 +765,7 @@ static struct pnp_device_id pnp_kbd_devids[] = {
+ { .id = "CPQA0D7", .driver_data = 0 },
+ { .id = "", },
+ };
++MODULE_DEVICE_TABLE(pnp, pnp_kbd_devids);
+
+ static struct pnp_driver i8042_pnp_kbd_driver = {
+ .name = "i8042 kbd",
+@@ -786,6 +787,7 @@ static struct pnp_device_id pnp_aux_devids[] = {
+ { .id = "SYN0801", .driver_data = 0 },
+ { .id = "", },
+ };
++MODULE_DEVICE_TABLE(pnp, pnp_aux_devids);
+
+ static struct pnp_driver i8042_pnp_aux_driver = {
+ .name = "i8042 aux",
+diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
+index 721fdb35..ae4b6b90 100644
+--- a/drivers/input/touchscreen/usbtouchscreen.c
++++ b/drivers/input/touchscreen/usbtouchscreen.c
+@@ -146,12 +146,10 @@ enum {
+
+ #define USB_DEVICE_HID_CLASS(vend, prod) \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \
+- | USB_DEVICE_ID_MATCH_INT_PROTOCOL \
+ | USB_DEVICE_ID_MATCH_DEVICE, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+- .bInterfaceClass = USB_INTERFACE_CLASS_HID, \
+- .bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE
++ .bInterfaceClass = USB_INTERFACE_CLASS_HID
+
+ static const struct usb_device_id usbtouch_devices[] = {
+ #ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index f42fc7ed..d1734d9d 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1742,6 +1742,9 @@ static bool fix_overlapping_extents(struct btree *b,
+ if (bkey_cmp(insert, k) < 0) {
+ bch_cut_front(insert, k);
+ } else {
++ if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
++ old_offset = KEY_START(insert);
++
+ if (bkey_written(b, k) &&
+ bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
+ /*
+@@ -1803,6 +1806,10 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
+ if (fix_overlapping_extents(b, k, &iter, op))
+ return false;
+
++ if (KEY_DIRTY(k))
++ bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
++ KEY_START(k), KEY_SIZE(k));
++
+ while (m != end(i) &&
+ bkey_cmp(k, &START_KEY(m)) > 0)
+ prev = m, m = bkey_next(m);
+@@ -1831,10 +1838,6 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
+ insert: shift_keys(b, m, k);
+ copy: bkey_copy(m, k);
+ merged:
+- if (KEY_DIRTY(k))
+- bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+- KEY_START(k), KEY_SIZE(k));
+-
+ bch_check_keys(b, "%u for %s", status, op_type(op));
+
+ if (b->level && !KEY_OFFSET(k))
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 29569768..ea3e4b4f 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -148,6 +148,9 @@ struct cache {
+ wait_queue_head_t migration_wait;
+ atomic_t nr_migrations;
+
++ wait_queue_head_t quiescing_wait;
++ atomic_t quiescing_ack;
++
+ /*
+ * cache_size entries, dirty if set
+ */
+@@ -748,8 +751,9 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+
+ static void cleanup_migration(struct dm_cache_migration *mg)
+ {
+- dec_nr_migrations(mg->cache);
++ struct cache *cache = mg->cache;
+ free_migration(mg);
++ dec_nr_migrations(cache);
+ }
+
+ static void migration_failure(struct dm_cache_migration *mg)
+@@ -1346,34 +1350,51 @@ static void writeback_some_dirty_blocks(struct cache *cache)
+ /*----------------------------------------------------------------
+ * Main worker loop
+ *--------------------------------------------------------------*/
+-static void start_quiescing(struct cache *cache)
++static bool is_quiescing(struct cache *cache)
+ {
++ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+- cache->quiescing = 1;
++ r = cache->quiescing;
+ spin_unlock_irqrestore(&cache->lock, flags);
++
++ return r;
+ }
+
+-static void stop_quiescing(struct cache *cache)
++static void ack_quiescing(struct cache *cache)
++{
++ if (is_quiescing(cache)) {
++ atomic_inc(&cache->quiescing_ack);
++ wake_up(&cache->quiescing_wait);
++ }
++}
++
++static void wait_for_quiescing_ack(struct cache *cache)
++{
++ wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
++}
++
++static void start_quiescing(struct cache *cache)
+ {
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+- cache->quiescing = 0;
++ cache->quiescing = true;
+ spin_unlock_irqrestore(&cache->lock, flags);
++
++ wait_for_quiescing_ack(cache);
+ }
+
+-static bool is_quiescing(struct cache *cache)
++static void stop_quiescing(struct cache *cache)
+ {
+- int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+- r = cache->quiescing;
++ cache->quiescing = false;
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+- return r;
++ atomic_set(&cache->quiescing_ack, 0);
+ }
+
+ static void wait_for_migrations(struct cache *cache)
+@@ -1420,16 +1441,15 @@ static void do_worker(struct work_struct *ws)
+ struct cache *cache = container_of(ws, struct cache, worker);
+
+ do {
+- if (!is_quiescing(cache))
++ if (!is_quiescing(cache)) {
++ writeback_some_dirty_blocks(cache);
++ process_deferred_writethrough_bios(cache);
+ process_deferred_bios(cache);
++ }
+
+ process_migrations(cache, &cache->quiesced_migrations, issue_copy);
+ process_migrations(cache, &cache->completed_migrations, complete_migration);
+
+- writeback_some_dirty_blocks(cache);
+-
+- process_deferred_writethrough_bios(cache);
+-
+ if (commit_if_needed(cache)) {
+ process_deferred_flush_bios(cache, false);
+
+@@ -1442,6 +1462,9 @@ static void do_worker(struct work_struct *ws)
+ process_migrations(cache, &cache->need_commit_migrations,
+ migration_success_post_commit);
+ }
++
++ ack_quiescing(cache);
++
+ } while (more_work(cache));
+ }
+
+@@ -2005,6 +2028,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
+ atomic_set(&cache->nr_migrations, 0);
+ init_waitqueue_head(&cache->migration_wait);
+
++ init_waitqueue_head(&cache->quiescing_wait);
++ atomic_set(&cache->quiescing_ack, 0);
++
+ r = -ENOMEM;
+ cache->nr_dirty = 0;
+ cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
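The quiescing rework above turns a bare flag into a two-sided handshake: the requester publishes the flag and sleeps, and the worker, which now samples the flag only at pass boundaries, acknowledges once no further bios will be issued; stop_quiescing() resets the counter for the next round. Reduced to its wait_event()/wake_up() skeleton (a sketch; locking helpers are assumed, field names mirror the patch):

	/* requester side */
	static void quiesce(struct ctx *c)
	{
		set_quiescing_flag_locked(c);	/* under c->lock, as above */
		wait_event(c->quiescing_wait,
			   atomic_read(&c->quiescing_ack));
	}

	/* worker side, at the end of each pass */
	static void maybe_ack(struct ctx *c)
	{
		if (quiescing_flag_set(c)) {
			atomic_inc(&c->quiescing_ack);
			wake_up(&c->quiescing_wait);
		}
	}

Because the ack can only happen between work items, a returned quiesce() guarantees no deferred-bio pass is still in flight.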
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index de570a55..799e479d 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -87,6 +87,7 @@ struct multipath {
+ unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
+ unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
+ unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
++ unsigned pg_init_disabled:1; /* pg_init is not currently allowed */
+
+ unsigned pg_init_retries; /* Number of times to retry pg_init */
+ unsigned pg_init_count; /* Number of times pg_init called */
+@@ -497,7 +498,8 @@ static void process_queued_ios(struct work_struct *work)
+ (!pgpath && !m->queue_if_no_path))
+ must_queue = 0;
+
+- if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
++ if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
++ !m->pg_init_disabled)
+ __pg_init_all_paths(m);
+
+ spin_unlock_irqrestore(&m->lock, flags);
+@@ -942,10 +944,20 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
+
+ static void flush_multipath_work(struct multipath *m)
+ {
++ unsigned long flags;
++
++ spin_lock_irqsave(&m->lock, flags);
++ m->pg_init_disabled = 1;
++ spin_unlock_irqrestore(&m->lock, flags);
++
+ flush_workqueue(kmpath_handlerd);
+ multipath_wait_for_pg_init_completion(m);
+ flush_workqueue(kmultipathd);
+ flush_work(&m->trigger_event);
++
++ spin_lock_irqsave(&m->lock, flags);
++ m->pg_init_disabled = 0;
++ spin_unlock_irqrestore(&m->lock, flags);
+ }
+
+ static void multipath_dtr(struct dm_target *ti)
+@@ -1164,7 +1176,7 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
+
+ spin_lock_irqsave(&m->lock, flags);
+
+- if (m->pg_init_count <= m->pg_init_retries)
++ if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
+ m->pg_init_required = 1;
+ else
+ limit_reached = 1;
+@@ -1714,7 +1726,7 @@ out:
+ *---------------------------------------------------------------*/
+ static struct target_type multipath_target = {
+ .name = "multipath",
+- .version = {1, 5, 1},
++ .version = {1, 6, 0},
+ .module = THIS_MODULE,
+ .ctr = multipath_ctr,
+ .dtr = multipath_dtr,
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 8f878353..41d907b5 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -545,14 +545,28 @@ static int adjoin(struct dm_table *table, struct dm_target *ti)
+
+ /*
+ * Used to dynamically allocate the arg array.
++ *
++ * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
++ * process messages even if some device is suspended. These messages have a
++ * small fixed number of arguments.
++ *
++ * On the other hand, dm-switch needs to process bulk data using messages and
++ * excessive use of GFP_NOIO could cause trouble.
+ */
+ static char **realloc_argv(unsigned *array_size, char **old_argv)
+ {
+ char **argv;
+ unsigned new_size;
++ gfp_t gfp;
+
+- new_size = *array_size ? *array_size * 2 : 64;
+- argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
++ if (*array_size) {
++ new_size = *array_size * 2;
++ gfp = GFP_KERNEL;
++ } else {
++ new_size = 8;
++ gfp = GFP_NOIO;
++ }
++ argv = kmalloc(new_size * sizeof(*argv), gfp);
+ if (argv) {
+ memcpy(argv, old_argv, *array_size * sizeof(*argv));
+ *array_size = new_size;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 561a65f8..ba46d974 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -3620,6 +3620,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
+ mddev->in_sync = 1;
+ del_timer_sync(&mddev->safemode_timer);
+ }
++ blk_set_stacking_limits(&mddev->queue->limits);
+ pers->run(mddev);
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ mddev_resume(mddev);
+@@ -7791,7 +7792,7 @@ void md_check_recovery(struct mddev *mddev)
+ if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ return;
+ if ( ! (
+- (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
++ (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
+ test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+ (mddev->external == 0 && mddev->safemode == 1) ||
+diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
+index 172147eb..af96e24e 100644
+--- a/drivers/md/persistent-data/dm-array.c
++++ b/drivers/md/persistent-data/dm-array.c
+@@ -509,15 +509,18 @@ static int grow_add_tail_block(struct resize *resize)
+ static int grow_needs_more_blocks(struct resize *resize)
+ {
+ int r;
++ unsigned old_nr_blocks = resize->old_nr_full_blocks;
+
+ if (resize->old_nr_entries_in_last_block > 0) {
++ old_nr_blocks++;
++
+ r = grow_extend_tail_block(resize, resize->max_entries);
+ if (r)
+ return r;
+ }
+
+ r = insert_full_ablocks(resize->info, resize->size_of_block,
+- resize->old_nr_full_blocks,
++ old_nr_blocks,
+ resize->new_nr_full_blocks,
+ resize->max_entries, resize->value,
+ &resize->root);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index f8b90684..e4109f61 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -340,7 +340,8 @@ static void release_stripe(struct stripe_head *sh)
+ unsigned long flags;
+ bool wakeup;
+
+- if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
++ if (unlikely(!conf->mddev->thread) ||
++ test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
+ goto slow_path;
+ wakeup = llist_add(&sh->release_list, &conf->released_stripes);
+ if (wakeup)
+@@ -5239,6 +5240,9 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
+ old_groups = conf->worker_groups;
+ old_group_cnt = conf->worker_cnt_per_group;
+
++ if (old_groups)
++ flush_workqueue(raid5_wq);
++
+ conf->worker_groups = NULL;
+ err = alloc_thread_groups(conf, new);
+ if (err) {
+diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
+index a204f282..fb504f1e 100644
+--- a/drivers/media/dvb-frontends/af9013.c
++++ b/drivers/media/dvb-frontends/af9013.c
+@@ -24,6 +24,9 @@
+
+ #include "af9013_priv.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ struct af9013_state {
+ struct i2c_adapter *i2c;
+ struct dvb_frontend fe;
+@@ -50,16 +53,23 @@ static int af9013_wr_regs_i2c(struct af9013_state *priv, u8 mbox, u16 reg,
+ const u8 *val, int len)
+ {
+ int ret;
+- u8 buf[3+len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->config.i2c_addr,
+ .flags = 0,
+- .len = sizeof(buf),
++ .len = 3 + len,
+ .buf = buf,
+ }
+ };
+
++ if (3 + len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ buf[0] = (reg >> 8) & 0xff;
+ buf[1] = (reg >> 0) & 0xff;
+ buf[2] = mbox;
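This af9013 hunk is the first of a series of identical conversions in the DVB frontends that follow: a C99 variable-length array on the kernel stack (u8 buf[3 + len]) becomes a fixed MAX_XFER_SIZE buffer plus an explicit bounds check, trading silent unbounded stack use for a clean -EINVAL. The generic shape, as a sketch rather than any one driver:

	#include <linux/i2c.h>
	#include <linux/string.h>
	#include <linux/types.h>

	#define MAX_XFER_SIZE 64

	static int wr_regs(struct i2c_adapter *i2c, u8 addr, u16 reg,
			   const u8 *val, int len)
	{
		u8 buf[MAX_XFER_SIZE];
		struct i2c_msg msg = {
			.addr = addr, .flags = 0, .len = 2 + len, .buf = buf,
		};

		if (2 + len > sizeof(buf))	/* reject, don't smash the stack */
			return -EINVAL;

		buf[0] = reg >> 8;
		buf[1] = reg & 0xff;
		memcpy(&buf[2], val, len);
		return i2c_transfer(i2c, &msg, 1) == 1 ? 0 : -EIO;
	}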
+diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
+index a777b4b9..30ee5905 100644
+--- a/drivers/media/dvb-frontends/af9033.c
++++ b/drivers/media/dvb-frontends/af9033.c
+@@ -21,6 +21,9 @@
+
+ #include "af9033_priv.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ struct af9033_state {
+ struct i2c_adapter *i2c;
+ struct dvb_frontend fe;
+@@ -40,16 +43,23 @@ static int af9033_wr_regs(struct af9033_state *state, u32 reg, const u8 *val,
+ int len)
+ {
+ int ret;
+- u8 buf[3 + len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = state->cfg.i2c_addr,
+ .flags = 0,
+- .len = sizeof(buf),
++ .len = 3 + len,
+ .buf = buf,
+ }
+ };
+
++ if (3 + len > sizeof(buf)) {
++ dev_warn(&state->i2c->dev,
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ buf[0] = (reg >> 16) & 0xff;
+ buf[1] = (reg >> 8) & 0xff;
+ buf[2] = (reg >> 0) & 0xff;
+@@ -161,7 +171,14 @@ static int af9033_wr_reg_val_tab(struct af9033_state *state,
+ const struct reg_val *tab, int tab_len)
+ {
+ int ret, i, j;
+- u8 buf[tab_len];
++ u8 buf[MAX_XFER_SIZE];
++
++ if (tab_len > sizeof(buf)) {
++ dev_warn(&state->i2c->dev,
++ "%s: i2c wr len=%d is too big!\n",
++ KBUILD_MODNAME, tab_len);
++ return -EINVAL;
++ }
+
+ dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
+
+diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
+index 1b77909c..39a29dd2 100644
+--- a/drivers/media/dvb-frontends/bcm3510.c
++++ b/drivers/media/dvb-frontends/bcm3510.c
+@@ -44,6 +44,9 @@
+ #include "bcm3510.h"
+ #include "bcm3510_priv.h"
+
++/* Max transfer size done by bcm3510_do_hab_cmd() function */
++#define MAX_XFER_SIZE 128
++
+ struct bcm3510_state {
+
+ struct i2c_adapter* i2c;
+@@ -201,9 +204,19 @@ static int bcm3510_hab_send_request(struct bcm3510_state *st, u8 *buf, int len)
+
+ static int bcm3510_do_hab_cmd(struct bcm3510_state *st, u8 cmd, u8 msgid, u8 *obuf, u8 olen, u8 *ibuf, u8 ilen)
+ {
+- u8 ob[olen+2],ib[ilen+2];
++ u8 ob[MAX_XFER_SIZE], ib[MAX_XFER_SIZE];
+ int ret = 0;
+
++ if (ilen + 2 > sizeof(ib)) {
++ deb_hab("do_hab_cmd: ilen=%d is too big!\n", ilen);
++ return -EINVAL;
++ }
++
++ if (olen + 2 > sizeof(ob)) {
++ deb_hab("do_hab_cmd: olen=%d is too big!\n", olen);
++ return -EINVAL;
++ }
++
+ ob[0] = cmd;
+ ob[1] = msgid;
+ memcpy(&ob[2],obuf,olen);
+diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
+index 7ca5c69d..51447a04 100644
+--- a/drivers/media/dvb-frontends/cxd2820r_core.c
++++ b/drivers/media/dvb-frontends/cxd2820r_core.c
+@@ -21,12 +21,15 @@
+
+ #include "cxd2820r_priv.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ /* write multiple registers */
+ static int cxd2820r_wr_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
+ u8 *val, int len)
+ {
+ int ret;
+- u8 buf[len+1];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = i2c,
+@@ -36,6 +39,13 @@ static int cxd2820r_wr_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
+ }
+ };
+
++ if (1 + len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+@@ -55,7 +65,7 @@ static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
+ u8 *val, int len)
+ {
+ int ret;
+- u8 buf[len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = i2c,
+@@ -70,6 +80,13 @@ static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
+ }
+ };
+
++ if (len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret == 2) {
+ memcpy(val, buf, len);
+diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
+index c1c3400b..cadcae4c 100644
+--- a/drivers/media/dvb-frontends/itd1000.c
++++ b/drivers/media/dvb-frontends/itd1000.c
+@@ -31,6 +31,9 @@
+ #include "itd1000.h"
+ #include "itd1000_priv.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ static int debug;
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+@@ -52,10 +55,18 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+ /* don't write more than one byte with flexcop behind */
+ static int itd1000_write_regs(struct itd1000_state *state, u8 reg, u8 v[], u8 len)
+ {
+- u8 buf[1+len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg = {
+ .addr = state->cfg->i2c_address, .flags = 0, .buf = buf, .len = len+1
+ };
++
++ if (1 + len > sizeof(buf)) {
++ printk(KERN_WARNING
++ "itd1000: i2c wr reg=%04x: len=%d is too big!\n",
++ reg, len);
++ return -EINVAL;
++ }
++
+ buf[0] = reg;
+ memcpy(&buf[1], v, len);
+
+diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
+index ec388c1d..a74ac0dd 100644
+--- a/drivers/media/dvb-frontends/mt312.c
++++ b/drivers/media/dvb-frontends/mt312.c
+@@ -36,6 +36,8 @@
+ #include "mt312_priv.h"
+ #include "mt312.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
+
+ struct mt312_state {
+ struct i2c_adapter *i2c;
+@@ -96,9 +98,15 @@ static int mt312_write(struct mt312_state *state, const enum mt312_reg_addr reg,
+ const u8 *src, const size_t count)
+ {
+ int ret;
+- u8 buf[count + 1];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg;
+
++ if (1 + count > sizeof(buf)) {
++ printk(KERN_WARNING
++ "mt312: write: len=%zd is too big!\n", count);
++ return -EINVAL;
++ }
++
+ if (debug) {
+ int i;
+ dprintk("W(%d):", reg & 0x7f);
+diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
+index 8e288940..fbca9856 100644
+--- a/drivers/media/dvb-frontends/nxt200x.c
++++ b/drivers/media/dvb-frontends/nxt200x.c
+@@ -39,6 +39,9 @@
+ */
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ #define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw"
+ #define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw"
+ #define CRC_CCIT_MASK 0x1021
+@@ -95,10 +98,16 @@ static int i2c_readbytes(struct nxt200x_state *state, u8 addr, u8 *buf, u8 len)
+ static int nxt200x_writebytes (struct nxt200x_state* state, u8 reg,
+ const u8 *buf, u8 len)
+ {
+- u8 buf2 [len+1];
++ u8 buf2[MAX_XFER_SIZE];
+ int err;
+ struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf2, .len = len + 1 };
+
++ if (1 + len > sizeof(buf2)) {
++ pr_warn("%s: i2c wr reg=%04x: len=%d is too big!\n",
++ __func__, reg, len);
++ return -EINVAL;
++ }
++
+ buf2[0] = reg;
+ memcpy(&buf2[1], buf, len);
+
+diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
+index 362d26d1..7efb796c 100644
+--- a/drivers/media/dvb-frontends/rtl2830.c
++++ b/drivers/media/dvb-frontends/rtl2830.c
+@@ -27,20 +27,30 @@
+
+ #include "rtl2830_priv.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ /* write multiple hardware registers */
+ static int rtl2830_wr(struct rtl2830_priv *priv, u8 reg, const u8 *val, int len)
+ {
+ int ret;
+- u8 buf[1+len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg.i2c_addr,
+ .flags = 0,
+- .len = 1+len,
++ .len = 1 + len,
+ .buf = buf,
+ }
+ };
+
++ if (1 + len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
+index facb8484..b8dd0a13 100644
+--- a/drivers/media/dvb-frontends/rtl2832.c
++++ b/drivers/media/dvb-frontends/rtl2832.c
+@@ -22,6 +22,9 @@
+ #include "dvb_math.h"
+ #include <linux/bitops.h>
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ int rtl2832_debug;
+ module_param_named(debug, rtl2832_debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+@@ -162,16 +165,23 @@ static const struct rtl2832_reg_entry registers[] = {
+ static int rtl2832_wr(struct rtl2832_priv *priv, u8 reg, u8 *val, int len)
+ {
+ int ret;
+- u8 buf[1+len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg.i2c_addr,
+ .flags = 0,
+- .len = 1+len,
++ .len = 1 + len,
+ .buf = buf,
+ }
+ };
+
++ if (1 + len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
+index e2fec9eb..93eeaf71 100644
+--- a/drivers/media/dvb-frontends/s5h1420.c
++++ b/drivers/media/dvb-frontends/s5h1420.c
+@@ -836,9 +836,16 @@ static u32 s5h1420_tuner_i2c_func(struct i2c_adapter *adapter)
+ static int s5h1420_tuner_i2c_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
+ {
+ struct s5h1420_state *state = i2c_get_adapdata(i2c_adap);
+- struct i2c_msg m[1 + num];
++ struct i2c_msg m[3];
+ u8 tx_open[2] = { CON_1, state->CON_1_val | 1 }; /* repeater stops once there was a stop condition */
+
++ if (1 + num > ARRAY_SIZE(m)) {
++ printk(KERN_WARNING
++ "%s: i2c xfer: num=%d is too big!\n",
++ KBUILD_MODNAME, num);
++ return -EOPNOTSUPP;
++ }
++
+ memset(m, 0, sizeof(struct i2c_msg) * (1 + num));
+
+ m[0].addr = state->config->demod_address;
+@@ -847,7 +854,7 @@ static int s5h1420_tuner_i2c_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c
+
+ memcpy(&m[1], msg, sizeof(struct i2c_msg) * num);
+
+- return i2c_transfer(state->i2c, m, 1+num) == 1 + num ? num : -EIO;
++ return i2c_transfer(state->i2c, m, 1 + num) == 1 + num ? num : -EIO;
+ }
+
+ static struct i2c_algorithm s5h1420_tuner_i2c_algo = {
+diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
+index 3dd5714e..07cd5ea7 100644
+--- a/drivers/media/dvb-frontends/stb0899_drv.c
++++ b/drivers/media/dvb-frontends/stb0899_drv.c
+@@ -32,6 +32,9 @@
+ #include "stb0899_priv.h"
+ #include "stb0899_reg.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ static unsigned int verbose = 0;//1;
+ module_param(verbose, int, 0644);
+
+@@ -499,7 +502,7 @@ err:
+ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data, u32 count)
+ {
+ int ret;
+- u8 buf[2 + count];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg i2c_msg = {
+ .addr = state->config->demod_address,
+ .flags = 0,
+@@ -507,6 +510,13 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data,
+ .len = 2 + count
+ };
+
++ if (2 + count > sizeof(buf)) {
++ printk(KERN_WARNING
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, count);
++ return -EINVAL;
++ }
++
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xff;
+ memcpy(&buf[2], data, count);
+diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
+index 45f9523f..cea175d1 100644
+--- a/drivers/media/dvb-frontends/stb6100.c
++++ b/drivers/media/dvb-frontends/stb6100.c
+@@ -31,6 +31,8 @@
+ static unsigned int verbose;
+ module_param(verbose, int, 0644);
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
+
+ #define FE_ERROR 0
+ #define FE_NOTICE 1
+@@ -183,7 +185,7 @@ static int stb6100_read_reg(struct stb6100_state *state, u8 reg)
+ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int start, int len)
+ {
+ int rc;
+- u8 cmdbuf[len + 1];
++ u8 cmdbuf[MAX_XFER_SIZE];
+ struct i2c_msg msg = {
+ .addr = state->config->tuner_address,
+ .flags = 0,
+@@ -191,6 +193,13 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st
+ .len = len + 1
+ };
+
++	if (1 + len > sizeof(cmdbuf)) {
++ printk(KERN_WARNING
++ "%s: i2c wr: len=%d is too big!\n",
++ KBUILD_MODNAME, len);
++ return -EINVAL;
++ }
++
+ if (unlikely(start < 1 || start + len > STB6100_NUMREGS)) {
+ dprintk(verbose, FE_ERROR, 1, "Invalid register range %d:%d",
+ start, len);
+diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
+index 7b6dba3c..45877273 100644
+--- a/drivers/media/dvb-frontends/stv0367.c
++++ b/drivers/media/dvb-frontends/stv0367.c
+@@ -33,6 +33,9 @@
+ #include "stv0367_regs.h"
+ #include "stv0367_priv.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ static int stvdebug;
+ module_param_named(debug, stvdebug, int, 0644);
+
+@@ -767,7 +770,7 @@ static struct st_register def0367cab[STV0367CAB_NBREGS] = {
+ static
+ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
+ {
+- u8 buf[len + 2];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg = {
+ .addr = state->config->demod_address,
+ .flags = 0,
+@@ -776,6 +779,13 @@ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
+ };
+ int ret;
+
++ if (2 + len > sizeof(buf)) {
++ printk(KERN_WARNING
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ buf[0] = MSB(reg);
+ buf[1] = LSB(reg);
+ memcpy(buf + 2, data, len);
+diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
+index 56d470ad..23e872f8 100644
+--- a/drivers/media/dvb-frontends/stv090x.c
++++ b/drivers/media/dvb-frontends/stv090x.c
+@@ -35,6 +35,9 @@
+ #include "stv090x.h"
+ #include "stv090x_priv.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ static unsigned int verbose;
+ module_param(verbose, int, 0644);
+
+@@ -722,9 +725,16 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8
+ {
+ const struct stv090x_config *config = state->config;
+ int ret;
+- u8 buf[2 + count];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg i2c_msg = { .addr = config->address, .flags = 0, .buf = buf, .len = 2 + count };
+
++ if (2 + count > sizeof(buf)) {
++ printk(KERN_WARNING
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, count);
++ return -EINVAL;
++ }
++
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xff;
+ memcpy(&buf[2], data, count);
+diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
+index 20b5fa92..b1425830 100644
+--- a/drivers/media/dvb-frontends/stv6110.c
++++ b/drivers/media/dvb-frontends/stv6110.c
+@@ -30,6 +30,9 @@
+
+ #include "stv6110.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ static int debug;
+
+ struct stv6110_priv {
+@@ -68,7 +71,7 @@ static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[],
+ {
+ struct stv6110_priv *priv = fe->tuner_priv;
+ int rc;
+- u8 cmdbuf[len + 1];
++ u8 cmdbuf[MAX_XFER_SIZE];
+ struct i2c_msg msg = {
+ .addr = priv->i2c_address,
+ .flags = 0,
+@@ -78,6 +81,13 @@ static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[],
+
+ dprintk("%s\n", __func__);
+
++ if (1 + len > sizeof(cmdbuf)) {
++ printk(KERN_WARNING
++ "%s: i2c wr: len=%d is too big!\n",
++ KBUILD_MODNAME, len);
++ return -EINVAL;
++ }
++
+ if (start + len > 8)
+ return -EINVAL;
+
+diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
+index f36cab12..e66154e5 100644
+--- a/drivers/media/dvb-frontends/stv6110x.c
++++ b/drivers/media/dvb-frontends/stv6110x.c
+@@ -32,6 +32,9 @@
+ #include "stv6110x.h"
+ #include "stv6110x_priv.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ static unsigned int verbose;
+ module_param(verbose, int, 0644);
+ MODULE_PARM_DESC(verbose, "Set Verbosity level");
+@@ -61,7 +64,8 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da
+ {
+ int ret;
+ const struct stv6110x_config *config = stv6110x->config;
+- u8 buf[len + 1];
++ u8 buf[MAX_XFER_SIZE];
++
+ struct i2c_msg msg = {
+ .addr = config->addr,
+ .flags = 0,
+@@ -69,6 +73,13 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da
+ .len = len + 1
+ };
+
++ if (1 + len > sizeof(buf)) {
++ printk(KERN_WARNING
++ "%s: i2c wr: len=%d is too big!\n",
++ KBUILD_MODNAME, len);
++ return -EINVAL;
++ }
++
+ if (start + len > 8)
+ return -EINVAL;
+
+diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
+index e79749cf..8ad3a57c 100644
+--- a/drivers/media/dvb-frontends/tda10071.c
++++ b/drivers/media/dvb-frontends/tda10071.c
+@@ -20,6 +20,9 @@
+
+ #include "tda10071_priv.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ static struct dvb_frontend_ops tda10071_ops;
+
+ /* write multiple registers */
+@@ -27,16 +30,23 @@ static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
+ int len)
+ {
+ int ret;
+- u8 buf[len+1];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg.demod_i2c_addr,
+ .flags = 0,
+- .len = sizeof(buf),
++ .len = 1 + len,
+ .buf = buf,
+ }
+ };
+
++ if (1 + len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+@@ -56,7 +66,7 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
+ int len)
+ {
+ int ret;
+- u8 buf[len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cfg.demod_i2c_addr,
+@@ -66,11 +76,18 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
+ }, {
+ .addr = priv->cfg.demod_i2c_addr,
+ .flags = I2C_M_RD,
+- .len = sizeof(buf),
++ .len = len,
+ .buf = buf,
+ }
+ };
+
++ if (len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++			"%s: i2c rd reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret == 2) {
+ memcpy(val, buf, len);
+diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
+index d281f77d..2c54586a 100644
+--- a/drivers/media/dvb-frontends/tda18271c2dd.c
++++ b/drivers/media/dvb-frontends/tda18271c2dd.c
+@@ -34,6 +34,9 @@
+ #include "dvb_frontend.h"
+ #include "tda18271c2dd.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ struct SStandardParam {
+ s32 m_IFFrequency;
+ u32 m_BandWidth;
+@@ -139,11 +142,18 @@ static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
+ static int WriteRegs(struct tda_state *state,
+ u8 SubAddr, u8 *Regs, u16 nRegs)
+ {
+- u8 data[nRegs+1];
++ u8 data[MAX_XFER_SIZE];
++
++ if (1 + nRegs > sizeof(data)) {
++ printk(KERN_WARNING
++ "%s: i2c wr: len=%d is too big!\n",
++ KBUILD_MODNAME, nRegs);
++ return -EINVAL;
++ }
+
+ data[0] = SubAddr;
+ memcpy(data + 1, Regs, nRegs);
+- return i2c_write(state->i2c, state->adr, data, nRegs+1);
++ return i2c_write(state->i2c, state->adr, data, nRegs + 1);
+ }
+
+ static int WriteReg(struct tda_state *state, u8 SubAddr, u8 Reg)
+diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
+index eff9c5fd..91b6b2e9 100644
+--- a/drivers/media/dvb-frontends/zl10039.c
++++ b/drivers/media/dvb-frontends/zl10039.c
+@@ -30,6 +30,9 @@
+
+ static int debug;
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ #define dprintk(args...) \
+ do { \
+ if (debug) \
+@@ -98,7 +101,7 @@ static int zl10039_write(struct zl10039_state *state,
+ const enum zl10039_reg_addr reg, const u8 *src,
+ const size_t count)
+ {
+- u8 buf[count + 1];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg = {
+ .addr = state->i2c_addr,
+ .flags = 0,
+@@ -106,6 +109,13 @@ static int zl10039_write(struct zl10039_state *state,
+ .len = count + 1,
+ };
+
++ if (1 + count > sizeof(buf)) {
++ printk(KERN_WARNING
++ "%s: i2c wr reg=%04x: len=%zd is too big!\n",
++ KBUILD_MODNAME, reg, count);
++ return -EINVAL;
++ }
++
+ dprintk("%s\n", __func__);
+ /* Write register address and data in one go */
+ buf[0] = reg;
+diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
+index 004d8ace..6386ced9 100644
+--- a/drivers/media/pci/cx18/cx18-driver.c
++++ b/drivers/media/pci/cx18/cx18-driver.c
+@@ -324,23 +324,26 @@ static void cx18_eeprom_dump(struct cx18 *cx, unsigned char *eedata, int len)
+ /* Hauppauge card? get values from tveeprom */
+ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
+ {
+- struct i2c_client c;
++ struct i2c_client *c;
+ u8 eedata[256];
+
+- memset(&c, 0, sizeof(c));
+- strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
+- c.adapter = &cx->i2c_adap[0];
+- c.addr = 0xA0 >> 1;
++	c = kzalloc(sizeof(*c), GFP_KERNEL);
++	if (!c)
++		return;
++
++ strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name));
++ c->adapter = &cx->i2c_adap[0];
++ c->addr = 0xa0 >> 1;
+
+ memset(tv, 0, sizeof(*tv));
+- if (tveeprom_read(&c, eedata, sizeof(eedata)))
+- return;
++ if (tveeprom_read(c, eedata, sizeof(eedata)))
++ goto ret;
+
+ switch (cx->card->type) {
+ case CX18_CARD_HVR_1600_ESMT:
+ case CX18_CARD_HVR_1600_SAMSUNG:
+ case CX18_CARD_HVR_1600_S5H1411:
+- tveeprom_hauppauge_analog(&c, tv, eedata);
++ tveeprom_hauppauge_analog(c, tv, eedata);
+ break;
+ case CX18_CARD_YUAN_MPC718:
+ case CX18_CARD_GOTVIEW_PCI_DVD3:
+@@ -354,6 +357,9 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
+ cx18_eeprom_dump(cx, eedata, sizeof(eedata));
+ break;
+ }
++
++ret:
++ kfree(c);
+ }
+
+ static void cx18_process_eeprom(struct cx18 *cx)
+diff --git a/drivers/media/pci/cx23885/cimax2.c b/drivers/media/pci/cx23885/cimax2.c
+index 73448491..16fa7ea4 100644
+--- a/drivers/media/pci/cx23885/cimax2.c
++++ b/drivers/media/pci/cx23885/cimax2.c
+@@ -26,6 +26,10 @@
+ #include "cx23885.h"
+ #include "cimax2.h"
+ #include "dvb_ca_en50221.h"
++
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ /**** Bit definitions for MC417_RWD and MC417_OEN registers ***
+ bits 31-16
+ +-----------+
+@@ -125,7 +129,7 @@ static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
+ u8 *buf, int len)
+ {
+ int ret;
+- u8 buffer[len + 1];
++ u8 buffer[MAX_XFER_SIZE];
+
+ struct i2c_msg msg = {
+ .addr = addr,
+@@ -134,6 +138,13 @@ static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
+ .len = len + 1
+ };
+
++ if (1 + len > sizeof(buffer)) {
++ printk(KERN_WARNING
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ buffer[0] = reg;
+ memcpy(&buffer[1], buf, len);
+
+diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c
+index f1cbfe52..6299d5da 100644
+--- a/drivers/media/pci/ttpci/av7110_hw.c
++++ b/drivers/media/pci/ttpci/av7110_hw.c
+@@ -22,7 +22,7 @@
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ *
+- * the project's page is at http://www.linuxtv.org/
++ * the project's page is at http://www.linuxtv.org/
+ */
+
+ /* for debugging ARM communication: */
+@@ -40,6 +40,14 @@
+
+ #define _NOHANDSHAKE
+
++/*
++ * Max transfer size done by av7110_fw_cmd()
++ *
++ * The maximum size passed to this function is 6 bytes. The buffer also
++ * uses two additional ones for type and size. So, 8 bytes is enough.
++ */
++#define MAX_XFER_SIZE 8
++
+ /****************************************************************************
+ * DEBI functions
+ ****************************************************************************/
+@@ -488,11 +496,18 @@ static int av7110_send_fw_cmd(struct av7110 *av7110, u16* buf, int length)
+ int av7110_fw_cmd(struct av7110 *av7110, int type, int com, int num, ...)
+ {
+ va_list args;
+- u16 buf[num + 2];
++ u16 buf[MAX_XFER_SIZE];
+ int i, ret;
+
+ // dprintk(4, "%p\n", av7110);
+
++	if (2 + num > ARRAY_SIZE(buf)) {
++ printk(KERN_WARNING
++ "%s: %s len=%d is too big!\n",
++ KBUILD_MODNAME, __func__, num);
++ return -EINVAL;
++ }
++
+ buf[0] = ((type << 8) | com);
+ buf[1] = num;
+
+diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
+index 6c96e489..72971a8d 100644
+--- a/drivers/media/tuners/e4000.c
++++ b/drivers/media/tuners/e4000.c
+@@ -21,20 +21,30 @@
+ #include "e4000_priv.h"
+ #include <linux/math64.h>
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ /* write multiple registers */
+ static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
+ {
+ int ret;
+- u8 buf[1 + len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+ .flags = 0,
+- .len = sizeof(buf),
++ .len = 1 + len,
+ .buf = buf,
+ }
+ };
+
++ if (1 + len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+@@ -54,7 +64,7 @@ static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
+ static int e4000_rd_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
+ {
+ int ret;
+- u8 buf[len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+@@ -64,11 +74,18 @@ static int e4000_rd_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
+ }, {
+ .addr = priv->cfg->i2c_addr,
+ .flags = I2C_M_RD,
+- .len = sizeof(buf),
++ .len = len,
+ .buf = buf,
+ }
+ };
+
++ if (len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c rd reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret == 2) {
+ memcpy(val, buf, len);
+diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
+index 81f38aae..3aecaf46 100644
+--- a/drivers/media/tuners/fc2580.c
++++ b/drivers/media/tuners/fc2580.c
+@@ -20,6 +20,9 @@
+
+ #include "fc2580_priv.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ /*
+ * TODO:
+ * I2C write and read works only for one single register. Multiple registers
+@@ -41,16 +44,23 @@
+ static int fc2580_wr_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len)
+ {
+ int ret;
+- u8 buf[1 + len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+ .flags = 0,
+- .len = sizeof(buf),
++ .len = 1 + len,
+ .buf = buf,
+ }
+ };
+
++ if (1 + len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+@@ -69,7 +79,7 @@ static int fc2580_wr_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len)
+ static int fc2580_rd_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len)
+ {
+ int ret;
+- u8 buf[len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+@@ -79,11 +89,18 @@ static int fc2580_rd_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len)
+ }, {
+ .addr = priv->cfg->i2c_addr,
+ .flags = I2C_M_RD,
+- .len = sizeof(buf),
++ .len = len,
+ .buf = buf,
+ }
+ };
+
++ if (len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c rd reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret == 2) {
+ memcpy(val, buf, len);
+diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
+index e4a84ee2..abe256e1 100644
+--- a/drivers/media/tuners/tda18212.c
++++ b/drivers/media/tuners/tda18212.c
+@@ -20,6 +20,9 @@
+
+ #include "tda18212.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ struct tda18212_priv {
+ struct tda18212_config *cfg;
+ struct i2c_adapter *i2c;
+@@ -32,16 +35,23 @@ static int tda18212_wr_regs(struct tda18212_priv *priv, u8 reg, u8 *val,
+ int len)
+ {
+ int ret;
+- u8 buf[len+1];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg->i2c_address,
+ .flags = 0,
+- .len = sizeof(buf),
++ .len = 1 + len,
+ .buf = buf,
+ }
+ };
+
++ if (1 + len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+@@ -61,7 +71,7 @@ static int tda18212_rd_regs(struct tda18212_priv *priv, u8 reg, u8 *val,
+ int len)
+ {
+ int ret;
+- u8 buf[len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cfg->i2c_address,
+@@ -71,11 +81,18 @@ static int tda18212_rd_regs(struct tda18212_priv *priv, u8 reg, u8 *val,
+ }, {
+ .addr = priv->cfg->i2c_address,
+ .flags = I2C_M_RD,
+- .len = sizeof(buf),
++ .len = len,
+ .buf = buf,
+ }
+ };
+
++ if (len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c rd reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret == 2) {
+ memcpy(val, buf, len);
+diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
+index 2d31aeb6..9300e936 100644
+--- a/drivers/media/tuners/tda18218.c
++++ b/drivers/media/tuners/tda18218.c
+@@ -20,11 +20,14 @@
+
+ #include "tda18218_priv.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ /* write multiple registers */
+ static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
+ {
+ int ret = 0, len2, remaining;
+- u8 buf[1 + len];
++ u8 buf[MAX_XFER_SIZE];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg->i2c_address,
+@@ -33,6 +36,13 @@ static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
+ }
+ };
+
++ if (1 + len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ for (remaining = len; remaining > 0;
+ remaining -= (priv->cfg->i2c_wr_max - 1)) {
+ len2 = remaining;
+@@ -63,7 +73,7 @@ static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
+ static int tda18218_rd_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
+ {
+ int ret;
+- u8 buf[reg+len]; /* we must start read always from reg 0x00 */
++ u8 buf[MAX_XFER_SIZE]; /* we must start read always from reg 0x00 */
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cfg->i2c_address,
+@@ -73,11 +83,18 @@ static int tda18218_rd_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
+ }, {
+ .addr = priv->cfg->i2c_address,
+ .flags = I2C_M_RD,
+- .len = sizeof(buf),
++ .len = reg + len,
+ .buf = buf,
+ }
+ };
+
++ if (reg + len > sizeof(buf)) {
++ dev_warn(&priv->i2c->dev,
++			"%s: i2c rd reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, len);
++ return -EINVAL;
++ }
++
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret == 2) {
+ memcpy(val, &buf[reg], len);
+diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
+index 878d2c4d..9771cd83 100644
+--- a/drivers/media/tuners/tuner-xc2028.c
++++ b/drivers/media/tuners/tuner-xc2028.c
+@@ -24,6 +24,9 @@
+ #include <linux/dvb/frontend.h>
+ #include "dvb_frontend.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 80
++
+ /* Registers (Write-only) */
+ #define XREG_INIT 0x00
+ #define XREG_RF_FREQ 0x02
+@@ -547,7 +550,10 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type,
+ {
+ struct xc2028_data *priv = fe->tuner_priv;
+ int pos, rc;
+- unsigned char *p, *endp, buf[priv->ctrl.max_len];
++ unsigned char *p, *endp, buf[MAX_XFER_SIZE];
++
++ if (priv->ctrl.max_len > sizeof(buf))
++ priv->ctrl.max_len = sizeof(buf);
+
+ tuner_dbg("%s called\n", __func__);
+
+diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
+index d556042c..da47d239 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9015.c
++++ b/drivers/media/usb/dvb-usb-v2/af9015.c
+@@ -397,12 +397,13 @@ error:
+ return ret;
+ }
+
++#define AF9015_EEPROM_SIZE 256
++
+ /* hash (and dump) eeprom */
+ static int af9015_eeprom_hash(struct dvb_usb_device *d)
+ {
+ struct af9015_state *state = d_to_priv(d);
+ int ret, i;
+- static const unsigned int AF9015_EEPROM_SIZE = 256;
+ u8 buf[AF9015_EEPROM_SIZE];
+ struct req_t req = {READ_I2C, AF9015_I2C_EEPROM, 0, 0, 1, 1, NULL};
+
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index 1ea17dc2..c8fcd784 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -21,6 +21,9 @@
+
+ #include "af9035.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+ static u16 af9035_checksum(const u8 *buf, size_t len)
+@@ -126,10 +129,16 @@ exit:
+ /* write multiple registers */
+ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
+ {
+- u8 wbuf[6 + len];
++ u8 wbuf[MAX_XFER_SIZE];
+ u8 mbox = (reg >> 16) & 0xff;
+ struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL };
+
++ if (6 + len > sizeof(wbuf)) {
++ dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n",
++ KBUILD_MODNAME, len);
++ return -EOPNOTSUPP;
++ }
++
+ wbuf[0] = len;
+ wbuf[1] = 2;
+ wbuf[2] = 0;
+@@ -228,9 +237,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ msg[1].len);
+ } else {
+ /* I2C */
+- u8 buf[5 + msg[0].len];
++ u8 buf[MAX_XFER_SIZE];
+ struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
+ buf, msg[1].len, msg[1].buf };
++
++ if (5 + msg[0].len > sizeof(buf)) {
++ dev_warn(&d->udev->dev,
++ "%s: i2c xfer: len=%d is too big!\n",
++ KBUILD_MODNAME, msg[0].len);
++ return -EOPNOTSUPP;
++ }
+ req.mbox |= ((msg[0].addr & 0x80) >> 3);
+ buf[0] = msg[1].len;
+ buf[1] = msg[0].addr << 1;
+@@ -257,9 +273,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ msg[0].len - 3);
+ } else {
+ /* I2C */
+- u8 buf[5 + msg[0].len];
++ u8 buf[MAX_XFER_SIZE];
+ struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf,
+ 0, NULL };
++
++ if (5 + msg[0].len > sizeof(buf)) {
++ dev_warn(&d->udev->dev,
++ "%s: i2c xfer: len=%d is too big!\n",
++ KBUILD_MODNAME, msg[0].len);
++ return -EOPNOTSUPP;
++ }
+ req.mbox |= ((msg[0].addr & 0x80) >> 3);
+ buf[0] = msg[0].len;
+ buf[1] = msg[0].addr << 1;
+diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+index e97964ef..2627553f 100644
+--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
++++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+@@ -23,6 +23,9 @@
+ #include "lgdt3305.h"
+ #include "lg2160.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ int dvb_usb_mxl111sf_debug;
+ module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644);
+ MODULE_PARM_DESC(debug, "set debugging level "
+@@ -57,7 +60,12 @@ int mxl111sf_ctrl_msg(struct dvb_usb_device *d,
+ {
+ int wo = (rbuf == NULL || rlen == 0); /* write-only */
+ int ret;
+- u8 sndbuf[1+wlen];
++ u8 sndbuf[MAX_XFER_SIZE];
++
++ if (1 + wlen > sizeof(sndbuf)) {
++ pr_warn("%s: len=%d is too big!\n", __func__, wlen);
++ return -EOPNOTSUPP;
++ }
+
+ pr_debug("%s(wlen = %d, rlen = %d)\n", __func__, wlen, rlen);
+
+diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
+index 3940bb0f..20e345d9 100644
+--- a/drivers/media/usb/dvb-usb/cxusb.c
++++ b/drivers/media/usb/dvb-usb/cxusb.c
+@@ -43,6 +43,9 @@
+ #include "lgs8gxx.h"
+ #include "atbm8830.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ /* debug */
+ static int dvb_usb_cxusb_debug;
+ module_param_named(debug, dvb_usb_cxusb_debug, int, 0644);
+@@ -57,7 +60,14 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d,
+ u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+ {
+ int wo = (rbuf == NULL || rlen == 0); /* write-only */
+- u8 sndbuf[1+wlen];
++ u8 sndbuf[MAX_XFER_SIZE];
++
++ if (1 + wlen > sizeof(sndbuf)) {
++ warn("i2c wr: len=%d is too big!\n",
++ wlen);
++ return -EOPNOTSUPP;
++ }
++
+ memset(sndbuf, 0, 1+wlen);
+
+ sndbuf[0] = cmd;
+@@ -158,7 +168,13 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+
+ if (msg[i].flags & I2C_M_RD) {
+ /* read only */
+- u8 obuf[3], ibuf[1+msg[i].len];
++ u8 obuf[3], ibuf[MAX_XFER_SIZE];
++
++ if (1 + msg[i].len > sizeof(ibuf)) {
++ warn("i2c rd: len=%d is too big!\n",
++ msg[i].len);
++ return -EOPNOTSUPP;
++ }
+ obuf[0] = 0;
+ obuf[1] = msg[i].len;
+ obuf[2] = msg[i].addr;
+@@ -172,7 +188,18 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ } else if (i+1 < num && (msg[i+1].flags & I2C_M_RD) &&
+ msg[i].addr == msg[i+1].addr) {
+ /* write to then read from same address */
+- u8 obuf[3+msg[i].len], ibuf[1+msg[i+1].len];
++ u8 obuf[MAX_XFER_SIZE], ibuf[MAX_XFER_SIZE];
++
++ if (3 + msg[i].len > sizeof(obuf)) {
++ warn("i2c wr: len=%d is too big!\n",
++ msg[i].len);
++ return -EOPNOTSUPP;
++ }
++ if (1 + msg[i + 1].len > sizeof(ibuf)) {
++ warn("i2c rd: len=%d is too big!\n",
++ msg[i + 1].len);
++ return -EOPNOTSUPP;
++ }
+ obuf[0] = msg[i].len;
+ obuf[1] = msg[i+1].len;
+ obuf[2] = msg[i].addr;
+@@ -191,7 +218,13 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ i++;
+ } else {
+ /* write only */
+- u8 obuf[2+msg[i].len], ibuf;
++ u8 obuf[MAX_XFER_SIZE], ibuf;
++
++ if (2 + msg[i].len > sizeof(obuf)) {
++ warn("i2c wr: len=%d is too big!\n",
++ msg[i].len);
++ return -EOPNOTSUPP;
++ }
+ obuf[0] = msg[i].addr;
+ obuf[1] = msg[i].len;
+ memcpy(&obuf[2], msg[i].buf, msg[i].len);
+diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
+index c2dded92..6d68af0c 100644
+--- a/drivers/media/usb/dvb-usb/dibusb-common.c
++++ b/drivers/media/usb/dvb-usb/dibusb-common.c
+@@ -12,6 +12,9 @@
+ #include <linux/kconfig.h>
+ #include "dibusb.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ static int debug;
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "set debugging level (1=info (|-able))." DVB_USB_DEBUG_STATUS);
+@@ -105,11 +108,16 @@ EXPORT_SYMBOL(dibusb2_0_power_ctrl);
+ static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr,
+ u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
+ {
+- u8 sndbuf[wlen+4]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */
++ u8 sndbuf[MAX_XFER_SIZE]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */
+ /* write only ? */
+ int wo = (rbuf == NULL || rlen == 0),
+ len = 2 + wlen + (wo ? 0 : 2);
+
++ if (4 + wlen > sizeof(sndbuf)) {
++ warn("i2c wr: len=%d is too big!\n", wlen);
++ return -EOPNOTSUPP;
++ }
++
+ sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ;
+ sndbuf[1] = (addr << 1) | (wo ? 0 : 1);
+
+diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
+index 6e237b6d..71b22f5a 100644
+--- a/drivers/media/usb/dvb-usb/dw2102.c
++++ b/drivers/media/usb/dvb-usb/dw2102.c
+@@ -30,6 +30,9 @@
+ #include "stb6100_proc.h"
+ #include "m88rs2000.h"
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ #ifndef USB_PID_DW2102
+ #define USB_PID_DW2102 0x2102
+ #endif
+@@ -308,7 +311,14 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
+ case 2: {
+ /* read */
+ /* first write first register number */
+- u8 ibuf[msg[1].len + 2], obuf[3];
++ u8 ibuf[MAX_XFER_SIZE], obuf[3];
++
++ if (2 + msg[1].len > sizeof(ibuf)) {
++ warn("i2c rd: len=%d is too big!\n",
++ msg[1].len);
++ return -EOPNOTSUPP;
++ }
++
+ obuf[0] = msg[0].addr << 1;
+ obuf[1] = msg[0].len;
+ obuf[2] = msg[0].buf[0];
+@@ -325,7 +335,14 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
+ switch (msg[0].addr) {
+ case 0x68: {
+ /* write to register */
+- u8 obuf[msg[0].len + 2];
++ u8 obuf[MAX_XFER_SIZE];
++
++ if (2 + msg[0].len > sizeof(obuf)) {
++ warn("i2c wr: len=%d is too big!\n",
++			     msg[0].len);
++ return -EOPNOTSUPP;
++ }
++
+ obuf[0] = msg[0].addr << 1;
+ obuf[1] = msg[0].len;
+ memcpy(obuf + 2, msg[0].buf, msg[0].len);
+@@ -335,7 +352,14 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
+ }
+ case 0x61: {
+ /* write to tuner */
+- u8 obuf[msg[0].len + 2];
++ u8 obuf[MAX_XFER_SIZE];
++
++ if (2 + msg[0].len > sizeof(obuf)) {
++ warn("i2c wr: len=%d is too big!\n",
++			     msg[0].len);
++ return -EOPNOTSUPP;
++ }
++
+ obuf[0] = msg[0].addr << 1;
+ obuf[1] = msg[0].len;
+ memcpy(obuf + 2, msg[0].buf, msg[0].len);
+@@ -401,7 +425,14 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
+ default: {
+ if (msg[j].flags == I2C_M_RD) {
+ /* read registers */
+- u8 ibuf[msg[j].len + 2];
++ u8 ibuf[MAX_XFER_SIZE];
++
++ if (2 + msg[j].len > sizeof(ibuf)) {
++ warn("i2c rd: len=%d is too big!\n",
++ msg[j].len);
++ return -EOPNOTSUPP;
++ }
++
+ dw210x_op_rw(d->udev, 0xc3,
+ (msg[j].addr << 1) + 1, 0,
+ ibuf, msg[j].len + 2,
+@@ -430,7 +461,14 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
+ } while (len > 0);
+ } else {
+ /* write registers */
+- u8 obuf[msg[j].len + 2];
++ u8 obuf[MAX_XFER_SIZE];
++
++ if (2 + msg[j].len > sizeof(obuf)) {
++ warn("i2c wr: len=%d is too big!\n",
++ msg[j].len);
++ return -EOPNOTSUPP;
++ }
++
+ obuf[0] = msg[j].addr << 1;
+ obuf[1] = msg[j].len;
+ memcpy(obuf + 2, msg[j].buf, msg[j].len);
+@@ -463,7 +501,13 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ case 2: {
+ /* read */
+ /* first write first register number */
+- u8 ibuf[msg[1].len + 2], obuf[3];
++ u8 ibuf[MAX_XFER_SIZE], obuf[3];
++
++ if (2 + msg[1].len > sizeof(ibuf)) {
++ warn("i2c rd: len=%d is too big!\n",
++ msg[1].len);
++ return -EOPNOTSUPP;
++ }
+ obuf[0] = msg[0].addr << 1;
+ obuf[1] = msg[0].len;
+ obuf[2] = msg[0].buf[0];
+@@ -481,7 +525,13 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ case 0x60:
+ case 0x0c: {
+ /* write to register */
+- u8 obuf[msg[0].len + 2];
++ u8 obuf[MAX_XFER_SIZE];
++
++ if (2 + msg[0].len > sizeof(obuf)) {
++ warn("i2c wr: len=%d is too big!\n",
++ msg[0].len);
++ return -EOPNOTSUPP;
++ }
+ obuf[0] = msg[0].addr << 1;
+ obuf[1] = msg[0].len;
+ memcpy(obuf + 2, msg[0].buf, msg[0].len);
+@@ -563,7 +613,14 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ default: {
+ if (msg[j].flags == I2C_M_RD) {
+ /* read registers */
+- u8 ibuf[msg[j].len];
++ u8 ibuf[MAX_XFER_SIZE];
++
++ if (msg[j].len > sizeof(ibuf)) {
++ warn("i2c rd: len=%d is too big!\n",
++ msg[j].len);
++ return -EOPNOTSUPP;
++ }
++
+ dw210x_op_rw(d->udev, 0x91, 0, 0,
+ ibuf, msg[j].len,
+ DW210X_READ_MSG);
+@@ -590,7 +647,14 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ } while (len > 0);
+ } else if (j < (num - 1)) {
+ /* write register addr before read */
+- u8 obuf[msg[j].len + 2];
++ u8 obuf[MAX_XFER_SIZE];
++
++ if (2 + msg[j].len > sizeof(obuf)) {
++ warn("i2c wr: len=%d is too big!\n",
++ msg[j].len);
++ return -EOPNOTSUPP;
++ }
++
+ obuf[0] = msg[j + 1].len;
+ obuf[1] = (msg[j].addr << 1);
+ memcpy(obuf + 2, msg[j].buf, msg[j].len);
+@@ -602,7 +666,13 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ break;
+ } else {
+ /* write registers */
+- u8 obuf[msg[j].len + 2];
++ u8 obuf[MAX_XFER_SIZE];
++
++ if (2 + msg[j].len > sizeof(obuf)) {
++ warn("i2c wr: len=%d is too big!\n",
++ msg[j].len);
++ return -EOPNOTSUPP;
++ }
+ obuf[0] = msg[j].len + 1;
+ obuf[1] = (msg[j].addr << 1);
+ memcpy(obuf + 2, msg[j].buf, msg[j].len);
+diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
+index c85d69da..85a6a341 100644
+--- a/drivers/media/v4l2-core/v4l2-async.c
++++ b/drivers/media/v4l2-core/v4l2-async.c
+@@ -189,30 +189,53 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+ struct v4l2_subdev *sd, *tmp;
+ unsigned int notif_n_subdev = notifier->num_subdevs;
+ unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
+- struct device *dev[n_subdev];
++ struct device **dev;
+ int i = 0;
+
+ if (!notifier->v4l2_dev)
+ return;
+
++ dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL);
++ if (!dev) {
++ dev_err(notifier->v4l2_dev->dev,
++ "Failed to allocate device cache!\n");
++ }
++
+ mutex_lock(&list_lock);
+
+ list_del(&notifier->list);
+
+ list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
+- dev[i] = get_device(sd->dev);
++ struct device *d;
++
++ d = get_device(sd->dev);
+
+ v4l2_async_cleanup(sd);
+
+ /* If we handled USB devices, we'd have to lock the parent too */
+- device_release_driver(dev[i++]);
++ device_release_driver(d);
+
+ if (notifier->unbind)
+ notifier->unbind(notifier, sd, sd->asd);
++
++ /*
++ * Store device at the device cache, in order to call
++ * put_device() on the final step
++ */
++ if (dev)
++ dev[i++] = d;
++ else
++ put_device(d);
+ }
+
+ mutex_unlock(&list_lock);
+
++ /*
++ * Call device_attach() to reprobe devices
++ *
++ * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
++ * executed.
++ */
+ while (i--) {
+ struct device *d = dev[i];
+
+@@ -228,6 +251,7 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+ }
+ put_device(d);
+ }
++ kfree(dev);
+
+ notifier->v4l2_dev = NULL;
+
+diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
+index 9483bc84..e775bfbc 100644
+--- a/drivers/mfd/lpc_ich.c
++++ b/drivers/mfd/lpc_ich.c
+@@ -53,6 +53,7 @@
+ * document number TBD : Wellsburg
+ * document number TBD : Avoton SoC
+ * document number TBD : Coleto Creek
++ * document number TBD : Wildcat Point-LP
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -211,6 +212,7 @@ enum lpc_chipsets {
+ LPC_WBG, /* Wellsburg */
+ LPC_AVN, /* Avoton SoC */
+ LPC_COLETO, /* Coleto Creek */
++ LPC_WPT_LP, /* Wildcat Point-LP */
+ };
+
+ static struct lpc_ich_info lpc_chipset_info[] = {
+@@ -503,6 +505,10 @@ static struct lpc_ich_info lpc_chipset_info[] = {
+ .name = "Coleto Creek",
+ .iTCO_version = 2,
+ },
++ [LPC_WPT_LP] = {
++		.name = "Wildcat Point_LP",
++ .iTCO_version = 2,
++ },
+ };
+
+ /*
+@@ -721,6 +727,13 @@ static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
+ { PCI_VDEVICE(INTEL, 0x1f3a), LPC_AVN},
+ { PCI_VDEVICE(INTEL, 0x1f3b), LPC_AVN},
+ { PCI_VDEVICE(INTEL, 0x2390), LPC_COLETO},
++ { PCI_VDEVICE(INTEL, 0x9cc1), LPC_WPT_LP},
++ { PCI_VDEVICE(INTEL, 0x9cc2), LPC_WPT_LP},
++ { PCI_VDEVICE(INTEL, 0x9cc3), LPC_WPT_LP},
++ { PCI_VDEVICE(INTEL, 0x9cc5), LPC_WPT_LP},
++ { PCI_VDEVICE(INTEL, 0x9cc6), LPC_WPT_LP},
++ { PCI_VDEVICE(INTEL, 0x9cc7), LPC_WPT_LP},
++ { PCI_VDEVICE(INTEL, 0x9cc9), LPC_WPT_LP},
+ { 0, }, /* End of list */
+ };
+ MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
+diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c
+index 3b835f59..573de7bf 100644
+--- a/drivers/mfd/rts5249.c
++++ b/drivers/mfd/rts5249.c
+@@ -130,13 +130,57 @@ static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
+ {
+ int err;
+
+- err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV, 0xFE46);
++ err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV,
++ PHY_REG_REV_RESV | PHY_REG_REV_RXIDLE_LATCHED |
++ PHY_REG_REV_P1_EN | PHY_REG_REV_RXIDLE_EN |
++ PHY_REG_REV_RX_PWST | PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 |
++ PHY_REG_REV_STOP_CLKRD | PHY_REG_REV_STOP_CLKWR);
+ if (err < 0)
+ return err;
+
+ msleep(1);
+
+- return rtsx_pci_write_phy_register(pcr, PHY_BPCR, 0x05C0);
++ err = rtsx_pci_write_phy_register(pcr, PHY_BPCR,
++ PHY_BPCR_IBRXSEL | PHY_BPCR_IBTXSEL |
++ PHY_BPCR_IB_FILTER | PHY_BPCR_CMIRROR_EN);
++ if (err < 0)
++ return err;
++ err = rtsx_pci_write_phy_register(pcr, PHY_PCR,
++ PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 |
++ PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 |
++ PHY_PCR_RSSI_EN);
++ if (err < 0)
++ return err;
++ err = rtsx_pci_write_phy_register(pcr, PHY_RCR2,
++ PHY_RCR2_EMPHASE_EN | PHY_RCR2_NADJR |
++ PHY_RCR2_CDR_CP_10 | PHY_RCR2_CDR_SR_2 |
++ PHY_RCR2_FREQSEL_12 | PHY_RCR2_CPADJEN |
++ PHY_RCR2_CDR_SC_8 | PHY_RCR2_CALIB_LATE);
++ if (err < 0)
++ return err;
++ err = rtsx_pci_write_phy_register(pcr, PHY_FLD4,
++ PHY_FLD4_FLDEN_SEL | PHY_FLD4_REQ_REF |
++ PHY_FLD4_RXAMP_OFF | PHY_FLD4_REQ_ADDA |
++ PHY_FLD4_BER_COUNT | PHY_FLD4_BER_TIMER |
++ PHY_FLD4_BER_CHK_EN);
++ if (err < 0)
++ return err;
++ err = rtsx_pci_write_phy_register(pcr, PHY_RDR, PHY_RDR_RXDSEL_1_9);
++ if (err < 0)
++ return err;
++ err = rtsx_pci_write_phy_register(pcr, PHY_RCR1,
++ PHY_RCR1_ADP_TIME | PHY_RCR1_VCO_COARSE);
++ if (err < 0)
++ return err;
++ err = rtsx_pci_write_phy_register(pcr, PHY_FLD3,
++ PHY_FLD3_TIMER_4 | PHY_FLD3_TIMER_6 |
++ PHY_FLD3_RXDELINK);
++ if (err < 0)
++ return err;
++ return rtsx_pci_write_phy_register(pcr, PHY_TUNE,
++ PHY_TUNE_TUNEREF_1_0 | PHY_TUNE_VBGSEL_1252 |
++ PHY_TUNE_SDBUS_33 | PHY_TUNE_TUNED18 |
++ PHY_TUNE_TUNED12);
+ }
+
+ static int rts5249_turn_on_led(struct rtsx_pcr *pcr)
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 69e438ee..92c18779 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -589,6 +589,13 @@ static void atmci_timeout_timer(unsigned long data)
+ if (host->mrq->cmd->data) {
+ host->mrq->cmd->data->error = -ETIMEDOUT;
+ host->data = NULL;
++ /*
++			 * With some SDIO modules, the DMA transfer sometimes hangs.
++			 * If stop_transfer() is not called, the DMA request is not
++			 * removed and the following ones are queued but never processed.
++ */
++ if (host->state == STATE_DATA_XFER)
++ host->stop_transfer(host);
+ } else {
+ host->mrq->cmd->error = -ETIMEDOUT;
+ host->cmd = NULL;
+@@ -1803,12 +1810,14 @@ static void atmci_tasklet_func(unsigned long priv)
+ if (unlikely(status)) {
+ host->stop_transfer(host);
+ host->data = NULL;
+- if (status & ATMCI_DTOE) {
+- data->error = -ETIMEDOUT;
+- } else if (status & ATMCI_DCRCE) {
+- data->error = -EILSEQ;
+- } else {
+- data->error = -EIO;
++ if (data) {
++ if (status & ATMCI_DTOE) {
++ data->error = -ETIMEDOUT;
++ } else if (status & ATMCI_DCRCE) {
++ data->error = -EILSEQ;
++ } else {
++ data->error = -EIO;
++ }
+ }
+ }
+
+diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
+index 6bc9618a..f111f04f 100644
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -78,7 +78,7 @@
+
+ /* Define max times to check status register before we give up. */
+ #define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
+-#define MAX_CMD_SIZE 5
++#define MAX_CMD_SIZE 6
+
+ #define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
+
+@@ -992,15 +992,13 @@ static int m25p_probe(struct spi_device *spi)
+ }
+ }
+
+- flash = kzalloc(sizeof *flash, GFP_KERNEL);
++ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
+ if (!flash)
+ return -ENOMEM;
+- flash->command = kmalloc(MAX_CMD_SIZE + (flash->fast_read ? 1 : 0),
+- GFP_KERNEL);
+- if (!flash->command) {
+- kfree(flash);
++
++ flash->command = devm_kzalloc(&spi->dev, MAX_CMD_SIZE, GFP_KERNEL);
++ if (!flash->command)
+ return -ENOMEM;
+- }
+
+ flash->spi = spi;
+ mutex_init(&flash->lock);
+@@ -1133,14 +1131,10 @@ static int m25p_probe(struct spi_device *spi)
+ static int m25p_remove(struct spi_device *spi)
+ {
+ struct m25p *flash = spi_get_drvdata(spi);
+- int status;
+
+ /* Clean up MTD stuff. */
+- status = mtd_device_unregister(&flash->mtd);
+- if (status == 0) {
+- kfree(flash->command);
+- kfree(flash);
+- }
++ mtd_device_unregister(&flash->mtd);
++
+ return 0;
+ }
+
+diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
+index 060feeaf..9be07922 100644
+--- a/drivers/mtd/nand/atmel_nand.c
++++ b/drivers/mtd/nand/atmel_nand.c
+@@ -2177,7 +2177,6 @@ err_no_card:
+ if (host->dma_chan)
+ dma_release_channel(host->dma_chan);
+ err_nand_ioremap:
+- platform_driver_unregister(&atmel_nand_nfc_driver);
+ return res;
+ }
+
+diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+index a9830ff8..a9260178 100644
+--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
++++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+@@ -392,8 +392,6 @@ static void dma_irq_callback(void *param)
+ struct gpmi_nand_data *this = param;
+ struct completion *dma_c = &this->dma_done;
+
+- complete(dma_c);
+-
+ switch (this->dma_type) {
+ case DMA_FOR_COMMAND:
+ dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
+@@ -418,6 +416,8 @@ static void dma_irq_callback(void *param)
+ default:
+ pr_err("in wrong DMA operation.\n");
+ }
++
++ complete(dma_c);
+ }
+
+ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
+@@ -1568,8 +1568,6 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
+
+ static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
+ {
+- int ret;
+-
+ /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
+ if (GPMI_IS_MX23(this))
+ this->swap_block_mark = false;
+@@ -1577,12 +1575,8 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
+ this->swap_block_mark = true;
+
+ /* Set up the medium geometry */
+- ret = gpmi_set_geometry(this);
+- if (ret)
+- return ret;
++ return gpmi_set_geometry(this);
+
+- /* NAND boot init, depends on the gpmi_set_geometry(). */
+- return nand_boot_init(this);
+ }
+
+ static void gpmi_nfc_exit(struct gpmi_nand_data *this)
+@@ -1672,10 +1666,16 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)
+ if (ret)
+ goto err_out;
+
++ chip->options |= NAND_SKIP_BBTSCAN;
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ goto err_out;
+
++ ret = nand_boot_init(this);
++ if (ret)
++ goto err_out;
++ chip->scan_bbt(mtd);
++
+ ppdata.of_node = this->pdev->dev.of_node;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (ret)
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index d340b2f1..d92d94bb 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2981,10 +2981,21 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ sanitize_string(p->model, sizeof(p->model));
+ if (!mtd->name)
+ mtd->name = p->model;
++
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+- mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
++
++ /*
++ * pages_per_block and blocks_per_lun may not be a power-of-2 size
++ * (don't ask me who thought of this...). MTD assumes that these
++ * dimensions will be power-of-2, so just truncate the remaining area.
++ */
++ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
++ mtd->erasesize *= mtd->writesize;
++
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+- chip->chipsize = le32_to_cpu(p->blocks_per_lun);
++
++ /* See erasesize comment */
++ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+
+ if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
+diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
+index 48161ede..69f58b07 100644
+--- a/drivers/net/wireless/ath/ath5k/base.c
++++ b/drivers/net/wireless/ath/ath5k/base.c
+@@ -1663,15 +1663,15 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
+ ah->stats.tx_bytes_count += skb->len;
+ info = IEEE80211_SKB_CB(skb);
+
++ size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
++ memcpy(info->status.rates, bf->rates, size);
++
+ tries[0] = info->status.rates[0].count;
+ tries[1] = info->status.rates[1].count;
+ tries[2] = info->status.rates[2].count;
+
+ ieee80211_tx_info_clear_status(info);
+
+- size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
+- memcpy(info->status.rates, bf->rates, size);
+-
+ for (i = 0; i < ts->ts_final_idx; i++) {
+ struct ieee80211_tx_rate *r =
+ &info->status.rates[i];
+diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
+index 1576104e..9bf88987 100644
+--- a/drivers/net/wireless/mwifiex/sdio.c
++++ b/drivers/net/wireless/mwifiex/sdio.c
+@@ -1029,7 +1029,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb, u32 upld_typ)
+ {
+ u8 *cmd_buf;
++ __le16 *curr_ptr = (__le16 *)skb->data;
++ u16 pkt_len = le16_to_cpu(*curr_ptr);
+
++ skb_trim(skb, pkt_len);
+ skb_pull(skb, INTF_HEADER_LEN);
+
+ switch (upld_typ) {
+diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
+index 1cfe5a73..92f76d65 100644
+--- a/drivers/net/wireless/mwifiex/uap_txrx.c
++++ b/drivers/net/wireless/mwifiex/uap_txrx.c
+@@ -97,6 +97,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
+ struct mwifiex_txinfo *tx_info;
+ int hdr_chop;
+ struct timeval tv;
++ struct ethhdr *p_ethhdr;
+ u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+
+ uap_rx_pd = (struct uap_rxpd *)(skb->data);
+@@ -112,14 +113,36 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
+ }
+
+ if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
+- rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)))
++ rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
++ /* Replace the 803 header and rfc1042 header (llc/snap) with
++ * an Ethernet II header, keep the src/dst and snap_type
++ * (ethertype).
++ *
++ * The firmware only passes up SNAP frames converting all RX
++ * data from 802.11 to 802.2/LLC/SNAP frames.
++ *
++ * To create the Ethernet II, just move the src, dst address
++ * right before the snap_type.
++ */
++ p_ethhdr = (struct ethhdr *)
++ ((u8 *)(&rx_pkt_hdr->eth803_hdr)
++ + sizeof(rx_pkt_hdr->eth803_hdr)
++ + sizeof(rx_pkt_hdr->rfc1042_hdr)
++ - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
++ - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
++ - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
++ memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
++ sizeof(p_ethhdr->h_source));
++ memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
++ sizeof(p_ethhdr->h_dest));
+ /* Chop off the rxpd + the excess memory from
+ * 802.2/llc/snap header that was removed.
+ */
+- hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd;
+- else
++ hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd;
++ } else {
+ /* Chop off the rxpd */
+ hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
++ }
+
+ /* Chop off the leading header bytes so the it points
+ * to the start of either the reconstructed EthII frame
+diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
+index 5970ff6f..d498b02f 100644
+--- a/drivers/net/wireless/prism54/islpci_dev.c
++++ b/drivers/net/wireless/prism54/islpci_dev.c
+@@ -811,6 +811,10 @@ static const struct net_device_ops islpci_netdev_ops = {
+ .ndo_validate_addr = eth_validate_addr,
+ };
+
++static struct device_type wlan_type = {
++ .name = "wlan",
++};
++
+ struct net_device *
+ islpci_setup(struct pci_dev *pdev)
+ {
+@@ -821,9 +825,8 @@ islpci_setup(struct pci_dev *pdev)
+ return ndev;
+
+ pci_set_drvdata(pdev, ndev);
+-#if defined(SET_NETDEV_DEV)
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+-#endif
++ SET_NETDEV_DEVTYPE(ndev, &wlan_type);
+
+ /* setup the structure members */
+ ndev->base_addr = pci_resource_start(pdev, 0);
+diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
+index 3d53a09d..38ed9a3e 100644
+--- a/drivers/net/wireless/rt2x00/rt2400pci.c
++++ b/drivers/net/wireless/rt2x00/rt2400pci.c
+@@ -1261,7 +1261,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
+ */
+ rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
+ rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
+- rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) -
++ rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
+ entry->queue->rt2x00dev->rssi_offset;
+ rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
+
+diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
+index 8bb4a9a0..9fa90a25 100644
+--- a/drivers/net/wireless/rtlwifi/base.c
++++ b/drivers/net/wireless/rtlwifi/base.c
+@@ -37,6 +37,7 @@
+
+ #include <linux/ip.h>
+ #include <linux/module.h>
++#include <linux/udp.h>
+
+ /*
+ *NOTICE!!!: This file will be very big, we should
+@@ -1074,64 +1075,52 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+ if (!ieee80211_is_data(fc))
+ return false;
+
++ ip = (const struct iphdr *)(skb->data + mac_hdr_len +
++ SNAP_SIZE + PROTOC_TYPE_SIZE);
++ ether_type = be16_to_cpup((__be16 *)
++ (skb->data + mac_hdr_len + SNAP_SIZE));
+
+- ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
+- SNAP_SIZE + PROTOC_TYPE_SIZE);
+- ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
+- /* ether_type = ntohs(ether_type); */
+-
+- if (ETH_P_IP == ether_type) {
+- if (IPPROTO_UDP == ip->protocol) {
+- struct udphdr *udp = (struct udphdr *)((u8 *) ip +
+- (ip->ihl << 2));
+- if (((((u8 *) udp)[1] == 68) &&
+- (((u8 *) udp)[3] == 67)) ||
+- ((((u8 *) udp)[1] == 67) &&
+- (((u8 *) udp)[3] == 68))) {
+- /*
+- * 68 : UDP BOOTP client
+- * 67 : UDP BOOTP server
+- */
+- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
+- DBG_DMESG, "dhcp %s !!\n",
+- is_tx ? "Tx" : "Rx");
+-
+- if (is_tx) {
+- rtlpriv->enter_ps = false;
+- schedule_work(&rtlpriv->
+- works.lps_change_work);
+- ppsc->last_delaylps_stamp_jiffies =
+- jiffies;
+- }
++ switch (ether_type) {
++ case ETH_P_IP: {
++ struct udphdr *udp;
++ u16 src;
++ u16 dst;
+
+- return true;
+- }
+- }
+- } else if (ETH_P_ARP == ether_type) {
+- if (is_tx) {
+- rtlpriv->enter_ps = false;
+- schedule_work(&rtlpriv->works.lps_change_work);
+- ppsc->last_delaylps_stamp_jiffies = jiffies;
+- }
++ if (ip->protocol != IPPROTO_UDP)
++ return false;
++ udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
++ src = be16_to_cpu(udp->source);
++ dst = be16_to_cpu(udp->dest);
+
+- return true;
+- } else if (ETH_P_PAE == ether_type) {
++ /* If this case involves port 68 (UDP BOOTP client) connecting
++ * with port 67 (UDP BOOTP server), then return true so that
++ * the lowest speed is used.
++ */
++ if (!((src == 68 && dst == 67) || (src == 67 && dst == 68)))
++ return false;
++
++ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
++ "dhcp %s !!\n", is_tx ? "Tx" : "Rx");
++ break;
++ }
++ case ETH_P_ARP:
++ break;
++ case ETH_P_PAE:
+ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
+-
+- if (is_tx) {
+- rtlpriv->enter_ps = false;
+- schedule_work(&rtlpriv->works.lps_change_work);
+- ppsc->last_delaylps_stamp_jiffies = jiffies;
+- }
+-
+- return true;
+- } else if (ETH_P_IPV6 == ether_type) {
+- /* IPv6 */
+- return true;
++ break;
++ case ETH_P_IPV6:
++ /* TODO: Is this right? */
++ return false;
++ default:
++ return false;
+ }
+-
+- return false;
++ if (is_tx) {
++ rtlpriv->enter_ps = false;
++ schedule_work(&rtlpriv->works.lps_change_work);
++ ppsc->last_delaylps_stamp_jiffies = jiffies;
++ }
++ return true;
+ }
+ EXPORT_SYMBOL_GPL(rtl_is_special_data);
+
+diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+index b68cae30..e06971be 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+@@ -143,6 +143,7 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
+ } else {
+ rtlhal->fw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
++ break;
+ }
+ }
+
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+index da4f5871..2c68c138 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+@@ -778,7 +778,7 @@ static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
+
+ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats,
+- struct rx_desc_92c *pdesc,
++ struct rx_desc_92c *p_desc,
+ struct rx_fwinfo_92c *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+@@ -793,11 +793,11 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
+ u32 rssi, total_rssi = 0;
+ bool in_powersavemode = false;
+ bool is_cck_rate;
++ u8 *pdesc = (u8 *)p_desc;
+
+- is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
++ is_cck_rate = RX_HAL_IS_CCK_RATE(p_desc);
+ pstats->packet_matchbssid = packet_match_bssid;
+ pstats->packet_toself = packet_toself;
+- pstats->is_cck = is_cck_rate;
+ pstats->packet_beacon = packet_beacon;
+ pstats->is_cck = is_cck_rate;
+ pstats->RX_SIGQ[0] = -1;
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+index 5a060e53..5c52a8ac 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+@@ -303,10 +303,10 @@ out:
+ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+- u8 *p_desc, struct sk_buff *skb)
++ u8 *pdesc, struct sk_buff *skb)
+ {
+ struct rx_fwinfo_92c *p_drvinfo;
+- struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
++ struct rx_desc_92c *p_desc = (struct rx_desc_92c *)pdesc;
+ u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc);
+
+ stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+@@ -345,11 +345,11 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
+ stats->rx_bufshift);
+- rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
++ rtl92c_translate_rx_signal_stuff(hw, skb, stats, p_desc,
+ p_drvinfo);
+ }
+ /*rx_status->qual = stats->signal; */
+- rx_status->signal = stats->rssi + 10;
++ rx_status->signal = stats->recvsignalpower + 10;
+ /*rx_status->noise = -stats->noise; */
+ return true;
+ }
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+index b8ec718a..542394c8 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+@@ -525,7 +525,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ p_drvinfo);
+ }
+ /*rx_status->qual = stats->signal; */
+- rx_status->signal = stats->rssi + 10;
++ rx_status->signal = stats->recvsignalpower + 10;
+ /*rx_status->noise = -stats->noise; */
+ return true;
+ }
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+index 5061f1db..92d38ab3 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+@@ -265,7 +265,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
+ rtlefuse->pwrgroup_ht40
+ [RF90_PATH_A][chnl - 1]) {
+ pwrdiff_limit[i] =
+- rtlefuse->pwrgroup_ht20
++ rtlefuse->pwrgroup_ht40
+ [RF90_PATH_A][chnl - 1];
+ }
+ } else {
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+index c7095118..7d0f2e20 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+@@ -329,7 +329,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ }
+
+ /*rx_status->qual = stats->signal; */
+- rx_status->signal = stats->rssi + 10;
++ rx_status->signal = stats->recvsignalpower + 10;
+ /*rx_status->noise = -stats->noise; */
+
+ return true;
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index 70325874..ad9c37a4 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -77,11 +77,7 @@
+ #define RTL_SLOT_TIME_9 9
+ #define RTL_SLOT_TIME_20 20
+
+-/*related with tcp/ip. */
+-/*if_ehther.h*/
+-#define ETH_P_PAE 0x888E /*Port Access Entity (IEEE 802.1X) */
+-#define ETH_P_IP 0x0800 /*Internet Protocol packet */
+-#define ETH_P_ARP 0x0806 /*Address Resolution packet */
++/*related to tcp/ip. */
+ #define SNAP_SIZE 6
+ #define PROTOC_TYPE_SIZE 2
+
+diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
+index 696caed5..ce8acc71 100644
+--- a/drivers/pci/pcie/portdrv_pci.c
++++ b/drivers/pci/pcie/portdrv_pci.c
+@@ -223,7 +223,6 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
+ static void pcie_portdrv_remove(struct pci_dev *dev)
+ {
+ pcie_port_device_remove(dev);
+- pci_disable_device(dev);
+ }
+
+ static int error_detected_iter(struct device *device, void *data)
+diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
+index 29f7e4fc..360b9b23 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
++++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
+@@ -335,7 +335,7 @@ static int dove_twsi_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+
+ gcfg1 &= ~DOVE_TWSI_ENABLE_OPTION1;
+- gcfg2 &= ~(DOVE_TWSI_ENABLE_OPTION2 | DOVE_TWSI_ENABLE_OPTION2);
++ gcfg2 &= ~(DOVE_TWSI_ENABLE_OPTION2 | DOVE_TWSI_ENABLE_OPTION3);
+
+ switch (config) {
+ case 1:
+diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
+index 13ec195f..3484dd29 100644
+--- a/drivers/platform/x86/sony-laptop.c
++++ b/drivers/platform/x86/sony-laptop.c
+@@ -140,7 +140,6 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
+ "on the model (default: no change from current value)");
+
+ #ifdef CONFIG_PM_SLEEP
+-static void sony_nc_kbd_backlight_resume(void);
+ static void sony_nc_thermal_resume(void);
+ #endif
+ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+@@ -1486,13 +1485,6 @@ static void sony_nc_function_resume(void)
+ case 0x0135:
+ sony_nc_rfkill_update();
+ break;
+- case 0x0137:
+- case 0x0143:
+- case 0x014b:
+- case 0x014c:
+- case 0x0163:
+- sony_nc_kbd_backlight_resume();
+- break;
+ default:
+ continue;
+ }
+@@ -1894,25 +1886,6 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
+ }
+ }
+
+-#ifdef CONFIG_PM_SLEEP
+-static void sony_nc_kbd_backlight_resume(void)
+-{
+- int ignore = 0;
+-
+- if (!kbdbl_ctl)
+- return;
+-
+- if (kbdbl_ctl->mode == 0)
+- sony_call_snc_handle(kbdbl_ctl->handle, kbdbl_ctl->base,
+- &ignore);
+-
+- if (kbdbl_ctl->timeout != 0)
+- sony_call_snc_handle(kbdbl_ctl->handle,
+- (kbdbl_ctl->base + 0x200) |
+- (kbdbl_ctl->timeout << 0x10), &ignore);
+-}
+-#endif
+-
+ struct battery_care_control {
+ struct device_attribute attrs[2];
+ unsigned int handle;
+diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
+index ba67b2c4..032df379 100644
+--- a/drivers/regulator/pfuze100-regulator.c
++++ b/drivers/regulator/pfuze100-regulator.c
+@@ -308,9 +308,15 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
+ if (ret)
+ return ret;
+
+- if (value & 0x0f) {
+- dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
+- return -ENODEV;
++ switch (value & 0x0f) {
++ /* Freescale misprogrammed 1-3% of parts prior to week 8 of 2013 as ID=8 */
++ case 0x8:
++ dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
++ case 0x0:
++ break;
++ default:
++ dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
++ return -ENODEV;
+ }
+
+ ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value);
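The pfuze100 hunk turns the flat ID check into a switch whose case 0x8 intentionally falls through into case 0x0, accepting the early parts Freescale shipped with a misprogrammed ID while still rejecting everything else. A compilable sketch of that shape (the message text is paraphrased):

    #include <stdio.h>

    /* Accept ID 0 (normal) and ID 8 (known misprogrammed batch);
     * the fallthrough from 0x8 into 0x0 is deliberate. */
    static int id_ok(unsigned int value)
    {
        switch (value & 0x0f) {
        case 0x8:
            printf("assuming misprogrammed ID=0x8\n");
            /* fall through */
        case 0x0:
            return 1;
        default:
            return 0;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n", id_ok(0x0), id_ok(0x8), id_ok(0x3)); /* 1 1 0 */
        return 0;
    }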
+diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
+index 20c271d4..b993ec57 100644
+--- a/drivers/regulator/ti-abb-regulator.c
++++ b/drivers/regulator/ti-abb-regulator.c
+@@ -615,7 +615,7 @@ static int ti_abb_init_table(struct device *dev, struct ti_abb *abb,
+ pname, *volt_table, vset_mask);
+ continue;
+ }
+- info->vset = efuse_val & vset_mask >> __ffs(vset_mask);
++ info->vset = (efuse_val & vset_mask) >> __ffs(vset_mask);
+ dev_dbg(dev, "[%d]v=%d vset=%x\n", i, *volt_table, info->vset);
+ check_abb:
+ switch (info->opp_sel) {
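The one-line ti-abb change is a pure precedence fix: in C, >> binds tighter than &, so efuse_val & vset_mask >> __ffs(vset_mask) shifted the mask before masking and extracted garbage. A tiny demonstration with made-up register values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int efuse_val = 0x00A40000;
        unsigned int vset_mask = 0x00FF0000;   /* field in bits 16..23 */
        unsigned int shift = 16;               /* what __ffs() would return */

        /* buggy: the mask is shifted first, then applied to the value */
        unsigned int wrong = efuse_val & vset_mask >> shift;
        /* fixed: isolate the field, then shift it down */
        unsigned int right = (efuse_val & vset_mask) >> shift;

        printf("wrong=0x%x right=0x%x\n", wrong, right);  /* 0x0 vs 0xa4 */
        return 0;
    }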
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 0a328d0d..bd8c09e7 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -4451,7 +4451,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_snmp_ureq *ureq;
+- int req_len;
++ unsigned int req_len;
+ struct qeth_arp_query_info qinfo = {0, };
+ int rc = 0;
+
+@@ -4467,6 +4467,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
+ /* skip 4 bytes (data_len struct member) to get req_len */
+ if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
+ return -EFAULT;
++ if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
++ sizeof(struct qeth_ipacmd_hdr) -
++ sizeof(struct qeth_ipacmd_setadpparms_hdr)))
++ return -EINVAL;
+ ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
+ if (IS_ERR(ureq)) {
+ QETH_CARD_TEXT(card, 2, "snmpnome");
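The qeth hunk makes req_len unsigned and bounds it against the headroom actually left in the command buffer before memdup_user() trusts it. A hedged userspace analogue of the same validate-then-allocate pattern; BUFSIZE and struct hdr are stand-ins, not the driver's real layout:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BUFSIZE 4096u                     /* stand-in for QETH_BUFSIZE */
    struct hdr { unsigned int data_len; };    /* stand-in for the IPA headers */

    /* Duplicate an untrusted request, rejecting lengths that exceed the
     * room left in the transport buffer: the check the patch adds. */
    static void *dup_request(const void *data, size_t req_len)
    {
        if (req_len > BUFSIZE - sizeof(struct hdr))
            return NULL;                      /* -EINVAL in the kernel */

        unsigned char *buf = malloc(sizeof(struct hdr) + req_len);
        if (!buf)
            return NULL;
        memcpy(buf + sizeof(struct hdr), data, req_len);
        return buf;
    }

    int main(void)
    {
        char payload[16] = "snmp request";
        void *ok  = dup_request(payload, sizeof(payload));
        void *bad = dup_request(payload, BUFSIZE);    /* oversized */

        printf("%s %s\n", ok ? "ok" : "rejected", bad ? "ok" : "rejected");
        free(ok);
        return 0;
    }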
+diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+index 5dec771d..4d340f4a 100644
+--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
++++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+@@ -409,8 +409,8 @@ int ptlrpc_stop_pinger(void)
+ struct l_wait_info lwi = { 0 };
+ int rc = 0;
+
+- if (!thread_is_init(&pinger_thread) &&
+- !thread_is_stopped(&pinger_thread))
++ if (thread_is_init(&pinger_thread) ||
++ thread_is_stopped(&pinger_thread))
+ return -EALREADY;
+
+ ptlrpc_pinger_remove_timeouts();
+diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
+index 11d5338b..0feeaadf 100644
+--- a/drivers/staging/media/lirc/lirc_zilog.c
++++ b/drivers/staging/media/lirc/lirc_zilog.c
+@@ -61,6 +61,9 @@
+ #include <media/lirc_dev.h>
+ #include <media/lirc.h>
+
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ struct IR;
+
+ struct IR_rx {
+@@ -941,7 +944,14 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ } else {
+- unsigned char buf[rbuf->chunk_size];
++ unsigned char buf[MAX_XFER_SIZE];
++
++ if (rbuf->chunk_size > sizeof(buf)) {
++ zilog_error("chunk_size is too big (%d)!\n",
++ rbuf->chunk_size);
++ ret = -EINVAL;
++ break;
++ }
+ m = lirc_buffer_read(rbuf, buf);
+ if (m == rbuf->chunk_size) {
+ ret = copy_to_user((void *)outbuf+written, buf,
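The lirc_zilog fix swaps a variable-length stack array sized by rbuf->chunk_size for a fixed MAX_XFER_SIZE buffer plus an explicit check, so corrupt driver state can no longer overrun the kernel stack. The same defensive shape as a standalone sketch:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_XFER_SIZE 64    /* fixed bound, as the patch introduces */

    /* Copy one chunk through a fixed stack buffer; reject chunk sizes the
     * buffer cannot hold instead of sizing the array from untrusted state. */
    static int read_chunk(unsigned char *out, const unsigned char *src,
                          size_t chunk_size)
    {
        unsigned char buf[MAX_XFER_SIZE];

        if (chunk_size > sizeof(buf))
            return -EINVAL;     /* was a stack overrun with the VLA */

        memcpy(buf, src, chunk_size);
        memcpy(out, buf, chunk_size);
        return 0;
    }

    int main(void)
    {
        unsigned char in[8] = "chunk", out[8];
        printf("%d %d\n", read_chunk(out, in, sizeof(in)),   /* 0 */
                          read_chunk(out, in, 4096));        /* -EINVAL */
        return 0;
    }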
+diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
+index 2c73823d..2a13a41f 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
++++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
+@@ -1115,6 +1115,9 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
+ return _FAIL;
+ }
+
++	/* fix flush_cam_entry bug in STOP AP mode */
++ psta->state |= WIFI_AP_STATE;
++ rtw_indicate_connect(padapter);
+ pmlmepriv->cur_network.join_res = true;/* for check if already set beacon */
+ return ret;
+ }
+diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
+index 165b918b..1b6d581c 100644
+--- a/drivers/staging/tidspbridge/Kconfig
++++ b/drivers/staging/tidspbridge/Kconfig
+@@ -4,7 +4,7 @@
+
+ menuconfig TIDSPBRIDGE
+ tristate "DSP Bridge driver"
+- depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM
++ depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM && BROKEN
+ select MAILBOX
+ select OMAP2PLUS_MBOX
+ help
+diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
+index 1e8b8412..4aa5ef54 100644
+--- a/drivers/staging/vt6656/baseband.c
++++ b/drivers/staging/vt6656/baseband.c
+@@ -939,6 +939,7 @@ int BBbVT3184Init(struct vnt_private *pDevice)
+ u8 * pbyAgc;
+ u16 wLengthAgc;
+ u8 abyArray[256];
++ u8 data;
+
+ ntStatus = CONTROLnsRequestIn(pDevice,
+ MESSAGE_TYPE_READ,
+@@ -1104,6 +1105,16 @@ else {
+ ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x0D,0x01);
+
+ RFbRFTableDownload(pDevice);
++
++	/* Fix for TX USB resets, taken from the vendor's driver */
++ CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, USB_REG4,
++ MESSAGE_REQUEST_MEM, sizeof(data), &data);
++
++ data |= 0x2;
++
++ CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, USB_REG4,
++ MESSAGE_REQUEST_MEM, sizeof(data), &data);
++
+ return true;//ntStatus;
+ }
+
+diff --git a/drivers/staging/vt6656/rndis.h b/drivers/staging/vt6656/rndis.h
+index 5e073062..5cf5e732 100644
+--- a/drivers/staging/vt6656/rndis.h
++++ b/drivers/staging/vt6656/rndis.h
+@@ -66,6 +66,8 @@
+
+ #define VIAUSB20_PACKET_HEADER 0x04
+
++#define USB_REG4 0x604
++
+ typedef struct _CMD_MESSAGE
+ {
+ u8 byData[256];
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index 012ba15e..e06ec439 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -652,21 +652,30 @@ static ssize_t reset_store(struct device *dev,
+ return -ENOMEM;
+
+ /* Do not reset an active device! */
+- if (bdev->bd_holders)
+- return -EBUSY;
++ if (bdev->bd_holders) {
++ ret = -EBUSY;
++ goto out;
++ }
+
+ ret = kstrtou16(buf, 10, &do_reset);
+ if (ret)
+- return ret;
++ goto out;
+
+- if (!do_reset)
+- return -EINVAL;
++ if (!do_reset) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ /* Make sure all pending I/O is finished */
+ fsync_bdev(bdev);
++ bdput(bdev);
+
+ zram_reset_device(zram, true);
+ return len;
++
++out:
++ bdput(bdev);
++ return ret;
+ }
+
+ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
+diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
+index 1a67537d..3b950e5a 100644
+--- a/drivers/staging/zsmalloc/zsmalloc-main.c
++++ b/drivers/staging/zsmalloc/zsmalloc-main.c
+@@ -430,7 +430,12 @@ static struct page *get_next_page(struct page *page)
+ return next;
+ }
+
+-/* Encode <page, obj_idx> as a single handle value */
++/*
++ * Encode <page, obj_idx> as a single handle value.
++ * On hardware platforms with physical memory starting at 0x0 the pfn
++ * could be 0 so we ensure that the handle will never be 0 by adjusting the
++ * encoded obj_idx value before encoding.
++ */
+ static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
+ {
+ unsigned long handle;
+@@ -441,17 +446,21 @@ static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
+ }
+
+ handle = page_to_pfn(page) << OBJ_INDEX_BITS;
+- handle |= (obj_idx & OBJ_INDEX_MASK);
++ handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);
+
+ return (void *)handle;
+ }
+
+-/* Decode <page, obj_idx> pair from the given object handle */
++/*
++ * Decode <page, obj_idx> pair from the given object handle. We adjust the
++ * decoded obj_idx back to its original value since it was adjusted in
++ * obj_location_to_handle().
++ */
+ static void obj_handle_to_location(unsigned long handle, struct page **page,
+ unsigned long *obj_idx)
+ {
+ *page = pfn_to_page(handle >> OBJ_INDEX_BITS);
+- *obj_idx = handle & OBJ_INDEX_MASK;
++ *obj_idx = (handle & OBJ_INDEX_MASK) - 1;
+ }
+
+ static unsigned long obj_idx_to_offset(struct page *page,
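The zsmalloc hunk biases the packed object index by one so that a (pfn 0, obj_idx 0) pair can never encode to handle 0, which the allocator reserves to mean "no object". A self-contained sketch of the encode/decode pair; OBJ_INDEX_BITS here is an illustrative value, not the kernel's computed one:

    #include <assert.h>

    #define OBJ_INDEX_BITS 12
    #define OBJ_INDEX_MASK ((1UL << OBJ_INDEX_BITS) - 1)

    /* Pack <pfn, obj_idx>; obj_idx is stored +1 so the handle is never 0. */
    static unsigned long encode(unsigned long pfn, unsigned long obj_idx)
    {
        return (pfn << OBJ_INDEX_BITS) | ((obj_idx + 1) & OBJ_INDEX_MASK);
    }

    static void decode(unsigned long handle, unsigned long *pfn,
                       unsigned long *obj_idx)
    {
        *pfn = handle >> OBJ_INDEX_BITS;
        *obj_idx = (handle & OBJ_INDEX_MASK) - 1;   /* undo the bias */
    }

    int main(void)
    {
        unsigned long pfn, idx;

        assert(encode(0, 0) != 0);        /* the whole point of the bias */
        decode(encode(0, 0), &pfn, &idx); /* round-trips cleanly */
        assert(pfn == 0 && idx == 0);
        return 0;
    }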
+diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
+index 7505fddc..3e801885 100644
+--- a/drivers/target/iscsi/iscsi_target_auth.c
++++ b/drivers/target/iscsi/iscsi_target_auth.c
+@@ -146,6 +146,7 @@ static int chap_server_compute_md5(
+ unsigned char client_digest[MD5_SIGNATURE_SIZE];
+ unsigned char server_digest[MD5_SIGNATURE_SIZE];
+ unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
++ size_t compare_len;
+ struct iscsi_chap *chap = conn->auth_protocol;
+ struct crypto_hash *tfm;
+ struct hash_desc desc;
+@@ -184,7 +185,9 @@ static int chap_server_compute_md5(
+ goto out;
+ }
+
+- if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
++ /* Include the terminating NULL in the compare */
++ compare_len = strlen(auth->userid) + 1;
++ if (strncmp(chap_n, auth->userid, compare_len) != 0) {
+ pr_err("CHAP_N values do not match!\n");
+ goto out;
+ }
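The CHAP fix compares strlen(auth->userid) + 1 bytes so the terminating NUL takes part in the comparison; with memcmp over only strlen(userid) bytes, any presented name merely prefixed by the configured userid would authenticate. Reduced to a sketch:

    #include <stdio.h>
    #include <string.h>

    /* Exact match only: comparing strlen(expected) + 1 bytes drags the
     * terminating NUL into the compare, so "adminX" cannot pass for
     * "admin". */
    static int names_match(const char *presented, const char *expected)
    {
        size_t compare_len = strlen(expected) + 1;
        return strncmp(presented, expected, compare_len) == 0;
    }

    int main(void)
    {
        printf("%d %d\n", names_match("admin", "admin"),     /* 1 */
                          names_match("adminX", "admin"));   /* 0 */
        return 0;
    }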
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index fd145259..8a1bd1af 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -1784,6 +1784,11 @@ static int lio_queue_status(struct se_cmd *se_cmd)
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->i_state = ISTATE_SEND_STATUS;
++
++ if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
++ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
++ return 0;
++ }
+ cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
+
+ return 0;
+diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
+index 6c7a5104..7087c736 100644
+--- a/drivers/target/iscsi/iscsi_target_device.c
++++ b/drivers/target/iscsi/iscsi_target_device.c
+@@ -58,11 +58,7 @@ void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess
+
+ cmd->maxcmdsn_inc = 1;
+
+- if (!mutex_trylock(&sess->cmdsn_mutex)) {
+- sess->max_cmd_sn += 1;
+- pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
+- return;
+- }
++ mutex_lock(&sess->cmdsn_mutex);
+ sess->max_cmd_sn += 1;
+ pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
+ mutex_unlock(&sess->cmdsn_mutex);
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index ef6d836a..a972145d 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -88,7 +88,7 @@ int extract_param(
+ if (len < 0)
+ return -1;
+
+- if (len > max_length) {
++ if (len >= max_length) {
+ pr_err("Length of input: %d exceeds max_length:"
+ " %d\n", len, max_length);
+ return -1;
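The extract_param change turns > into >=: the destination must hold len bytes plus a terminating NUL, so len equal to max_length is already one byte too many. In miniature:

    #include <stdio.h>
    #include <string.h>

    /* Copy src into a fixed buffer and NUL-terminate; len == max_length
     * must be rejected because the terminator needs its own byte. */
    static int extract(char *dst, size_t max_length, const char *src,
                       size_t len)
    {
        if (len >= max_length)
            return -1;
        memcpy(dst, src, len);
        dst[len] = '\0';
        return 0;
    }

    int main(void)
    {
        char buf[8];
        printf("%d %d\n", extract(buf, sizeof(buf), "12345678", 8),  /* -1 */
                          extract(buf, sizeof(buf), "1234567", 7));  /*  0 */
        return 0;
    }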
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 81e945ee..0b0009b5 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2910,6 +2910,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+ cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
+
+ cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
++ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ trace_target_cmd_complete(cmd);
+ cmd->se_tfo->queue_status(cmd);
+
+@@ -2938,6 +2939,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
+ if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+ cmd->transport_state |= CMD_T_ABORTED;
+ smp_mb__after_atomic_inc();
++ return;
+ }
+ }
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 7a744b69..ff582933 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -767,8 +767,8 @@ static size_t __process_echoes(struct tty_struct *tty)
+ * of echo overrun before the next commit), then discard enough
+ * data at the tail to prevent a subsequent overrun */
+ while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
+- if (echo_buf(ldata, tail == ECHO_OP_START)) {
+- if (echo_buf(ldata, tail) == ECHO_OP_ERASE_TAB)
++ if (echo_buf(ldata, tail) == ECHO_OP_START) {
++ if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
+ tail += 3;
+ else
+ tail += 2;
+@@ -2005,7 +2005,10 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
+ found = 1;
+
+ size = N_TTY_BUF_SIZE - tail;
+- n = (found + eol + size) & (N_TTY_BUF_SIZE - 1);
++ n = eol - tail;
++ if (n > 4096)
++ n += 4096;
++ n += found;
+ c = n;
+
+ if (found && read_buf(ldata, eol) == __DISABLED_CHAR) {
+@@ -2250,6 +2253,9 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ if (time)
+ timeout = time;
+ }
++ n_tty_set_room(tty);
++ up_read(&tty->termios_rwsem);
++
+ mutex_unlock(&ldata->atomic_read_lock);
+ remove_wait_queue(&tty->read_wait, &wait);
+
+@@ -2260,8 +2266,6 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ if (b - buf)
+ retval = b - buf;
+
+- n_tty_set_room(tty);
+- up_read(&tty->termios_rwsem);
+ return retval;
+ }
+
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 3a1a01af..c74a00ad 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -2086,6 +2086,7 @@ retry_open:
+ filp->f_op = &tty_fops;
+ goto retry_open;
+ }
++ clear_bit(TTY_HUPPED, &tty->flags);
+ tty_unlock(tty);
+
+
+diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
+index f7447f70..d18da911 100644
+--- a/drivers/video/backlight/atmel-pwm-bl.c
++++ b/drivers/video/backlight/atmel-pwm-bl.c
+@@ -70,7 +70,7 @@ static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
+ static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
+ {
+ struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
+- u8 intensity;
++ u32 intensity;
+
+ if (pwmbl->pdata->pwm_active_low) {
+ intensity = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY) -
+@@ -80,7 +80,7 @@ static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
+ pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
+ }
+
+- return intensity;
++ return intensity & 0xffff;
+ }
+
+ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
+@@ -206,8 +206,10 @@ static int atmel_pwm_bl_remove(struct platform_device *pdev)
+ {
+ struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
+
+- if (pwmbl->gpio_on != -1)
+- gpio_set_value(pwmbl->gpio_on, 0);
++ if (pwmbl->gpio_on != -1) {
++ gpio_set_value(pwmbl->gpio_on,
++ 0 ^ pwmbl->pdata->on_active_low);
++ }
+ pwm_channel_disable(&pwmbl->pwmc);
+ pwm_channel_free(&pwmbl->pwmc);
+ backlight_device_unregister(pwmbl->bldev);
+diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
+index 35687fd5..4ad24f2c 100644
+--- a/drivers/video/console/sticore.c
++++ b/drivers/video/console/sticore.c
+@@ -3,7 +3,7 @@
+ * core code for console driver using HP's STI firmware
+ *
+ * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+- * Copyright (C) 2001-2003 Helge Deller <deller@gmx.de>
++ * Copyright (C) 2001-2013 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2001-2002 Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+ *
+ * TODO:
+@@ -30,7 +30,7 @@
+
+ #include "../sticore.h"
+
+-#define STI_DRIVERVERSION "Version 0.9a"
++#define STI_DRIVERVERSION "Version 0.9b"
+
+ static struct sti_struct *default_sti __read_mostly;
+
+@@ -73,28 +73,34 @@ static const struct sti_init_flags default_init_flags = {
+
+ static int sti_init_graph(struct sti_struct *sti)
+ {
+- struct sti_init_inptr_ext inptr_ext = { 0, };
+- struct sti_init_inptr inptr = {
+- .text_planes = 3, /* # of text planes (max 3 for STI) */
+- .ext_ptr = STI_PTR(&inptr_ext)
+- };
+- struct sti_init_outptr outptr = { 0, };
++ struct sti_init_inptr *inptr = &sti->sti_data->init_inptr;
++ struct sti_init_inptr_ext *inptr_ext = &sti->sti_data->init_inptr_ext;
++ struct sti_init_outptr *outptr = &sti->sti_data->init_outptr;
+ unsigned long flags;
+- int ret;
++ int ret, err;
+
+ spin_lock_irqsave(&sti->lock, flags);
+
+- ret = STI_CALL(sti->init_graph, &default_init_flags, &inptr,
+- &outptr, sti->glob_cfg);
++ memset(inptr, 0, sizeof(*inptr));
++ inptr->text_planes = 3; /* # of text planes (max 3 for STI) */
++ memset(inptr_ext, 0, sizeof(*inptr_ext));
++ inptr->ext_ptr = STI_PTR(inptr_ext);
++ outptr->errno = 0;
++
++ ret = sti_call(sti, sti->init_graph, &default_init_flags, inptr,
++ outptr, sti->glob_cfg);
++
++ if (ret >= 0)
++ sti->text_planes = outptr->text_planes;
++ err = outptr->errno;
+
+ spin_unlock_irqrestore(&sti->lock, flags);
+
+ if (ret < 0) {
+- printk(KERN_ERR "STI init_graph failed (ret %d, errno %d)\n",ret,outptr.errno);
++ pr_err("STI init_graph failed (ret %d, errno %d)\n", ret, err);
+ return -1;
+ }
+
+- sti->text_planes = outptr.text_planes;
+ return 0;
+ }
+
+@@ -104,16 +110,18 @@ static const struct sti_conf_flags default_conf_flags = {
+
+ static void sti_inq_conf(struct sti_struct *sti)
+ {
+- struct sti_conf_inptr inptr = { 0, };
++ struct sti_conf_inptr *inptr = &sti->sti_data->inq_inptr;
++ struct sti_conf_outptr *outptr = &sti->sti_data->inq_outptr;
+ unsigned long flags;
+ s32 ret;
+
+- sti->outptr.ext_ptr = STI_PTR(&sti->outptr_ext);
++ outptr->ext_ptr = STI_PTR(&sti->sti_data->inq_outptr_ext);
+
+ do {
+ spin_lock_irqsave(&sti->lock, flags);
+- ret = STI_CALL(sti->inq_conf, &default_conf_flags,
+- &inptr, &sti->outptr, sti->glob_cfg);
++ memset(inptr, 0, sizeof(*inptr));
++ ret = sti_call(sti, sti->inq_conf, &default_conf_flags,
++ inptr, outptr, sti->glob_cfg);
+ spin_unlock_irqrestore(&sti->lock, flags);
+ } while (ret == 1);
+ }
+@@ -126,7 +134,8 @@ static const struct sti_font_flags default_font_flags = {
+ void
+ sti_putc(struct sti_struct *sti, int c, int y, int x)
+ {
+- struct sti_font_inptr inptr = {
++ struct sti_font_inptr *inptr = &sti->sti_data->font_inptr;
++ struct sti_font_inptr inptr_default = {
+ .font_start_addr= STI_PTR(sti->font->raw),
+ .index = c_index(sti, c),
+ .fg_color = c_fg(sti, c),
+@@ -134,14 +143,15 @@ sti_putc(struct sti_struct *sti, int c, int y, int x)
+ .dest_x = x * sti->font_width,
+ .dest_y = y * sti->font_height,
+ };
+- struct sti_font_outptr outptr = { 0, };
++ struct sti_font_outptr *outptr = &sti->sti_data->font_outptr;
+ s32 ret;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&sti->lock, flags);
+- ret = STI_CALL(sti->font_unpmv, &default_font_flags,
+- &inptr, &outptr, sti->glob_cfg);
++ *inptr = inptr_default;
++ ret = sti_call(sti, sti->font_unpmv, &default_font_flags,
++ inptr, outptr, sti->glob_cfg);
+ spin_unlock_irqrestore(&sti->lock, flags);
+ } while (ret == 1);
+ }
+@@ -156,7 +166,8 @@ void
+ sti_set(struct sti_struct *sti, int src_y, int src_x,
+ int height, int width, u8 color)
+ {
+- struct sti_blkmv_inptr inptr = {
++ struct sti_blkmv_inptr *inptr = &sti->sti_data->blkmv_inptr;
++ struct sti_blkmv_inptr inptr_default = {
+ .fg_color = color,
+ .bg_color = color,
+ .src_x = src_x,
+@@ -166,14 +177,15 @@ sti_set(struct sti_struct *sti, int src_y, int src_x,
+ .width = width,
+ .height = height,
+ };
+- struct sti_blkmv_outptr outptr = { 0, };
++ struct sti_blkmv_outptr *outptr = &sti->sti_data->blkmv_outptr;
+ s32 ret;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&sti->lock, flags);
+- ret = STI_CALL(sti->block_move, &clear_blkmv_flags,
+- &inptr, &outptr, sti->glob_cfg);
++ *inptr = inptr_default;
++ ret = sti_call(sti, sti->block_move, &clear_blkmv_flags,
++ inptr, outptr, sti->glob_cfg);
+ spin_unlock_irqrestore(&sti->lock, flags);
+ } while (ret == 1);
+ }
+@@ -182,7 +194,8 @@ void
+ sti_clear(struct sti_struct *sti, int src_y, int src_x,
+ int height, int width, int c)
+ {
+- struct sti_blkmv_inptr inptr = {
++ struct sti_blkmv_inptr *inptr = &sti->sti_data->blkmv_inptr;
++ struct sti_blkmv_inptr inptr_default = {
+ .fg_color = c_fg(sti, c),
+ .bg_color = c_bg(sti, c),
+ .src_x = src_x * sti->font_width,
+@@ -192,14 +205,15 @@ sti_clear(struct sti_struct *sti, int src_y, int src_x,
+ .width = width * sti->font_width,
+ .height = height* sti->font_height,
+ };
+- struct sti_blkmv_outptr outptr = { 0, };
++ struct sti_blkmv_outptr *outptr = &sti->sti_data->blkmv_outptr;
+ s32 ret;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&sti->lock, flags);
+- ret = STI_CALL(sti->block_move, &clear_blkmv_flags,
+- &inptr, &outptr, sti->glob_cfg);
++ *inptr = inptr_default;
++ ret = sti_call(sti, sti->block_move, &clear_blkmv_flags,
++ inptr, outptr, sti->glob_cfg);
+ spin_unlock_irqrestore(&sti->lock, flags);
+ } while (ret == 1);
+ }
+@@ -212,7 +226,8 @@ void
+ sti_bmove(struct sti_struct *sti, int src_y, int src_x,
+ int dst_y, int dst_x, int height, int width)
+ {
+- struct sti_blkmv_inptr inptr = {
++ struct sti_blkmv_inptr *inptr = &sti->sti_data->blkmv_inptr;
++ struct sti_blkmv_inptr inptr_default = {
+ .src_x = src_x * sti->font_width,
+ .src_y = src_y * sti->font_height,
+ .dest_x = dst_x * sti->font_width,
+@@ -220,14 +235,15 @@ sti_bmove(struct sti_struct *sti, int src_y, int src_x,
+ .width = width * sti->font_width,
+ .height = height* sti->font_height,
+ };
+- struct sti_blkmv_outptr outptr = { 0, };
++ struct sti_blkmv_outptr *outptr = &sti->sti_data->blkmv_outptr;
+ s32 ret;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&sti->lock, flags);
+- ret = STI_CALL(sti->block_move, &default_blkmv_flags,
+- &inptr, &outptr, sti->glob_cfg);
++ *inptr = inptr_default;
++ ret = sti_call(sti, sti->block_move, &default_blkmv_flags,
++ inptr, outptr, sti->glob_cfg);
+ spin_unlock_irqrestore(&sti->lock, flags);
+ } while (ret == 1);
+ }
+@@ -284,7 +300,7 @@ __setup("sti=", sti_setup);
+
+
+
+-static char *font_name[MAX_STI_ROMS] = { "VGA8x16", };
++static char *font_name[MAX_STI_ROMS];
+ static int font_index[MAX_STI_ROMS],
+ font_height[MAX_STI_ROMS],
+ font_width[MAX_STI_ROMS];
+@@ -389,10 +405,10 @@ static void sti_dump_outptr(struct sti_struct *sti)
+ "%d used bits\n"
+ "%d planes\n"
+ "attributes %08x\n",
+- sti->outptr.bits_per_pixel,
+- sti->outptr.bits_used,
+- sti->outptr.planes,
+- sti->outptr.attributes));
++ sti->sti_data->inq_outptr.bits_per_pixel,
++ sti->sti_data->inq_outptr.bits_used,
++ sti->sti_data->inq_outptr.planes,
++ sti->sti_data->inq_outptr.attributes));
+ }
+
+ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
+@@ -402,24 +418,21 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
+ struct sti_glob_cfg_ext *glob_cfg_ext;
+ void *save_addr;
+ void *sti_mem_addr;
+- const int save_addr_size = 1024; /* XXX */
+- int i;
++ int i, size;
+
+- if (!sti->sti_mem_request)
++ if (sti->sti_mem_request < 256)
+ sti->sti_mem_request = 256; /* STI default */
+
+- glob_cfg = kzalloc(sizeof(*sti->glob_cfg), GFP_KERNEL);
+- glob_cfg_ext = kzalloc(sizeof(*glob_cfg_ext), GFP_KERNEL);
+- save_addr = kzalloc(save_addr_size, GFP_KERNEL);
+- sti_mem_addr = kzalloc(sti->sti_mem_request, GFP_KERNEL);
++ size = sizeof(struct sti_all_data) + sti->sti_mem_request - 256;
+
+- if (!(glob_cfg && glob_cfg_ext && save_addr && sti_mem_addr)) {
+- kfree(glob_cfg);
+- kfree(glob_cfg_ext);
+- kfree(save_addr);
+- kfree(sti_mem_addr);
++ sti->sti_data = kzalloc(size, STI_LOWMEM);
++ if (!sti->sti_data)
+ return -ENOMEM;
+- }
++
++ glob_cfg = &sti->sti_data->glob_cfg;
++ glob_cfg_ext = &sti->sti_data->glob_cfg_ext;
++ save_addr = &sti->sti_data->save_addr;
++ sti_mem_addr = &sti->sti_data->sti_mem_addr;
+
+ glob_cfg->ext_ptr = STI_PTR(glob_cfg_ext);
+ glob_cfg->save_addr = STI_PTR(save_addr);
+@@ -475,32 +488,31 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
+ return 0;
+ }
+
+-#ifdef CONFIG_FB
++#ifdef CONFIG_FONTS
+ static struct sti_cooked_font *
+ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
+ {
+- const struct font_desc *fbfont;
++ const struct font_desc *fbfont = NULL;
+ unsigned int size, bpc;
+ void *dest;
+ struct sti_rom_font *nf;
+ struct sti_cooked_font *cooked_font;
+
+- if (!fbfont_name || !strlen(fbfont_name))
+- return NULL;
+- fbfont = find_font(fbfont_name);
++ if (fbfont_name && strlen(fbfont_name))
++ fbfont = find_font(fbfont_name);
+ if (!fbfont)
+ fbfont = get_default_font(1024,768, ~(u32)0, ~(u32)0);
+ if (!fbfont)
+ return NULL;
+
+- DPRINTK((KERN_DEBUG "selected %dx%d fb-font %s\n",
+- fbfont->width, fbfont->height, fbfont->name));
++ pr_info("STI selected %dx%d framebuffer font %s for sticon\n",
++ fbfont->width, fbfont->height, fbfont->name);
+
+ bpc = ((fbfont->width+7)/8) * fbfont->height;
+ size = bpc * 256;
+ size += sizeof(struct sti_rom_font);
+
+- nf = kzalloc(size, GFP_KERNEL);
++ nf = kzalloc(size, STI_LOWMEM);
+ if (!nf)
+ return NULL;
+
+@@ -637,7 +649,7 @@ static void *sti_bmode_font_raw(struct sti_cooked_font *f)
+ unsigned char *n, *p, *q;
+ int size = f->raw->bytes_per_char*256+sizeof(struct sti_rom_font);
+
+- n = kzalloc (4*size, GFP_KERNEL);
++ n = kzalloc(4*size, STI_LOWMEM);
+ if (!n)
+ return NULL;
+ p = n + 3;
+@@ -673,7 +685,7 @@ static struct sti_rom *sti_get_bmode_rom (unsigned long address)
+ sti_bmode_rom_copy(address + BMODE_LAST_ADDR_OFFS, sizeof(size), &size);
+
+ size = (size+3) / 4;
+- raw = kmalloc(size, GFP_KERNEL);
++ raw = kmalloc(size, STI_LOWMEM);
+ if (raw) {
+ sti_bmode_rom_copy(address, size, raw);
+ memmove (&raw->res004, &raw->type[0], 0x3c);
+@@ -707,7 +719,7 @@ static struct sti_rom *sti_get_wmode_rom(unsigned long address)
+ /* read the ROM size directly from the struct in ROM */
+ size = gsc_readl(address + offsetof(struct sti_rom,last_addr));
+
+- raw = kmalloc(size, GFP_KERNEL);
++ raw = kmalloc(size, STI_LOWMEM);
+ if (raw)
+ sti_rom_copy(address, size, raw);
+
+@@ -743,6 +755,10 @@ static int sti_read_rom(int wordmode, struct sti_struct *sti,
+
+ address = (unsigned long) STI_PTR(raw);
+
++ pr_info("STI ROM supports 32 %sbit firmware functions.\n",
++ raw->alt_code_type == ALT_CODE_TYPE_PA_RISC_64
++ ? "and 64 " : "");
++
+ sti->font_unpmv = address + (raw->font_unpmv & 0x03ffffff);
+ sti->block_move = address + (raw->block_move & 0x03ffffff);
+ sti->init_graph = address + (raw->init_graph & 0x03ffffff);
+@@ -901,7 +917,8 @@ test_rom:
+ sti_dump_globcfg(sti->glob_cfg, sti->sti_mem_request);
+ sti_dump_outptr(sti);
+
+- printk(KERN_INFO " graphics card name: %s\n", sti->outptr.dev_name );
++ pr_info(" graphics card name: %s\n",
++ sti->sti_data->inq_outptr.dev_name);
+
+ sti_roms[num_sti_roms] = sti;
+ num_sti_roms++;
+@@ -1073,6 +1090,29 @@ struct sti_struct * sti_get_rom(unsigned int index)
+ }
+ EXPORT_SYMBOL(sti_get_rom);
+
++
++int sti_call(const struct sti_struct *sti, unsigned long func,
++ const void *flags, void *inptr, void *outptr,
++ struct sti_glob_cfg *glob_cfg)
++{
++ unsigned long _flags = STI_PTR(flags);
++ unsigned long _inptr = STI_PTR(inptr);
++ unsigned long _outptr = STI_PTR(outptr);
++ unsigned long _glob_cfg = STI_PTR(glob_cfg);
++ int ret;
++
++#ifdef CONFIG_64BIT
++ /* Check for overflow when using 32bit STI on 64bit kernel. */
++ if (WARN_ONCE(_flags>>32 || _inptr>>32 || _outptr>>32 || _glob_cfg>>32,
++ "Out of 32bit-range pointers!"))
++ return -1;
++#endif
++
++ ret = pdc_sti_call(func, _flags, _inptr, _outptr, _glob_cfg);
++
++ return ret;
++}
++
+ MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer");
+ MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/video/sticore.h b/drivers/video/sticore.h
+index addf7b61..af161953 100644
+--- a/drivers/video/sticore.h
++++ b/drivers/video/sticore.h
+@@ -18,6 +18,9 @@
+ #define STI_FONT_HPROMAN8 1
+ #define STI_FONT_KANA8 2
+
++#define ALT_CODE_TYPE_UNKNOWN 0x00 /* alt code type values */
++#define ALT_CODE_TYPE_PA_RISC_64 0x01
++
+ /* The latency of the STI functions cannot really be reduced by setting
+ * this to 0; STI doesn't seem to be designed to allow calling a different
+ * function (or the same function with different arguments) after a
+@@ -40,14 +43,6 @@
+
+ #define STI_PTR(p) ( virt_to_phys(p) )
+ #define PTR_STI(p) ( phys_to_virt((unsigned long)p) )
+-#define STI_CALL(func, flags, inptr, outptr, glob_cfg) \
+- ({ \
+- pdc_sti_call( func, STI_PTR(flags), \
+- STI_PTR(inptr), \
+- STI_PTR(outptr), \
+- STI_PTR(glob_cfg)); \
+- })
+-
+
+ #define sti_onscreen_x(sti) (sti->glob_cfg->onscreen_x)
+ #define sti_onscreen_y(sti) (sti->glob_cfg->onscreen_y)
+@@ -56,6 +51,12 @@
+ #define sti_font_x(sti) (PTR_STI(sti->font)->width)
+ #define sti_font_y(sti) (PTR_STI(sti->font)->height)
+
++#ifdef CONFIG_64BIT
++#define STI_LOWMEM (GFP_KERNEL | GFP_DMA)
++#else
++#define STI_LOWMEM (GFP_KERNEL)
++#endif
++
+
+ /* STI function configuration structs */
+
+@@ -306,6 +307,34 @@ struct sti_blkmv_outptr {
+ };
+
+
++/* sti_all_data is an internal struct which needs to be allocated in
++ * low memory (< 4GB) if 32bit STI is used on a 64bit kernel */
++
++struct sti_all_data {
++ struct sti_glob_cfg glob_cfg;
++ struct sti_glob_cfg_ext glob_cfg_ext;
++
++ struct sti_conf_inptr inq_inptr;
++ struct sti_conf_outptr inq_outptr; /* configuration */
++ struct sti_conf_outptr_ext inq_outptr_ext;
++
++ struct sti_init_inptr_ext init_inptr_ext;
++ struct sti_init_inptr init_inptr;
++ struct sti_init_outptr init_outptr;
++
++ struct sti_blkmv_inptr blkmv_inptr;
++ struct sti_blkmv_outptr blkmv_outptr;
++
++ struct sti_font_inptr font_inptr;
++ struct sti_font_outptr font_outptr;
++
++ /* leave as last entries */
++ unsigned long save_addr[1024 / sizeof(unsigned long)];
++ /* min 256 bytes which is STI default, max sti->sti_mem_request */
++ unsigned long sti_mem_addr[256 / sizeof(unsigned long)];
++ /* do not add something below here ! */
++};
++
+ /* internal generic STI struct */
+
+ struct sti_struct {
+@@ -330,11 +359,9 @@ struct sti_struct {
+ region_t regions[STI_REGION_MAX];
+ unsigned long regions_phys[STI_REGION_MAX];
+
+- struct sti_glob_cfg *glob_cfg;
+- struct sti_cooked_font *font; /* ptr to selected font (cooked) */
++ struct sti_glob_cfg *glob_cfg; /* points into sti_all_data */
+
+- struct sti_conf_outptr outptr; /* configuration */
+- struct sti_conf_outptr_ext outptr_ext;
++ struct sti_cooked_font *font; /* ptr to selected font (cooked) */
+
+ struct pci_dev *pd;
+
+@@ -343,6 +370,9 @@ struct sti_struct {
+
+ /* pointer to the fb_info where this STI device is used */
+ struct fb_info *info;
++
++ /* pointer to all internal data */
++ struct sti_all_data *sti_data;
+ };
+
+
+@@ -350,6 +380,14 @@ struct sti_struct {
+
+ struct sti_struct *sti_get_rom(unsigned int index); /* 0: default sti */
+
++
++/* sticore main function to call STI firmware */
++
++int sti_call(const struct sti_struct *sti, unsigned long func,
++ const void *flags, void *inptr, void *outptr,
++ struct sti_glob_cfg *glob_cfg);
++
++
+ /* functions to call the STI ROM directly */
+
+ void sti_putc(struct sti_struct *sti, int c, int y, int x);
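The sticore rework funnels every buffer handed to the firmware into one sti_all_data allocation made with STI_LOWMEM (GFP_KERNEL | GFP_DMA on 64-bit kernels) so it lands below 4 GB, and the new sti_call() additionally refuses any pointer with bits above 32 before entering the 32-bit firmware. A toy version of that guard; the message simply echoes the WARN_ONCE text above:

    #include <stdint.h>
    #include <stdio.h>

    /* Refuse to hand a >32-bit physical address to 32-bit firmware,
     * mirroring the check the patch adds to sti_call(). */
    static int fw_addr_ok(uint64_t phys)
    {
        if (phys >> 32) {
            fprintf(stderr, "Out of 32bit-range pointers!\n");
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        printf("%d %d\n", fw_addr_ok(0x7ffff000ULL),      /* 1 */
                          fw_addr_ok(0x100000000ULL));    /* 0 */
        return 0;
    }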
+diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
+index 876648e1..019a1fee 100644
+--- a/drivers/video/stifb.c
++++ b/drivers/video/stifb.c
+@@ -1101,6 +1101,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
+ var = &info->var;
+
+ fb->sti = sti;
++ dev_name = sti->sti_data->inq_outptr.dev_name;
+ /* store upper 32bits of the graphics id */
+ fb->id = fb->sti->graphics_id[0];
+
+@@ -1114,11 +1115,11 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
+ Since this driver only supports standard mode, we check
+ if the device name contains the string "DX" and tell the
+ user how to reconfigure the card. */
+- if (strstr(sti->outptr.dev_name, "DX")) {
++ if (strstr(dev_name, "DX")) {
+ printk(KERN_WARNING
+ "WARNING: stifb framebuffer driver does not support '%s' in double-buffer mode.\n"
+ "WARNING: Please disable the double-buffer mode in IPL menu (the PARISC-BIOS).\n",
+- sti->outptr.dev_name);
++ dev_name);
+ goto out_err0;
+ }
+ /* fall through */
+@@ -1130,7 +1131,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
+ break;
+ default:
+ printk(KERN_WARNING "stifb: '%s' (id: 0x%08x) not supported.\n",
+- sti->outptr.dev_name, fb->id);
++ dev_name, fb->id);
+ goto out_err0;
+ }
+
+@@ -1154,7 +1155,6 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
+ fb->id = S9000_ID_A1659A;
+ break;
+ case S9000_ID_TIMBER: /* HP9000/710 Any (may be a grayscale device) */
+- dev_name = fb->sti->outptr.dev_name;
+ if (strstr(dev_name, "GRAYSCALE") ||
+ strstr(dev_name, "Grayscale") ||
+ strstr(dev_name, "grayscale"))
+@@ -1290,7 +1290,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
+ var->xres,
+ var->yres,
+ var->bits_per_pixel,
+- sti->outptr.dev_name,
++ dev_name,
+ fb->id,
+ fix->mmio_start);
+
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 52b6f6c2..c8e03f8d 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -261,7 +261,7 @@ struct smb_version_operations {
+ /* query path data from the server */
+ int (*query_path_info)(const unsigned int, struct cifs_tcon *,
+ struct cifs_sb_info *, const char *,
+- FILE_ALL_INFO *, bool *);
++ FILE_ALL_INFO *, bool *, bool *);
+ /* query file data from the server */
+ int (*query_file_info)(const unsigned int, struct cifs_tcon *,
+ struct cifs_fid *, FILE_ALL_INFO *);
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index ccd31ab8..5f1f3285 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -3315,11 +3315,13 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
+ return 0;
+ }
+ cifs_acl->version = cpu_to_le16(1);
+- if (acl_type == ACL_TYPE_ACCESS)
++ if (acl_type == ACL_TYPE_ACCESS) {
+ cifs_acl->access_entry_count = cpu_to_le16(count);
+- else if (acl_type == ACL_TYPE_DEFAULT)
++ cifs_acl->default_entry_count = __constant_cpu_to_le16(0xFFFF);
++ } else if (acl_type == ACL_TYPE_DEFAULT) {
+ cifs_acl->default_entry_count = cpu_to_le16(count);
+- else {
++ cifs_acl->access_entry_count = __constant_cpu_to_le16(0xFFFF);
++ } else {
+ cifs_dbg(FYI, "unknown ACL type %d\n", acl_type);
+ return 0;
+ }
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 867b7cdc..36f9ebb9 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -542,7 +542,8 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
+ /* Fill a cifs_fattr struct with info from FILE_ALL_INFO */
+ static void
+ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
+- struct cifs_sb_info *cifs_sb, bool adjust_tz)
++ struct cifs_sb_info *cifs_sb, bool adjust_tz,
++ bool symlink)
+ {
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+
+@@ -569,7 +570,11 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
+ fattr->cf_createtime = le64_to_cpu(info->CreationTime);
+
+ fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
+- if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
++
++ if (symlink) {
++ fattr->cf_mode = S_IFLNK;
++ fattr->cf_dtype = DT_LNK;
++ } else if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+ fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
+ fattr->cf_dtype = DT_DIR;
+ /*
+@@ -578,10 +583,6 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
+ */
+ if (!tcon->unix_ext)
+ fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
+- } else if (fattr->cf_cifsattrs & ATTR_REPARSE) {
+- fattr->cf_mode = S_IFLNK;
+- fattr->cf_dtype = DT_LNK;
+- fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
+ } else {
+ fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
+ fattr->cf_dtype = DT_REG;
+@@ -626,7 +627,8 @@ cifs_get_file_info(struct file *filp)
+ rc = server->ops->query_file_info(xid, tcon, &cfile->fid, &find_data);
+ switch (rc) {
+ case 0:
+- cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
++ cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false,
++ false);
+ break;
+ case -EREMOTE:
+ cifs_create_dfs_fattr(&fattr, inode->i_sb);
+@@ -673,6 +675,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
+ bool adjust_tz = false;
+ struct cifs_fattr fattr;
+ struct cifs_search_info *srchinf = NULL;
++ bool symlink = false;
+
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+@@ -702,12 +705,12 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
+ }
+ data = (FILE_ALL_INFO *)buf;
+ rc = server->ops->query_path_info(xid, tcon, cifs_sb, full_path,
+- data, &adjust_tz);
++ data, &adjust_tz, &symlink);
+ }
+
+ if (!rc) {
+- cifs_all_info_to_fattr(&fattr, (FILE_ALL_INFO *)data, cifs_sb,
+- adjust_tz);
++ cifs_all_info_to_fattr(&fattr, data, cifs_sb, adjust_tz,
++ symlink);
+ } else if (rc == -EREMOTE) {
+ cifs_create_dfs_fattr(&fattr, sb);
+ rc = 0;
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 53a75f3d..5940ecab 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -134,22 +134,6 @@ out:
+ dput(dentry);
+ }
+
+-/*
+- * Is it possible that this directory might turn out to be a DFS referral
+- * once we go to try and use it?
+- */
+-static bool
+-cifs_dfs_is_possible(struct cifs_sb_info *cifs_sb)
+-{
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+- struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-
+- if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
+- return true;
+-#endif
+- return false;
+-}
+-
+ static void
+ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
+ {
+@@ -159,27 +143,19 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
+ if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+ fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
+ fattr->cf_dtype = DT_DIR;
+- /*
+- * Windows CIFS servers generally make DFS referrals look
+- * like directories in FIND_* responses with the reparse
+- * attribute flag also set (since DFS junctions are
+- * reparse points). We must revalidate at least these
+- * directory inodes before trying to use them (if
+- * they are DFS we will get PATH_NOT_COVERED back
+- * when queried directly and can then try to connect
+- * to the DFS target)
+- */
+- if (cifs_dfs_is_possible(cifs_sb) &&
+- (fattr->cf_cifsattrs & ATTR_REPARSE))
+- fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
+- } else if (fattr->cf_cifsattrs & ATTR_REPARSE) {
+- fattr->cf_mode = S_IFLNK;
+- fattr->cf_dtype = DT_LNK;
+ } else {
+ fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
+ fattr->cf_dtype = DT_REG;
+ }
+
++ /*
++ * We need to revalidate it further to make a decision about whether it
++ * is a symbolic link, DFS referral or a reparse point with a direct
++ * access like junctions, deduplicated files, NFS symlinks.
++ */
++ if (fattr->cf_cifsattrs & ATTR_REPARSE)
++ fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
++
+ /* non-unix readdir doesn't provide nlink */
+ fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
+
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index 8233b174..e50554b7 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -534,10 +534,12 @@ cifs_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ static int
+ cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+- FILE_ALL_INFO *data, bool *adjustTZ)
++ FILE_ALL_INFO *data, bool *adjustTZ, bool *symlink)
+ {
+ int rc;
+
++ *symlink = false;
++
+ /* could do find first instead but this returns more info */
+ rc = CIFSSMBQPathInfo(xid, tcon, full_path, data, 0 /* not legacy */,
+ cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+@@ -554,6 +556,23 @@ cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ *adjustTZ = true;
+ }
++
++ if (!rc && (le32_to_cpu(data->Attributes) & ATTR_REPARSE)) {
++ int tmprc;
++ int oplock = 0;
++ __u16 netfid;
++
++ /* Need to check if this is a symbolic link or not */
++ tmprc = CIFSSMBOpen(xid, tcon, full_path, FILE_OPEN,
++ FILE_READ_ATTRIBUTES, 0, &netfid, &oplock,
++ NULL, cifs_sb->local_nls,
++ cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
++ if (tmprc == -EOPNOTSUPP)
++ *symlink = true;
++ else
++ CIFSSMBClose(xid, tcon, netfid);
++ }
++
+ return rc;
+ }
+
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 78ff88c4..84c012a6 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -123,12 +123,13 @@ move_smb2_info_to_cifs(FILE_ALL_INFO *dst, struct smb2_file_all_info *src)
+ int
+ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+- FILE_ALL_INFO *data, bool *adjust_tz)
++ FILE_ALL_INFO *data, bool *adjust_tz, bool *symlink)
+ {
+ int rc;
+ struct smb2_file_all_info *smb2_data;
+
+ *adjust_tz = false;
++ *symlink = false;
+
+ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+ GFP_KERNEL);
+@@ -136,9 +137,16 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ return -ENOMEM;
+
+ rc = smb2_open_op_close(xid, tcon, cifs_sb, full_path,
+- FILE_READ_ATTRIBUTES, FILE_OPEN,
+- OPEN_REPARSE_POINT, smb2_data,
+- SMB2_OP_QUERY_INFO);
++ FILE_READ_ATTRIBUTES, FILE_OPEN, 0,
++ smb2_data, SMB2_OP_QUERY_INFO);
++ if (rc == -EOPNOTSUPP) {
++ *symlink = true;
++ /* Failed on a symbolic link - query a reparse point info */
++ rc = smb2_open_op_close(xid, tcon, cifs_sb, full_path,
++ FILE_READ_ATTRIBUTES, FILE_OPEN,
++ OPEN_REPARSE_POINT, smb2_data,
++ SMB2_OP_QUERY_INFO);
++ }
+ if (rc)
+ goto out;
+
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index e3fb4801..7db5db0e 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -61,7 +61,7 @@ extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst,
+ extern int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+ const char *full_path, FILE_ALL_INFO *data,
+- bool *adjust_tz);
++ bool *adjust_tz, bool *symlink);
+ extern int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *full_path, __u64 size,
+ struct cifs_sb_info *cifs_sb, bool set_alloc);
+diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
+index 073d30b9..a726b9f2 100644
+--- a/fs/devpts/inode.c
++++ b/fs/devpts/inode.c
+@@ -498,6 +498,7 @@ static void devpts_kill_sb(struct super_block *sb)
+ {
+ struct pts_fs_info *fsi = DEVPTS_SB(sb);
+
++ ida_destroy(&fsi->allocated_ptys);
+ kfree(fsi);
+ kill_litter_super(sb);
+ }
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 03e9bebb..1423c481 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1352,6 +1352,7 @@ retry:
+ new_extra_isize = s_min_extra_isize;
+ kfree(is); is = NULL;
+ kfree(bs); bs = NULL;
++ brelse(bh);
+ goto retry;
+ }
+ error = -1;
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 968d4c56..12987666 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -1168,8 +1168,11 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
+ if (d != NULL)
+ dentry = d;
+ if (dentry->d_inode) {
+- if (!(*opened & FILE_OPENED))
++ if (!(*opened & FILE_OPENED)) {
++ if (d == NULL)
++ dget(dentry);
+ return finish_no_open(file, dentry);
++ }
+ dput(d);
+ return 0;
+ }
+diff --git a/fs/namei.c b/fs/namei.c
+index caa28051..23ac50f4 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2468,6 +2468,7 @@ static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
+ */
+ static inline int may_create(struct inode *dir, struct dentry *child)
+ {
++ audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
+ if (child->d_inode)
+ return -EEXIST;
+ if (IS_DEADDIR(dir))
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index d9019821..26115778 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -100,6 +100,7 @@ enum acpi_hotplug_mode {
+ struct acpi_hotplug_profile {
+ struct kobject kobj;
+ bool enabled:1;
++ bool ignore:1;
+ enum acpi_hotplug_mode mode;
+ };
+
+diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
+index d1382dfb..0ce77210 100644
+--- a/include/linux/mfd/rtsx_pci.h
++++ b/include/linux/mfd/rtsx_pci.h
+@@ -756,6 +756,59 @@
+ #define PCR_SETTING_REG2 0x814
+ #define PCR_SETTING_REG3 0x747
+
++/* Phy bits */
++#define PHY_PCR_FORCE_CODE 0xB000
++#define PHY_PCR_OOBS_CALI_50 0x0800
++#define PHY_PCR_OOBS_VCM_08 0x0200
++#define PHY_PCR_OOBS_SEN_90 0x0040
++#define PHY_PCR_RSSI_EN 0x0002
++
++#define PHY_RCR1_ADP_TIME 0x0100
++#define PHY_RCR1_VCO_COARSE 0x001F
++
++#define PHY_RCR2_EMPHASE_EN 0x8000
++#define PHY_RCR2_NADJR 0x4000
++#define PHY_RCR2_CDR_CP_10 0x0400
++#define PHY_RCR2_CDR_SR_2 0x0100
++#define PHY_RCR2_FREQSEL_12 0x0040
++#define PHY_RCR2_CPADJEN 0x0020
++#define PHY_RCR2_CDR_SC_8 0x0008
++#define PHY_RCR2_CALIB_LATE 0x0002
++
++#define PHY_RDR_RXDSEL_1_9 0x4000
++
++#define PHY_TUNE_TUNEREF_1_0 0x4000
++#define PHY_TUNE_VBGSEL_1252 0x0C00
++#define PHY_TUNE_SDBUS_33 0x0200
++#define PHY_TUNE_TUNED18 0x01C0
++#define PHY_TUNE_TUNED12 0X0020
++
++#define PHY_BPCR_IBRXSEL 0x0400
++#define PHY_BPCR_IBTXSEL 0x0100
++#define PHY_BPCR_IB_FILTER 0x0080
++#define PHY_BPCR_CMIRROR_EN 0x0040
++
++#define PHY_REG_REV_RESV 0xE000
++#define PHY_REG_REV_RXIDLE_LATCHED 0x1000
++#define PHY_REG_REV_P1_EN 0x0800
++#define PHY_REG_REV_RXIDLE_EN 0x0400
++#define PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 0x0040
++#define PHY_REG_REV_STOP_CLKRD 0x0020
++#define PHY_REG_REV_RX_PWST 0x0008
++#define PHY_REG_REV_STOP_CLKWR 0x0004
++
++#define PHY_FLD3_TIMER_4 0x7800
++#define PHY_FLD3_TIMER_6 0x00E0
++#define PHY_FLD3_RXDELINK 0x0004
++
++#define PHY_FLD4_FLDEN_SEL 0x4000
++#define PHY_FLD4_REQ_REF 0x2000
++#define PHY_FLD4_RXAMP_OFF 0x1000
++#define PHY_FLD4_REQ_ADDA 0x0800
++#define PHY_FLD4_BER_COUNT 0x00E0
++#define PHY_FLD4_BER_TIMER 0x000A
++#define PHY_FLD4_BER_CHK_EN 0x0001
++
+ #define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
+
+ struct rtsx_pcr;
+diff --git a/include/linux/msg.h b/include/linux/msg.h
+index 391af8d1..e21f9d44 100644
+--- a/include/linux/msg.h
++++ b/include/linux/msg.h
+@@ -6,9 +6,9 @@
+
+ /* one msg_msg structure for each message */
+ struct msg_msg {
+- struct list_head m_list;
+- long m_type;
+- int m_ts; /* message text size */
++ struct list_head m_list;
++ long m_type;
++ size_t m_ts; /* message text size */
+ struct msg_msgseg* next;
+ void *security;
+ /* the actual message follows immediately */
+diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
+index 4b02512e..5f487d77 100644
+--- a/include/linux/mtd/map.h
++++ b/include/linux/mtd/map.h
+@@ -365,7 +365,7 @@ static inline map_word map_word_load_partial(struct map_info *map, map_word orig
+ bitpos = (map_bankwidth(map)-1-i)*8;
+ #endif
+ orig.x[0] &= ~(0xff << bitpos);
+- orig.x[0] |= buf[i-start] << bitpos;
++ orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
+ }
+ }
+ return orig;
+@@ -384,7 +384,7 @@ static inline map_word map_word_ff(struct map_info *map)
+
+ if (map_bankwidth(map) < MAP_FF_LIMIT) {
+ int bw = 8 * map_bankwidth(map);
+- r.x[0] = (1 << bw) - 1;
++ r.x[0] = (1UL << bw) - 1;
+ } else {
+ for (i=0; i<map_words(map); i++)
+ r.x[i] = ~0UL;
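Both map.h fixes widen a shift's left operand before it can outgrow int: buf[i-start] is promoted only to int, so shifting it by 56 is undefined, and (1 << bw) - 1 is likewise undefined once bw reaches 32. A short demonstration, assuming a 64-bit unsigned long:

    #include <stdio.h>

    int main(void)
    {
        unsigned char byte = 0xAB;
        int bitpos = 56;                       /* top byte of a 64-bit word */

        /* 'byte << bitpos' would shift an int by 56 bits (undefined);
         * widen the operand first, as the patch does. */
        unsigned long word = (unsigned long)byte << bitpos;

        int bw = 32;
        /* '(1 << bw) - 1' is undefined for bw >= 32; with 1UL the shift
         * stays within the 64-bit unsigned long. */
        unsigned long mask = (1UL << bw) - 1;

        printf("word=%lx mask=%lx\n", word, mask);
        return 0;
    }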
+diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
+index aa870a4d..57e75ae9 100644
+--- a/include/linux/rbtree.h
++++ b/include/linux/rbtree.h
+@@ -85,6 +85,11 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
+ *rb_link = node;
+ }
+
++#define rb_entry_safe(ptr, type, member) \
++ ({ typeof(ptr) ____ptr = (ptr); \
++ ____ptr ? rb_entry(____ptr, type, member) : NULL; \
++ })
++
+ /**
+ * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
+ * given type safe against removal of rb_node entry
+@@ -95,12 +100,9 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
+ * @field: the name of the rb_node field within 'type'.
+ */
+ #define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+- for (pos = rb_entry(rb_first_postorder(root), typeof(*pos), field),\
+- n = rb_entry(rb_next_postorder(&pos->field), \
+- typeof(*pos), field); \
+- &pos->field; \
+- pos = n, \
+- n = rb_entry(rb_next_postorder(&pos->field), \
+- typeof(*pos), field))
++ for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
++ pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
++ typeof(*pos), field); 1; }); \
++ pos = n)
+
+ #endif /* _LINUX_RBTREE_H */
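rb_entry_safe() evaluates its pointer argument once inside a GNU statement expression and maps NULL to NULL, so the rewritten postorder iterator terminates cleanly instead of applying container_of to a NULL node. The same pattern reduced to a standalone sketch (GNU C, as in the kernel):

    #include <stddef.h>
    #include <stdio.h>

    struct rb_node { struct rb_node *left, *right; };
    struct item { int key; struct rb_node node; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Evaluate ptr once; only apply container_of to non-NULL nodes. */
    #define rb_entry_safe(ptr, type, member) \
        ({ typeof(ptr) ____ptr = (ptr); \
           ____ptr ? container_of(____ptr, type, member) : NULL; })

    int main(void)
    {
        struct item it = { .key = 7 };
        struct rb_node *n = &it.node, *none = NULL;

        printf("%d\n", rb_entry_safe(n, struct item, node)->key);   /* 7 */
        printf("%p\n", (void *)rb_entry_safe(none, struct item, node));
        return 0;
    }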
+diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
+index 5c7ab17c..712ea360 100644
+--- a/include/trace/ftrace.h
++++ b/include/trace/ftrace.h
+@@ -372,7 +372,8 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
+ __data_size += (len) * sizeof(type);
+
+ #undef __string
+-#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
++#define __string(item, src) __dynamic_array(char, item, \
++ strlen((src) ? (const char *)(src) : "(null)") + 1)
+
+ #undef DECLARE_EVENT_CLASS
+ #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+@@ -502,7 +503,7 @@ static inline notrace int ftrace_get_offsets_##call( \
+
+ #undef __assign_str
+ #define __assign_str(dst, src) \
+- strcpy(__get_str(dst), src);
++ strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+ #undef TP_fast_assign
+ #define TP_fast_assign(args...) args
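Both __string (which sizes the ring-buffer slot) and __assign_str (which fills it) now substitute the literal "(null)" for a NULL source, keeping the reserved length and the copied bytes in agreement and keeping strlen/strcpy away from a NULL pointer. The guard in isolation:

    #include <stdio.h>
    #include <string.h>

    /* The length computation and the copy must apply the same
     * substitution, or the reserved slot and the copied data disagree. */
    #define SAFE_STR(s) ((s) ? (const char *)(s) : "(null)")

    int main(void)
    {
        const char *src = NULL;
        char buf[32];

        size_t need = strlen(SAFE_STR(src)) + 1;  /* what __string reserves */
        strcpy(buf, SAFE_STR(src));               /* what __assign_str copies */

        printf("%zu %s\n", need, buf);            /* 7 (null) */
        return 0;
    }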
+diff --git a/ipc/msgutil.c b/ipc/msgutil.c
+index 491e71f2..7e709597 100644
+--- a/ipc/msgutil.c
++++ b/ipc/msgutil.c
+@@ -41,15 +41,15 @@ struct msg_msgseg {
+ /* the next part of the message follows immediately */
+ };
+
+-#define DATALEN_MSG (int)(PAGE_SIZE-sizeof(struct msg_msg))
+-#define DATALEN_SEG (int)(PAGE_SIZE-sizeof(struct msg_msgseg))
++#define DATALEN_MSG ((size_t)PAGE_SIZE-sizeof(struct msg_msg))
++#define DATALEN_SEG ((size_t)PAGE_SIZE-sizeof(struct msg_msgseg))
+
+
+-static struct msg_msg *alloc_msg(int len)
++static struct msg_msg *alloc_msg(size_t len)
+ {
+ struct msg_msg *msg;
+ struct msg_msgseg **pseg;
+- int alen;
++ size_t alen;
+
+ alen = min(len, DATALEN_MSG);
+ msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
+@@ -80,12 +80,12 @@ out_err:
+ return NULL;
+ }
+
+-struct msg_msg *load_msg(const void __user *src, int len)
++struct msg_msg *load_msg(const void __user *src, size_t len)
+ {
+ struct msg_msg *msg;
+ struct msg_msgseg *seg;
+ int err = -EFAULT;
+- int alen;
++ size_t alen;
+
+ msg = alloc_msg(len);
+ if (msg == NULL)
+@@ -117,8 +117,8 @@ out_err:
+ struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst)
+ {
+ struct msg_msgseg *dst_pseg, *src_pseg;
+- int len = src->m_ts;
+- int alen;
++ size_t len = src->m_ts;
++ size_t alen;
+
+ BUG_ON(dst == NULL);
+ if (src->m_ts > dst->m_ts)
+@@ -147,9 +147,9 @@ struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst)
+ return ERR_PTR(-ENOSYS);
+ }
+ #endif
+-int store_msg(void __user *dest, struct msg_msg *msg, int len)
++int store_msg(void __user *dest, struct msg_msg *msg, size_t len)
+ {
+- int alen;
++ size_t alen;
+ struct msg_msgseg *seg;
+
+ alen = min(len, DATALEN_MSG);
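The msgutil conversion moves every message length from int to size_t; with DATALEN_MSG previously cast to int, a sufficiently large length could turn negative on the signed side and defeat the min() clamp that splits a message into page-sized segments. A sketch of that chunking loop with consistently unsigned sizes; the header size here is invented:

    #include <stdio.h>

    #define PAGE_SIZE   4096UL
    #define DATALEN_MSG ((size_t)PAGE_SIZE - 32)   /* 32: pretend header */

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    int main(void)
    {
        size_t len = 10000;   /* total payload, split into page-sized parts */
        size_t chunks = 0;

        /* Same shape as alloc_msg(): the first DATALEN_MSG bytes go
         * inline, the rest into segments; everything stays unsigned. */
        while (len > 0) {
            size_t alen = min_sz(len, DATALEN_MSG);
            len -= alen;
            chunks++;
        }
        printf("%zu chunks\n", chunks);   /* 3 */
        return 0;
    }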
+diff --git a/ipc/util.h b/ipc/util.h
+index f2f5036f..59d78aa9 100644
+--- a/ipc/util.h
++++ b/ipc/util.h
+@@ -148,9 +148,9 @@ int ipc_parse_version (int *cmd);
+ #endif
+
+ extern void free_msg(struct msg_msg *msg);
+-extern struct msg_msg *load_msg(const void __user *src, int len);
++extern struct msg_msg *load_msg(const void __user *src, size_t len);
+ extern struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst);
+-extern int store_msg(void __user *dest, struct msg_msg *msg, int len);
++extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len);
+
+ extern void recompute_msgmni(struct ipc_namespace *);
+
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 7b0e23a7..7ddfd8a0 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -613,7 +613,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
+ int rc = 0;
+ uid_t uid = from_kuid(&init_user_ns, current_uid());
+
+- if (!audit_enabled) {
++ if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
+ *ab = NULL;
+ return rc;
+ }
+@@ -659,6 +659,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (msg_type) {
+ case AUDIT_GET:
++ status_set.mask = 0;
+ status_set.enabled = audit_enabled;
+ status_set.failure = audit_failure;
+ status_set.pid = audit_pid;
+@@ -670,7 +671,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ &status_set, sizeof(status_set));
+ break;
+ case AUDIT_SET:
+- if (nlh->nlmsg_len < sizeof(struct audit_status))
++ if (nlmsg_len(nlh) < sizeof(struct audit_status))
+ return -EINVAL;
+ status_get = (struct audit_status *)data;
+ if (status_get->mask & AUDIT_STATUS_ENABLED) {
+@@ -832,7 +833,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ memset(&s, 0, sizeof(s));
+ /* guard against past and future API changes */
+- memcpy(&s, data, min(sizeof(s), (size_t)nlh->nlmsg_len));
++ memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
+ if ((s.enabled != 0 && s.enabled != 1) ||
+ (s.log_passwd != 0 && s.log_passwd != 1))
+ return -EINVAL;
+@@ -1536,6 +1537,26 @@ void audit_log_name(struct audit_context *context, struct audit_names *n,
+ }
+ }
+
++ /* log the audit_names record type */
++ audit_log_format(ab, " nametype=");
++ switch(n->type) {
++ case AUDIT_TYPE_NORMAL:
++ audit_log_format(ab, "NORMAL");
++ break;
++ case AUDIT_TYPE_PARENT:
++ audit_log_format(ab, "PARENT");
++ break;
++ case AUDIT_TYPE_CHILD_DELETE:
++ audit_log_format(ab, "DELETE");
++ break;
++ case AUDIT_TYPE_CHILD_CREATE:
++ audit_log_format(ab, "CREATE");
++ break;
++ default:
++ audit_log_format(ab, "UNKNOWN");
++ break;
++ }
++
+ audit_log_fcaps(ab, n);
+ audit_log_end(ab);
+ }
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 8bd9cfdc..5c9127dc 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -90,6 +90,14 @@ static DEFINE_MUTEX(cgroup_mutex);
+ static DEFINE_MUTEX(cgroup_root_mutex);
+
+ /*
++ * cgroup destruction makes heavy use of work items and there can be a lot
++ * of concurrent destructions. Use a separate workqueue so that cgroup
++ * destruction work items don't end up filling up max_active of system_wq
++ * which may lead to deadlock.
++ */
++static struct workqueue_struct *cgroup_destroy_wq;
++
++/*
+ * Generate an array of cgroup subsystem pointers. At boot time, this is
+ * populated with the built in subsystems, and modular subsystems are
+ * registered after that. The mutable section of this array is protected by
+@@ -223,6 +231,7 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp);
+ static int cgroup_destroy_locked(struct cgroup *cgrp);
+ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
+ bool is_add);
++static int cgroup_file_release(struct inode *inode, struct file *file);
+
+ /**
+ * cgroup_css - obtain a cgroup's css for the specified subsystem
+@@ -908,7 +917,7 @@ static void cgroup_free_rcu(struct rcu_head *head)
+ struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);
+
+ INIT_WORK(&cgrp->destroy_work, cgroup_free_fn);
+- schedule_work(&cgrp->destroy_work);
++ queue_work(cgroup_destroy_wq, &cgrp->destroy_work);
+ }
+
+ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
+@@ -2463,7 +2472,7 @@ static const struct file_operations cgroup_seqfile_operations = {
+ .read = seq_read,
+ .write = cgroup_file_write,
+ .llseek = seq_lseek,
+- .release = single_release,
++ .release = cgroup_file_release,
+ };
+
+ static int cgroup_file_open(struct inode *inode, struct file *file)
+@@ -2524,6 +2533,8 @@ static int cgroup_file_release(struct inode *inode, struct file *file)
+ ret = cft->release(inode, file);
+ if (css->ss)
+ css_put(css);
++ if (file->f_op == &cgroup_seqfile_operations)
++ single_release(inode, file);
+ return ret;
+ }
+
+@@ -4306,7 +4317,7 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
+ * css_put(). dput() requires process context which we don't have.
+ */
+ INIT_WORK(&css->destroy_work, css_free_work_fn);
+- schedule_work(&css->destroy_work);
++ queue_work(cgroup_destroy_wq, &css->destroy_work);
+ }
+
+ static void css_release(struct percpu_ref *ref)
+@@ -4603,7 +4614,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+- schedule_work(&css->destroy_work);
++ queue_work(cgroup_destroy_wq, &css->destroy_work);
+ }
+
+ /**
+@@ -5139,6 +5150,22 @@ out:
+ return err;
+ }
+
++static int __init cgroup_wq_init(void)
++{
++ /*
++ * There isn't much point in executing destruction path in
++ * parallel. Good chunk is serialized with cgroup_mutex anyway.
++ * Use 1 for @max_active.
++ *
++ * We would prefer to do this in cgroup_init() above, but that
++ * is called before init_workqueues(): so leave this until after.
++ */
++ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
++ BUG_ON(!cgroup_destroy_wq);
++ return 0;
++}
++core_initcall(cgroup_wq_init);
++
+ /*
+ * proc_cgroup_show()
+ * - Print task's cgroup paths into seq_file, one line for each hierarchy
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 6bf981e1..4772034b 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -1033,8 +1033,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
+ need_loop = task_has_mempolicy(tsk) ||
+ !nodes_intersects(*newmems, tsk->mems_allowed);
+
+- if (need_loop)
++ if (need_loop) {
++ local_irq_disable();
+ write_seqcount_begin(&tsk->mems_allowed_seq);
++ }
+
+ nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
+ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
+@@ -1042,8 +1044,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
+ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
+ tsk->mems_allowed = *newmems;
+
+- if (need_loop)
++ if (need_loop) {
+ write_seqcount_end(&tsk->mems_allowed_seq);
++ local_irq_enable();
++ }
+
+ task_unlock(tsk);
+ }
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 514bcfd8..3e59f951 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -956,7 +956,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ goto out_mput;
+ }
+
+- sched_setscheduler(t, SCHED_FIFO, &param);
++ sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
+
+ /*
+ * We keep the reference to the task struct even if
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index 98c3b34a..b38109e2 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -792,7 +792,8 @@ void free_basic_memory_bitmaps(void)
+ {
+ struct memory_bitmap *bm1, *bm2;
+
+- BUG_ON(!(forbidden_pages_map && free_pages_map));
++ if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
++ return;
+
+ bm1 = forbidden_pages_map;
+ bm2 = free_pages_map;
+@@ -1402,7 +1403,11 @@ int hibernate_preallocate_memory(void)
+ * highmem and non-highmem zones separately.
+ */
+ pages_highmem = preallocate_image_highmem(highmem / 2);
+- alloc = (count - max_size) - pages_highmem;
++ alloc = count - max_size;
++ if (alloc > pages_highmem)
++ alloc -= pages_highmem;
++ else
++ alloc = 0;
+ pages = preallocate_image_memory(alloc, avail_normal);
+ if (pages < alloc) {
+ /* We have exhausted non-highmem pages, try highmem. */
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index 957f0616..ffc931c3 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -70,6 +70,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
+ data->swap = swsusp_resume_device ?
+ swap_type_of(swsusp_resume_device, 0, NULL) : -1;
+ data->mode = O_RDONLY;
++ data->free_bitmaps = false;
+ error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
+ if (error)
+ pm_notifier_call_chain(PM_POST_HIBERNATION);
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index eec50fce..88c9c65a 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -490,7 +490,7 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
+ clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
+
+ if (!alarmtimer_get_rtcdev())
+- return -ENOTSUPP;
++ return -EINVAL;
+
+ return hrtimer_get_res(baseid, tp);
+ }
+@@ -507,7 +507,7 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
+ struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
+
+ if (!alarmtimer_get_rtcdev())
+- return -ENOTSUPP;
++ return -EINVAL;
+
+ *tp = ktime_to_timespec(base->gettime());
+ return 0;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 03cf44ac..f3bd09ee 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -367,9 +367,6 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
+
+ static int __register_ftrace_function(struct ftrace_ops *ops)
+ {
+- if (unlikely(ftrace_disabled))
+- return -ENODEV;
+-
+ if (FTRACE_WARN_ON(ops == &global_ops))
+ return -EINVAL;
+
+@@ -428,9 +425,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
+ {
+ int ret;
+
+- if (ftrace_disabled)
+- return -ENODEV;
+-
+ if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
+ return -EBUSY;
+
+@@ -2088,10 +2082,15 @@ static void ftrace_startup_enable(int command)
+ static int ftrace_startup(struct ftrace_ops *ops, int command)
+ {
+ bool hash_enable = true;
++ int ret;
+
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
++ ret = __register_ftrace_function(ops);
++ if (ret)
++ return ret;
++
+ ftrace_start_up++;
+ command |= FTRACE_UPDATE_CALLS;
+
+@@ -2113,12 +2112,17 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
+ return 0;
+ }
+
+-static void ftrace_shutdown(struct ftrace_ops *ops, int command)
++static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ {
+ bool hash_disable = true;
++ int ret;
+
+ if (unlikely(ftrace_disabled))
+- return;
++ return -ENODEV;
++
++ ret = __unregister_ftrace_function(ops);
++ if (ret)
++ return ret;
+
+ ftrace_start_up--;
+ /*
+@@ -2153,9 +2157,10 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+ }
+
+ if (!command || !ftrace_enabled)
+- return;
++ return 0;
+
+ ftrace_run_update_code(command);
++ return 0;
+ }
+
+ static void ftrace_startup_sysctl(void)
+@@ -3060,16 +3065,13 @@ static void __enable_ftrace_function_probe(void)
+ if (i == FTRACE_FUNC_HASHSIZE)
+ return;
+
+- ret = __register_ftrace_function(&trace_probe_ops);
+- if (!ret)
+- ret = ftrace_startup(&trace_probe_ops, 0);
++ ret = ftrace_startup(&trace_probe_ops, 0);
+
+ ftrace_probe_registered = 1;
+ }
+
+ static void __disable_ftrace_function_probe(void)
+ {
+- int ret;
+ int i;
+
+ if (!ftrace_probe_registered)
+@@ -3082,9 +3084,7 @@ static void __disable_ftrace_function_probe(void)
+ }
+
+ /* no more funcs left */
+- ret = __unregister_ftrace_function(&trace_probe_ops);
+- if (!ret)
+- ftrace_shutdown(&trace_probe_ops, 0);
++ ftrace_shutdown(&trace_probe_ops, 0);
+
+ ftrace_probe_registered = 0;
+ }
+@@ -4290,12 +4290,15 @@ core_initcall(ftrace_nodyn_init);
+ static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+ static inline void ftrace_startup_enable(int command) { }
+ /* Keep as macros so we do not need to define the commands */
+-# define ftrace_startup(ops, command) \
+- ({ \
+- (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
+- 0; \
++# define ftrace_startup(ops, command) \
++ ({ \
++ int ___ret = __register_ftrace_function(ops); \
++ if (!___ret) \
++ (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
++ ___ret; \
+ })
+-# define ftrace_shutdown(ops, command) do { } while (0)
++# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
++
+ # define ftrace_startup_sysctl() do { } while (0)
+ # define ftrace_shutdown_sysctl() do { } while (0)
+
+@@ -4695,9 +4698,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
+
+ mutex_lock(&ftrace_lock);
+
+- ret = __register_ftrace_function(ops);
+- if (!ret)
+- ret = ftrace_startup(ops, 0);
++ ret = ftrace_startup(ops, 0);
+
+ mutex_unlock(&ftrace_lock);
+
+@@ -4716,9 +4717,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
+ int ret;
+
+ mutex_lock(&ftrace_lock);
+- ret = __unregister_ftrace_function(ops);
+- if (!ret)
+- ftrace_shutdown(ops, 0);
++ ret = ftrace_shutdown(ops, 0);
+ mutex_unlock(&ftrace_lock);
+
+ return ret;
+@@ -4912,6 +4911,13 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+ return NOTIFY_DONE;
+ }
+
++/* Just a place holder for function graph */
++static struct ftrace_ops fgraph_ops __read_mostly = {
++ .func = ftrace_stub,
++ .flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
++ FTRACE_OPS_FL_RECURSION_SAFE,
++};
++
+ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc)
+ {
+@@ -4938,7 +4944,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ ftrace_graph_return = retfunc;
+ ftrace_graph_entry = entryfunc;
+
+- ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
++ ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
+
+ out:
+ mutex_unlock(&ftrace_lock);
+@@ -4955,7 +4961,7 @@ void unregister_ftrace_graph(void)
+ ftrace_graph_active--;
+ ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+ ftrace_graph_entry = ftrace_graph_entry_stub;
+- ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
++ ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
+ unregister_pm_notifier(&ftrace_suspend_notifier);
+ unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 987293d0..93c26528 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -305,6 +305,9 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
+ /* I: attributes used when instantiating standard unbound pools on demand */
+ static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
+
++/* I: attributes used when instantiating ordered pools on demand */
++static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
++
+ struct workqueue_struct *system_wq __read_mostly;
+ EXPORT_SYMBOL(system_wq);
+ struct workqueue_struct *system_highpri_wq __read_mostly;
+@@ -4106,7 +4109,7 @@ out_unlock:
+ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
+ {
+ bool highpri = wq->flags & WQ_HIGHPRI;
+- int cpu;
++ int cpu, ret;
+
+ if (!(wq->flags & WQ_UNBOUND)) {
+ wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
+@@ -4126,6 +4129,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
+ mutex_unlock(&wq->mutex);
+ }
+ return 0;
++ } else if (wq->flags & __WQ_ORDERED) {
++ ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
++ /* there should only be single pwq for ordering guarantee */
++ WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
++ wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
++ "ordering guarantee broken for workqueue %s\n", wq->name);
++ return ret;
+ } else {
+ return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
+ }
+@@ -5051,13 +5061,23 @@ static int __init init_workqueues(void)
+ }
+ }
+
+- /* create default unbound wq attrs */
++ /* create default unbound and ordered wq attrs */
+ for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
+ struct workqueue_attrs *attrs;
+
+ BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+ attrs->nice = std_nice[i];
+ unbound_std_wq_attrs[i] = attrs;
++
++ /*
++ * An ordered wq should have only one pwq as ordering is
++ * guaranteed by max_active which is enforced by pwqs.
++ * Turn off NUMA so that dfl_pwq is used for all nodes.
++ */
++ BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
++ attrs->nice = std_nice[i];
++ attrs->no_numa = true;
++ ordered_wq_attrs[i] = attrs;
+ }
+
+ system_wq = alloc_workqueue("events", 0, 0);
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 26559bdb..d76555c4 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -27,6 +27,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/ioport.h>
+ #include <linux/dcache.h>
++#include <linux/cred.h>
+ #include <net/addrconf.h>
+
+ #include <asm/page.h> /* for PAGE_SIZE */
+@@ -1312,11 +1313,37 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ spec.field_width = default_width;
+ return string(buf, end, "pK-error", spec);
+ }
+- if (!((kptr_restrict == 0) ||
+- (kptr_restrict == 1 &&
+- has_capability_noaudit(current, CAP_SYSLOG))))
++
++ switch (kptr_restrict) {
++ case 0:
++ /* Always print %pK values */
++ break;
++ case 1: {
++ /*
++ * Only print the real pointer value if the current
++ * process has CAP_SYSLOG and is running with the
++ * same credentials it started with. This is because
++ * access to files is checked at open() time, but %pK
++ * checks permission at read() time. We don't want to
++ * leak pointer values if a binary opens a file using
++ * %pK and then elevates privileges before reading it.
++ */
++ const struct cred *cred = current_cred();
++
++ if (!has_capability_noaudit(current, CAP_SYSLOG) ||
++ !uid_eq(cred->euid, cred->uid) ||
++ !gid_eq(cred->egid, cred->gid))
++ ptr = NULL;
++ break;
++ }
++ case 2:
++ default:
++ /* Always print 0's for %pK */
+ ptr = NULL;
++ break;
++ }
+ break;
++
+ case 'N':
+ switch (fmt[1]) {
+ case 'F':
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 9d548512..362e5f13 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1856,7 +1856,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct vm_area_struct *vma;
+ struct vm_unmapped_area_info info;
+
+- if (len > TASK_SIZE)
++ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+@@ -1865,7 +1865,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+@@ -1895,7 +1895,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ struct vm_unmapped_area_info info;
+
+ /* requested length too big for entire address space */
+- if (len > TASK_SIZE)
++ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+@@ -1905,14 +1905,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+- info.low_limit = PAGE_SIZE;
++ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.high_limit = mm->mmap_base;
+ info.align_mask = 0;
+ addr = vm_unmapped_area(&info);
+diff --git a/mm/zswap.c b/mm/zswap.c
+index d93510c6..6b862518 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -387,7 +387,7 @@ static void zswap_free_entry(struct zswap_tree *tree, struct zswap_entry *entry)
+ enum zswap_get_swap_ret {
+ ZSWAP_SWAPCACHE_NEW,
+ ZSWAP_SWAPCACHE_EXIST,
+- ZSWAP_SWAPCACHE_NOMEM
++ ZSWAP_SWAPCACHE_FAIL,
+ };
+
+ /*
+@@ -401,9 +401,10 @@ enum zswap_get_swap_ret {
+ * added to the swap cache, and returned in retpage.
+ *
+ * If success, the swap cache page is returned in retpage
+- * Returns 0 if page was already in the swap cache, page is not locked
+- * Returns 1 if the new page needs to be populated, page is locked
+- * Returns <0 on error
++ * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
++ * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
++ * the new page is added to swapcache and locked
++ * Returns ZSWAP_SWAPCACHE_FAIL on error
+ */
+ static int zswap_get_swap_cache_page(swp_entry_t entry,
+ struct page **retpage)
+@@ -475,7 +476,7 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
+ if (new_page)
+ page_cache_release(new_page);
+ if (!found_page)
+- return ZSWAP_SWAPCACHE_NOMEM;
++ return ZSWAP_SWAPCACHE_FAIL;
+ *retpage = found_page;
+ return ZSWAP_SWAPCACHE_EXIST;
+ }
+@@ -529,11 +530,11 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
+
+ /* try to allocate swap cache page */
+ switch (zswap_get_swap_cache_page(swpentry, &page)) {
+- case ZSWAP_SWAPCACHE_NOMEM: /* no memory */
++ case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
+ ret = -ENOMEM;
+ goto fail;
+
+- case ZSWAP_SWAPCACHE_EXIST: /* page is unlocked */
++ case ZSWAP_SWAPCACHE_EXIST:
+ /* page is already in the swap cache, ignore for now */
+ page_cache_release(page);
+ ret = -EEXIST;
+@@ -591,7 +592,12 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
+
+ fail:
+ spin_lock(&tree->lock);
+- zswap_entry_put(entry);
++ refcount = zswap_entry_put(entry);
++ if (refcount <= 0) {
++ /* invalidate happened, consider writeback as success */
++ zswap_free_entry(tree, entry);
++ ret = 0;
++ }
+ spin_unlock(&tree->lock);
+ return ret;
+ }
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index eeb71480..d4397eba 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -254,10 +254,10 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
+ rdev = container_of(wk, struct cfg80211_registered_device,
+ sched_scan_results_wk);
+
+- request = rdev->sched_scan_req;
+-
+ rtnl_lock();
+
++ request = rdev->sched_scan_req;
++
+ /* we don't have sched_scan_req anymore if the scan is stopping */
+ if (request) {
+ if (request->flags & NL80211_SCAN_FLAG_FLUSH) {
+diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
+index da4b8b23..6235d052 100644
+--- a/security/selinux/netlabel.c
++++ b/security/selinux/netlabel.c
+@@ -442,8 +442,7 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
+ sksec->nlbl_state != NLBL_CONNLABELED)
+ return 0;
+
+- local_bh_disable();
+- bh_lock_sock_nested(sk);
++ lock_sock(sk);
+
+ /* connected sockets are allowed to disconnect when the address family
+ * is set to AF_UNSPEC, if that is what is happening we want to reset
+@@ -464,7 +463,6 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
+ sksec->nlbl_state = NLBL_CONNLABELED;
+
+ socket_connect_return:
+- bh_unlock_sock(sk);
+- local_bh_enable();
++ release_sock(sk);
+ return rc;
+ }
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 3067ed4f..c4671d00 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -2506,12 +2506,8 @@ static int create_out_jack_modes(struct hda_codec *codec, int num_pins,
+
+ for (i = 0; i < num_pins; i++) {
+ hda_nid_t pin = pins[i];
+- if (pin == spec->hp_mic_pin) {
+- int ret = create_hp_mic_jack_mode(codec, pin);
+- if (ret < 0)
+- return ret;
++ if (pin == spec->hp_mic_pin)
+ continue;
+- }
+ if (get_out_jack_num_items(codec, pin) > 1) {
+ struct snd_kcontrol_new *knew;
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+@@ -2764,7 +2760,7 @@ static int hp_mic_jack_mode_put(struct snd_kcontrol *kcontrol,
+ val &= ~(AC_PINCTL_VREFEN | PIN_HP);
+ val |= get_vref_idx(vref_caps, idx) | PIN_IN;
+ } else
+- val = snd_hda_get_default_vref(codec, nid);
++ val = snd_hda_get_default_vref(codec, nid) | PIN_IN;
+ }
+ snd_hda_set_pin_ctl_cache(codec, nid, val);
+ call_hp_automute(codec, NULL);
+@@ -2784,9 +2780,6 @@ static int create_hp_mic_jack_mode(struct hda_codec *codec, hda_nid_t pin)
+ struct hda_gen_spec *spec = codec->spec;
+ struct snd_kcontrol_new *knew;
+
+- if (get_out_jack_num_items(codec, pin) <= 1 &&
+- get_in_jack_num_items(codec, pin) <= 1)
+- return 0; /* no need */
+ knew = snd_hda_gen_add_kctl(spec, "Headphone Mic Jack Mode",
+ &hp_mic_jack_mode_enum);
+ if (!knew)
+@@ -2815,6 +2808,42 @@ static int add_loopback_list(struct hda_gen_spec *spec, hda_nid_t mix, int idx)
+ return 0;
+ }
+
++/* return true if either a volume or a mute amp is found for the given
++ * aamix path; the amp has to be either in the mixer node or its direct leaf
++ */
++static bool look_for_mix_leaf_ctls(struct hda_codec *codec, hda_nid_t mix_nid,
++ hda_nid_t pin, unsigned int *mix_val,
++ unsigned int *mute_val)
++{
++ int idx, num_conns;
++ const hda_nid_t *list;
++ hda_nid_t nid;
++
++ idx = snd_hda_get_conn_index(codec, mix_nid, pin, true);
++ if (idx < 0)
++ return false;
++
++ *mix_val = *mute_val = 0;
++ if (nid_has_volume(codec, mix_nid, HDA_INPUT))
++ *mix_val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
++ if (nid_has_mute(codec, mix_nid, HDA_INPUT))
++ *mute_val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
++ if (*mix_val && *mute_val)
++ return true;
++
++ /* check leaf node */
++ num_conns = snd_hda_get_conn_list(codec, mix_nid, &list);
++ if (num_conns < idx)
++ return false;
++ nid = list[idx];
++ if (!*mix_val && nid_has_volume(codec, nid, HDA_OUTPUT))
++ *mix_val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
++ if (!*mute_val && nid_has_mute(codec, nid, HDA_OUTPUT))
++ *mute_val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
++
++ return *mix_val || *mute_val;
++}
++
+ /* create input playback/capture controls for the given pin */
+ static int new_analog_input(struct hda_codec *codec, int input_idx,
+ hda_nid_t pin, const char *ctlname, int ctlidx,
+@@ -2822,12 +2851,11 @@ static int new_analog_input(struct hda_codec *codec, int input_idx,
+ {
+ struct hda_gen_spec *spec = codec->spec;
+ struct nid_path *path;
+- unsigned int val;
++ unsigned int mix_val, mute_val;
+ int err, idx;
+
+- if (!nid_has_volume(codec, mix_nid, HDA_INPUT) &&
+- !nid_has_mute(codec, mix_nid, HDA_INPUT))
+- return 0; /* no need for analog loopback */
++ if (!look_for_mix_leaf_ctls(codec, mix_nid, pin, &mix_val, &mute_val))
++ return 0;
+
+ path = snd_hda_add_new_path(codec, pin, mix_nid, 0);
+ if (!path)
+@@ -2836,20 +2864,18 @@ static int new_analog_input(struct hda_codec *codec, int input_idx,
+ spec->loopback_paths[input_idx] = snd_hda_get_path_idx(codec, path);
+
+ idx = path->idx[path->depth - 1];
+- if (nid_has_volume(codec, mix_nid, HDA_INPUT)) {
+- val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
+- err = __add_pb_vol_ctrl(spec, HDA_CTL_WIDGET_VOL, ctlname, ctlidx, val);
++ if (mix_val) {
++ err = __add_pb_vol_ctrl(spec, HDA_CTL_WIDGET_VOL, ctlname, ctlidx, mix_val);
+ if (err < 0)
+ return err;
+- path->ctls[NID_PATH_VOL_CTL] = val;
++ path->ctls[NID_PATH_VOL_CTL] = mix_val;
+ }
+
+- if (nid_has_mute(codec, mix_nid, HDA_INPUT)) {
+- val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
+- err = __add_pb_sw_ctrl(spec, HDA_CTL_WIDGET_MUTE, ctlname, ctlidx, val);
++ if (mute_val) {
++ err = __add_pb_sw_ctrl(spec, HDA_CTL_WIDGET_MUTE, ctlname, ctlidx, mute_val);
+ if (err < 0)
+ return err;
+- path->ctls[NID_PATH_MUTE_CTL] = val;
++ path->ctls[NID_PATH_MUTE_CTL] = mute_val;
+ }
+
+ path->active = true;
+@@ -4383,6 +4409,17 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
+ if (err < 0)
+ return err;
+
++ /* create "Headphone Mic Jack Mode" if no input selection is
++ * available (or user specifies add_jack_modes hint)
++ */
++ if (spec->hp_mic_pin &&
++ (spec->auto_mic || spec->input_mux.num_items == 1 ||
++ spec->add_jack_modes)) {
++ err = create_hp_mic_jack_mode(codec, spec->hp_mic_pin);
++ if (err < 0)
++ return err;
++ }
++
+ if (spec->add_jack_modes) {
+ if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+ err = create_out_jack_modes(codec, cfg->line_outs,
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 2f39631f..f0f54829 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1771,6 +1771,7 @@ enum {
+ ALC889_FIXUP_IMAC91_VREF,
+ ALC882_FIXUP_INV_DMIC,
+ ALC882_FIXUP_NO_PRIMARY_HP,
++ ALC887_FIXUP_ASUS_BASS,
+ };
+
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -2094,6 +2095,13 @@ static const struct hda_fixup alc882_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc882_fixup_no_primary_hp,
+ },
++ [ALC887_FIXUP_ASUS_BASS] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ {0x16, 0x99130130}, /* bass speaker */
++ {}
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+@@ -2127,6 +2135,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
+ SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
+ SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
++ SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
+ SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
+ SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+@@ -4864,6 +4873,7 @@ static int patch_alc662(struct hda_codec *codec)
+ case 0x10ec0272:
+ case 0x10ec0663:
+ case 0x10ec0665:
++ case 0x10ec0668:
+ set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
+ break;
+ case 0x10ec0273:
+@@ -4921,6 +4931,7 @@ static int patch_alc680(struct hda_codec *codec)
+ */
+ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 },
++ { .id = 0x10ec0231, .name = "ALC231", .patch = patch_alc269 },
+ { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
+ { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 },
+ { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
+diff --git a/sound/soc/blackfin/bf5xx-i2s.c b/sound/soc/blackfin/bf5xx-i2s.c
+index 9a174fc4..39d77483 100644
+--- a/sound/soc/blackfin/bf5xx-i2s.c
++++ b/sound/soc/blackfin/bf5xx-i2s.c
+@@ -121,6 +121,7 @@ static int bf5xx_i2s_hw_params(struct snd_pcm_substream *substream,
+ bf5xx_i2s->tcr2 |= 7;
+ bf5xx_i2s->rcr2 |= 7;
+ sport_handle->wdsize = 1;
++ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ bf5xx_i2s->tcr2 |= 15;
+ bf5xx_i2s->rcr2 |= 15;
+diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
+index 2d037870..687565d0 100644
+--- a/sound/soc/codecs/ak4642.c
++++ b/sound/soc/codecs/ak4642.c
+@@ -257,7 +257,7 @@ static int ak4642_dai_startup(struct snd_pcm_substream *substream,
+ * This operation came from example code of
+ * "ASAHI KASEI AK4642" (japanese) manual p94.
+ */
+- snd_soc_write(codec, SG_SL1, PMMP | MGAIN0);
++ snd_soc_update_bits(codec, SG_SL1, PMMP | MGAIN0, PMMP | MGAIN0);
+ snd_soc_write(codec, TIMER, ZTM(0x3) | WTM(0x3));
+ snd_soc_write(codec, ALC_CTL1, ALC | LMTH0);
+ snd_soc_update_bits(codec, PW_MGMT1, PMADL, PMADL);
+diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
+index 657808ba..f38ed4d2 100644
+--- a/sound/soc/codecs/arizona.c
++++ b/sound/soc/codecs/arizona.c
+@@ -1525,6 +1525,8 @@ static void arizona_enable_fll(struct arizona_fll *fll,
+ try_wait_for_completion(&fll->ok);
+
+ regmap_update_bits(arizona->regmap, fll->base + 1,
++ ARIZONA_FLL1_FREERUN, 0);
++ regmap_update_bits(arizona->regmap, fll->base + 1,
+ ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
+ if (fll->ref_src >= 0 && fll->sync_src >= 0 &&
+ fll->ref_src != fll->sync_src)
+@@ -1543,6 +1545,8 @@ static void arizona_disable_fll(struct arizona_fll *fll)
+ struct arizona *arizona = fll->arizona;
+ bool change;
+
++ regmap_update_bits(arizona->regmap, fll->base + 1,
++ ARIZONA_FLL1_FREERUN, ARIZONA_FLL1_FREERUN);
+ regmap_update_bits_check(arizona->regmap, fll->base + 1,
+ ARIZONA_FLL1_ENA, 0, &change);
+ regmap_update_bits(arizona->regmap, fll->base + 0x11,
+diff --git a/sound/soc/codecs/cs42l52.h b/sound/soc/codecs/cs42l52.h
+index 4277012c..a935d738 100644
+--- a/sound/soc/codecs/cs42l52.h
++++ b/sound/soc/codecs/cs42l52.h
+@@ -179,7 +179,7 @@
+ #define CS42L52_MICB_CTL 0x11
+ #define CS42L52_MIC_CTL_MIC_SEL_MASK 0xBF
+ #define CS42L52_MIC_CTL_MIC_SEL_SHIFT 6
+-#define CS42L52_MIC_CTL_TYPE_MASK 0xDF
++#define CS42L52_MIC_CTL_TYPE_MASK 0x20
+ #define CS42L52_MIC_CTL_TYPE_SHIFT 5
+
+
+diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
+index bbd64384..0048ce5b 100644
+--- a/sound/soc/codecs/wm5110.c
++++ b/sound/soc/codecs/wm5110.c
+@@ -37,6 +37,47 @@ struct wm5110_priv {
+ struct arizona_fll fll[2];
+ };
+
++static const struct reg_default wm5110_sysclk_revd_patch[] = {
++ { 0x3093, 0x1001 },
++ { 0x30E3, 0x1301 },
++ { 0x3133, 0x1201 },
++ { 0x3183, 0x1501 },
++ { 0x31D3, 0x1401 },
++};
++
++static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ struct snd_soc_codec *codec = w->codec;
++ struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
++ struct regmap *regmap = codec->control_data;
++ const struct reg_default *patch = NULL;
++ int i, patch_size;
++
++ switch (arizona->rev) {
++ case 3:
++ patch = wm5110_sysclk_revd_patch;
++ patch_size = ARRAY_SIZE(wm5110_sysclk_revd_patch);
++ break;
++ default:
++ return 0;
++ }
++
++ switch (event) {
++ case SND_SOC_DAPM_POST_PMU:
++ if (patch)
++ for (i = 0; i < patch_size; i++)
++ regmap_write(regmap, patch[i].reg,
++ patch[i].def);
++ break;
++
++ default:
++ break;
++ }
++
++ return 0;
++}
++
+ static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
+ static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+ static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
+@@ -400,7 +441,7 @@ static const struct snd_kcontrol_new wm5110_aec_loopback_mux =
+
+ static const struct snd_soc_dapm_widget wm5110_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
+- 0, NULL, 0),
++ 0, wm5110_sysclk_ev, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
+ ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index 11d80f3b..871f8518 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -3722,6 +3722,8 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
+ if (ret < 0)
+ goto err_enable;
+
++ regcache_cache_only(wm8962->regmap, true);
++
+ /* The drivers should power up as needed */
+ regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
+
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index b38f3506..60b6b593 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1062,6 +1062,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
+ if (i + 1 < algs) {
+ region->len = be32_to_cpu(adsp1_alg[i + 1].dm);
+ region->len -= be32_to_cpu(adsp1_alg[i].dm);
++ region->len *= 4;
+ wm_adsp_create_control(dsp, region);
+ } else {
+ adsp_warn(dsp, "Missing length info for region DM with ID %x\n",
+@@ -1079,6 +1080,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
+ if (i + 1 < algs) {
+ region->len = be32_to_cpu(adsp1_alg[i + 1].zm);
+ region->len -= be32_to_cpu(adsp1_alg[i].zm);
++ region->len *= 4;
+ wm_adsp_create_control(dsp, region);
+ } else {
+ adsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
+@@ -1108,6 +1110,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
+ if (i + 1 < algs) {
+ region->len = be32_to_cpu(adsp2_alg[i + 1].xm);
+ region->len -= be32_to_cpu(adsp2_alg[i].xm);
++ region->len *= 4;
+ wm_adsp_create_control(dsp, region);
+ } else {
+ adsp_warn(dsp, "Missing length info for region XM with ID %x\n",
+@@ -1125,6 +1128,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
+ if (i + 1 < algs) {
+ region->len = be32_to_cpu(adsp2_alg[i + 1].ym);
+ region->len -= be32_to_cpu(adsp2_alg[i].ym);
++ region->len *= 4;
+ wm_adsp_create_control(dsp, region);
+ } else {
+ adsp_warn(dsp, "Missing length info for region YM with ID %x\n",
+@@ -1142,6 +1146,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
+ if (i + 1 < algs) {
+ region->len = be32_to_cpu(adsp2_alg[i + 1].zm);
+ region->len -= be32_to_cpu(adsp2_alg[i].zm);
++ region->len *= 4;
+ wm_adsp_create_control(dsp, region);
+ } else {
+ adsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
+diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
+index 34043c55..2fc872b2 100644
+--- a/sound/soc/fsl/imx-pcm-fiq.c
++++ b/sound/soc/fsl/imx-pcm-fiq.c
+@@ -44,7 +44,8 @@ struct imx_pcm_runtime_data {
+ struct hrtimer hrt;
+ int poll_time_ns;
+ struct snd_pcm_substream *substream;
+- atomic_t running;
++ atomic_t playing;
++ atomic_t capturing;
+ };
+
+ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
+@@ -56,7 +57,7 @@ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
+ struct pt_regs regs;
+ unsigned long delta;
+
+- if (!atomic_read(&iprtd->running))
++ if (!atomic_read(&iprtd->playing) && !atomic_read(&iprtd->capturing))
+ return HRTIMER_NORESTART;
+
+ get_fiq_regs(&regs);
+@@ -124,7 +125,6 @@ static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
+ return 0;
+ }
+
+-static int fiq_enable;
+ static int imx_pcm_fiq;
+
+ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+@@ -136,23 +136,27 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+- atomic_set(&iprtd->running, 1);
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ atomic_set(&iprtd->playing, 1);
++ else
++ atomic_set(&iprtd->capturing, 1);
+ hrtimer_start(&iprtd->hrt, ns_to_ktime(iprtd->poll_time_ns),
+ HRTIMER_MODE_REL);
+- if (++fiq_enable == 1)
+- enable_fiq(imx_pcm_fiq);
+-
++ enable_fiq(imx_pcm_fiq);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+- atomic_set(&iprtd->running, 0);
+-
+- if (--fiq_enable == 0)
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ atomic_set(&iprtd->playing, 0);
++ else
++ atomic_set(&iprtd->capturing, 0);
++ if (!atomic_read(&iprtd->playing) &&
++ !atomic_read(&iprtd->capturing))
+ disable_fiq(imx_pcm_fiq);
+-
+ break;
++
+ default:
+ return -EINVAL;
+ }
+@@ -200,7 +204,8 @@ static int snd_imx_open(struct snd_pcm_substream *substream)
+
+ iprtd->substream = substream;
+
+- atomic_set(&iprtd->running, 0);
++ atomic_set(&iprtd->playing, 0);
++ atomic_set(&iprtd->capturing, 0);
+ hrtimer_init(&iprtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ iprtd->hrt.function = snd_hrtimer_callback;
+
+diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
+index 0a193281..78f4c92e 100644
+--- a/tools/perf/ui/hist.c
++++ b/tools/perf/ui/hist.c
+@@ -117,7 +117,7 @@ static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+ struct perf_hpp *hpp, struct hist_entry *he) \
+ { \
+ return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \
+- (hpp_snprint_fn)percent_color_snprintf, true); \
++ percent_color_snprintf, true); \
+ }
+
+ #define __HPP_ENTRY_PERCENT_FN(_type, _field) \
+diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
+index 11e46da1..66e44a50 100644
+--- a/tools/perf/util/color.c
++++ b/tools/perf/util/color.c
+@@ -318,8 +318,15 @@ int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
+ return r;
+ }
+
+-int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent)
++int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...)
+ {
+- const char *color = get_percent_color(percent);
++ va_list args;
++ double percent;
++ const char *color;
++
++ va_start(args, fmt);
++ percent = va_arg(args, double);
++ va_end(args);
++ color = get_percent_color(percent);
+ return color_snprintf(bf, size, color, fmt, percent);
+ }
+diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
+index dea082b7..fced3840 100644
+--- a/tools/perf/util/color.h
++++ b/tools/perf/util/color.h
+@@ -39,7 +39,7 @@ int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
+ int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...);
+ int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
+ int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
+-int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent);
++int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...);
+ int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
+ const char *get_percent_color(double percent);
+
+diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
+index 49096ea5..1ae1b082 100644
+--- a/tools/perf/util/event.c
++++ b/tools/perf/util/event.c
+@@ -212,8 +212,10 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ &event->mmap.start, &event->mmap.len, prot,
+ &event->mmap.pgoff,
+ execname);
+-
+- if (n != 5)
++ /*
++ * Anon maps don't have the execname.
++ */
++ if (n < 4)
+ continue;
+
+ if (prot[2] != 'x')
diff --git a/1003_linux-3.12.4.patch b/1003_linux-3.12.4.patch
new file mode 100644
index 00000000..bb91ef80
--- /dev/null
+++ b/1003_linux-3.12.4.patch
@@ -0,0 +1,4725 @@
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index a46d78583ae1..7d8dc93fe2eb 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -588,9 +588,6 @@ tcp_limit_output_bytes - INTEGER
+ typical pfifo_fast qdiscs.
+ tcp_limit_output_bytes limits the number of bytes on qdisc
+ or device to reduce artificial RTT/cwnd and reduce bufferbloat.
+- Note: For GSO/TSO enabled flows, we try to have at least two
+- packets in flight. Reducing tcp_limit_output_bytes might also
+- reduce the size of individual GSO packet (64KB being the max)
+ Default: 131072
+
+ tcp_challenge_ack_limit - INTEGER
+diff --git a/Makefile b/Makefile
+index b28bc57d1769..3b7165eb4734 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 516593e1ce33..26328e800869 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -788,5 +788,7 @@ void bpf_jit_free(struct sk_filter *fp)
+ if (fp->bpf_func != sk_run_filter) {
+ INIT_WORK(&fp->work, bpf_jit_free_deferred);
+ schedule_work(&fp->work);
++ } else {
++ kfree(fp);
+ }
+ }
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 0c611d89d748..fce4b9387f36 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -741,9 +741,17 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+
+ q->sg_reserved_size = INT_MAX;
+
++ /* Protect q->elevator from elevator_change */
++ mutex_lock(&q->sysfs_lock);
++
+ /* init elevator */
+- if (elevator_init(q, NULL))
++ if (elevator_init(q, NULL)) {
++ mutex_unlock(&q->sysfs_lock);
+ return NULL;
++ }
++
++ mutex_unlock(&q->sysfs_lock);
++
+ return q;
+ }
+ EXPORT_SYMBOL(blk_init_allocated_queue);
+diff --git a/block/elevator.c b/block/elevator.c
+index 2bcbd8cc14d4..b7ff2861b6bd 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -186,6 +186,12 @@ int elevator_init(struct request_queue *q, char *name)
+ struct elevator_type *e = NULL;
+ int err;
+
++ /*
++ * q->sysfs_lock must be held to provide mutual exclusion between
++ * elevator_switch() and here.
++ */
++ lockdep_assert_held(&q->sysfs_lock);
++
+ if (unlikely(q->elevator))
+ return 0;
+
+@@ -959,7 +965,7 @@ fail_init:
+ /*
+ * Switch this queue to the given IO scheduler.
+ */
+-int elevator_change(struct request_queue *q, const char *name)
++static int __elevator_change(struct request_queue *q, const char *name)
+ {
+ char elevator_name[ELV_NAME_MAX];
+ struct elevator_type *e;
+@@ -981,6 +987,18 @@ int elevator_change(struct request_queue *q, const char *name)
+
+ return elevator_switch(q, e);
+ }
++
++int elevator_change(struct request_queue *q, const char *name)
++{
++ int ret;
++
++ /* Protect q->elevator from elevator_init() */
++ mutex_lock(&q->sysfs_lock);
++ ret = __elevator_change(q, name);
++ mutex_unlock(&q->sysfs_lock);
++
++ return ret;
++}
+ EXPORT_SYMBOL(elevator_change);
+
+ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+@@ -991,7 +1009,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+ if (!q->elevator)
+ return count;
+
+- ret = elevator_change(q, name);
++ ret = __elevator_change(q, name);
+ if (!ret)
+ return count;
+
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index 0262210cad38..850246206b12 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
+ struct hash_ctx *ctx = ask->private;
+ int err;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ lock_sock(sk);
+ sg_init_table(ctx->sgl.sg, 1);
+ sg_set_page(ctx->sgl.sg, page, size, offset);
+@@ -161,8 +164,6 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
+ else if (len < ds)
+ msg->msg_flags |= MSG_TRUNC;
+
+- msg->msg_namelen = 0;
+-
+ lock_sock(sk);
+ if (ctx->more) {
+ ctx->more = 0;
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index a1c4f0a55583..a19c027b29bd 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
+ struct skcipher_sg_list *sgl;
+ int err = -EINVAL;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ lock_sock(sk);
+ if (!ctx->more && ctx->used)
+ goto unlock;
+@@ -432,7 +435,6 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
+ long copied = 0;
+
+ lock_sock(sk);
+- msg->msg_namelen = 0;
+ for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+ iovlen--, iov++) {
+ unsigned long seglen = iov->iov_len;
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index 272f00927761..1bdf104e90bb 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -3511,7 +3511,7 @@ static int init_card(struct atm_dev *dev)
+ tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */
+ if (tmp) {
+ memcpy(card->atmdev->esi, tmp->dev_addr, 6);
+-
++ dev_put(tmp);
+ printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
+ }
+ /*
+diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
+index c73fc2b74de2..18c5b9b16645 100644
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -32,11 +32,23 @@
+ #include <linux/atomic.h>
+ #include <linux/pid_namespace.h>
+
+-#include <asm/unaligned.h>
+-
+ #include <linux/cn_proc.h>
+
+-#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))
++/*
++ * Size of a cn_msg followed by a proc_event structure. Since the
++ * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
++ * add one 4-byte word to the size here, and then start the actual
++ * cn_msg structure 4 bytes into the stack buffer. The result is that
++ * the immediately following proc_event structure is aligned to 8 bytes.
++ */
++#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
++
++/* See comment above; we test our assumption about sizeof struct cn_msg here. */
++static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
++{
++ BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
++ return (struct cn_msg *)(buffer + 4);
++}
+
+ static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
+ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
+@@ -56,19 +68,19 @@ void proc_fork_connector(struct task_struct *task)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+ struct task_struct *parent;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_FORK;
+ rcu_read_lock();
+ parent = rcu_dereference(task->real_parent);
+@@ -91,17 +103,17 @@ void proc_exec_connector(struct task_struct *task)
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_EXEC;
+ ev->event_data.exec.process_pid = task->pid;
+ ev->event_data.exec.process_tgid = task->tgid;
+@@ -117,14 +129,14 @@ void proc_id_connector(struct task_struct *task, int which_id)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+ const struct cred *cred;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ ev->what = which_id;
+@@ -145,7 +157,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
+ rcu_read_unlock();
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+@@ -159,17 +171,17 @@ void proc_sid_connector(struct task_struct *task)
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_SID;
+ ev->event_data.sid.process_pid = task->pid;
+ ev->event_data.sid.process_tgid = task->tgid;
+@@ -186,17 +198,17 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_PTRACE;
+ ev->event_data.ptrace.process_pid = task->pid;
+ ev->event_data.ptrace.process_tgid = task->tgid;
+@@ -221,17 +233,17 @@ void proc_comm_connector(struct task_struct *task)
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_COMM;
+ ev->event_data.comm.process_pid = task->pid;
+ ev->event_data.comm.process_tgid = task->tgid;
+@@ -248,18 +260,18 @@ void proc_coredump_connector(struct task_struct *task)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_COREDUMP;
+ ev->event_data.coredump.process_pid = task->pid;
+ ev->event_data.coredump.process_tgid = task->tgid;
+@@ -275,18 +287,18 @@ void proc_exit_connector(struct task_struct *task)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_EXIT;
+ ev->event_data.exit.process_pid = task->pid;
+ ev->event_data.exit.process_tgid = task->tgid;
+@@ -312,18 +324,18 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ msg->seq = rcvd_seq;
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->cpu = -1;
+ ev->what = PROC_EVENT_NONE;
+ ev->event_data.ack.err = err;
+diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
+index 06022e3b9c3b..615c5b290e78 100644
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -24,6 +24,7 @@
+ * Authors: Christian König
+ */
+ #include <linux/hdmi.h>
++#include <linux/gcd.h>
+ #include <drm/drmP.h>
+ #include <drm/radeon_drm.h>
+ #include "radeon.h"
+@@ -57,35 +58,57 @@ enum r600_hdmi_iec_status_bits {
+ static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
+ /* 32kHz 44.1kHz 48kHz */
+ /* Clock N CTS N CTS N CTS */
+- { 25175, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
++ { 25175, 4096, 25175, 28224, 125875, 6144, 25175 }, /* 25,20/1.001 MHz */
+ { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
+ { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
+ { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
+ { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
+ { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
+- { 74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
++ { 74176, 4096, 74176, 5733, 75335, 6144, 74176 }, /* 74.25/1.001 MHz */
+ { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
+- { 148352, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
++ { 148352, 4096, 148352, 5733, 150670, 6144, 148352 }, /* 148.50/1.001 MHz */
+ { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
+- { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
+ };
+
++
+ /*
+- * calculate CTS value if it's not found in the table
++ * calculate CTS and N values if they are not found in the table
+ */
+-static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
++static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
+ {
+- u64 n;
+- u32 d;
+-
+- if (*CTS == 0) {
+- n = (u64)clock * (u64)N * 1000ULL;
+- d = 128 * freq;
+- do_div(n, d);
+- *CTS = n;
+- }
+- DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
+- N, *CTS, freq);
++ int n, cts;
++ unsigned long div, mul;
++
++ /* Safe, but overly large values */
++ n = 128 * freq;
++ cts = clock * 1000;
++
++ /* Smallest valid fraction */
++ div = gcd(n, cts);
++
++ n /= div;
++ cts /= div;
++
++ /*
++ * The optimal N is 128*freq/1000. Calculate the closest larger
++ * value that doesn't truncate any bits.
++ */
++ mul = ((128*freq/1000) + (n-1))/n;
++
++ n *= mul;
++ cts *= mul;
++
++ /* Check that we are in spec (not always possible) */
++ if (n < (128*freq/1500))
++ printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
++ if (n > (128*freq/300))
++ printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
++
++ *N = n;
++ *CTS = cts;
++
++ DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
++ *N, *CTS, freq);
+ }
+
+ struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
+@@ -93,15 +116,16 @@ struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
+ struct radeon_hdmi_acr res;
+ u8 i;
+
+- for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
+- r600_hdmi_predefined_acr[i].clock != 0; i++)
+- ;
+- res = r600_hdmi_predefined_acr[i];
++ /* Precalculated values for common clocks */
++ for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) {
++ if (r600_hdmi_predefined_acr[i].clock == clock)
++ return r600_hdmi_predefined_acr[i];
++ }
+
+- /* In case some CTS are missing */
+- r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
+- r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
+- r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
++ /* And odd clocks get manually calculated */
++ r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
++ r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
++ r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
+
+ return res;
+ }
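
The comment block in r600_hdmi_calc_cts() above carries the whole derivation: start from the overly large fraction n/cts = 128*freq / (clock*1000), reduce it by the gcd, then scale back up to the smallest multiple at or above the optimal N of 128*freq/1000. Here is a standalone userspace sketch of that same arithmetic, illustrative only, not kernel code; plain ints are assumed to be at least 32 bits wide.

/*
 * Standalone sketch of the gcd-based ACR computation in the hunk above.
 * Build with: cc -o acr acr.c
 */
#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

/* clock_khz is the pixel clock in kHz, freq the audio rate in Hz,
 * mirroring r600_hdmi_calc_cts() above. */
static void calc_cts(unsigned int clock_khz, int *CTS, int *N, int freq)
{
	int n = 128 * freq;		/* safe, but overly large values */
	int cts = clock_khz * 1000;
	unsigned long div, mul;

	div = gcd(n, cts);		/* smallest valid fraction */
	n /= div;
	cts /= div;

	/* closest multiple at or above the optimal N of 128*freq/1000 */
	mul = ((128 * freq / 1000) + (n - 1)) / n;
	n *= mul;
	cts *= mul;

	*N = n;
	*CTS = cts;
}

int main(void)
{
	int N, CTS;

	calc_cts(74176, &CTS, &N, 48000);	/* 74.25/1.001 MHz, 48 kHz */
	printf("N=%d CTS=%d\n", N, CTS);	/* prints N=6144 CTS=74176 */
	return 0;
}

The output matches the 48 kHz column of the corrected 74176 kHz row in the predefined table above (and the 44.1 kHz pair 5733/75335 falls out the same way), so the rewritten table rows are consistent with this formula.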
+diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
+index f042a6cf8b18..55e4920f967b 100644
+--- a/drivers/hid/hid-elo.c
++++ b/drivers/hid/hid-elo.c
+@@ -181,7 +181,40 @@ fail:
+ */
+ static bool elo_broken_firmware(struct usb_device *dev)
+ {
+- return use_fw_quirk && le16_to_cpu(dev->descriptor.bcdDevice) == 0x10d;
++ struct usb_device *hub = dev->parent;
++ struct usb_device *child = NULL;
++ u16 fw_lvl = le16_to_cpu(dev->descriptor.bcdDevice);
++ u16 child_vid, child_pid;
++ int i;
++
++ if (!use_fw_quirk)
++ return false;
++ if (fw_lvl != 0x10d)
++ return false;
++
++ /* iterate sibling devices of the touch controller */
++ usb_hub_for_each_child(hub, i, child) {
++ child_vid = le16_to_cpu(child->descriptor.idVendor);
++ child_pid = le16_to_cpu(child->descriptor.idProduct);
++
++ /*
++	 * If one of the devices below is attached as a sibling of the
++	 * touch controller, then this is a newer IBM 4820 monitor that
++ * does not need the IBM-requested workaround if fw level is
++ * 0x010d - aka 'M'.
++ * No other HW can have this combination.
++ */
++ if (child_vid==0x04b3) {
++ switch (child_pid) {
++ case 0x4676: /* 4820 21x Video */
++ case 0x4677: /* 4820 51x Video */
++ case 0x4678: /* 4820 2Lx Video */
++ case 0x4679: /* 4820 5Lx Video */
++ return false;
++ }
++ }
++ }
++ return true;
+ }
+
+ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
+diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
+index 1bfd292cac8f..06eb45fa6331 100644
+--- a/drivers/hid/hid-lg.c
++++ b/drivers/hid/hid-lg.c
+@@ -47,6 +47,7 @@
+ #define DFP_RDESC_ORIG_SIZE 97
+ #define FV_RDESC_ORIG_SIZE 130
+ #define MOMO_RDESC_ORIG_SIZE 87
++#define MOMO2_RDESC_ORIG_SIZE 87
+
+ /* Fixed report descriptors for Logitech Driving Force (and Pro)
+ * wheel controllers
+@@ -284,6 +285,54 @@ static __u8 momo_rdesc_fixed[] = {
+ 0xC0 /* End Collection */
+ };
+
++static __u8 momo2_rdesc_fixed[] = {
++0x05, 0x01, /* Usage Page (Desktop), */
++0x09, 0x04,         /* Usage (Joystick),                */
++0xA1, 0x01, /* Collection (Application), */
++0xA1, 0x02, /* Collection (Logical), */
++0x95, 0x01, /* Report Count (1), */
++0x75, 0x0A, /* Report Size (10), */
++0x15, 0x00, /* Logical Minimum (0), */
++0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
++0x35, 0x00, /* Physical Minimum (0), */
++0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
++0x09, 0x30, /* Usage (X), */
++0x81, 0x02, /* Input (Variable), */
++0x95, 0x0A, /* Report Count (10), */
++0x75, 0x01, /* Report Size (1), */
++0x25, 0x01, /* Logical Maximum (1), */
++0x45, 0x01, /* Physical Maximum (1), */
++0x05, 0x09, /* Usage Page (Button), */
++0x19, 0x01, /* Usage Minimum (01h), */
++0x29, 0x0A, /* Usage Maximum (0Ah), */
++0x81, 0x02, /* Input (Variable), */
++0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
++0x09, 0x00, /* Usage (00h), */
++0x95, 0x04, /* Report Count (4), */
++0x81, 0x02, /* Input (Variable), */
++0x95, 0x01, /* Report Count (1), */
++0x75, 0x08, /* Report Size (8), */
++0x26, 0xFF, 0x00, /* Logical Maximum (255), */
++0x46, 0xFF, 0x00, /* Physical Maximum (255), */
++0x09, 0x01, /* Usage (01h), */
++0x81, 0x02, /* Input (Variable), */
++0x05, 0x01, /* Usage Page (Desktop), */
++0x09, 0x31, /* Usage (Y), */
++0x81, 0x02, /* Input (Variable), */
++0x09, 0x32, /* Usage (Z), */
++0x81, 0x02, /* Input (Variable), */
++0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
++0x09, 0x00, /* Usage (00h), */
++0x81, 0x02, /* Input (Variable), */
++0xC0, /* End Collection, */
++0xA1, 0x02, /* Collection (Logical), */
++0x09, 0x02, /* Usage (02h), */
++0x95, 0x07, /* Report Count (7), */
++0x91, 0x02, /* Output (Variable), */
++0xC0, /* End Collection, */
++0xC0 /* End Collection */
++};
++
+ /*
+ * Certain Logitech keyboards send in report #3 keys which are far
+ * above the logical maximum described in descriptor. This extends
+@@ -343,6 +392,15 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ }
+ break;
+
++ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
++ if (*rsize == MOMO2_RDESC_ORIG_SIZE) {
++ hid_info(hdev,
++ "fixing up Logitech Momo Racing Force (Black) report descriptor\n");
++ rdesc = momo2_rdesc_fixed;
++ *rsize = sizeof(momo2_rdesc_fixed);
++ }
++ break;
++
+ case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
+ if (*rsize == FV_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 15e9b57e9cf0..40203ada635e 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -782,7 +782,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
+ int offset;
+
+ BUG_ON(!domain->pgd);
+- BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
++
++ if (addr_width < BITS_PER_LONG && pfn >> addr_width)
++ /* Address beyond IOMMU's addressing capabilities. */
++ return NULL;
++
+ parent = domain->pgd;
+
+ while (level > 0) {
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index f71673dbb23d..b97d70b1abe0 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -525,12 +525,13 @@ static int __init intel_irq_remapping_supported(void)
+ if (disable_irq_remap)
+ return 0;
+ if (irq_remap_broken) {
+- WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+- "This system BIOS has enabled interrupt remapping\n"
+- "on a chipset that contains an erratum making that\n"
+- "feature unstable. To maintain system stability\n"
+- "interrupt remapping is being disabled. Please\n"
+- "contact your BIOS vendor for an update\n");
++ printk(KERN_WARNING
++ "This system BIOS has enabled interrupt remapping\n"
++ "on a chipset that contains an erratum making that\n"
++ "feature unstable. To maintain system stability\n"
++ "interrupt remapping is being disabled. Please\n"
++ "contact your BIOS vendor for an update\n");
++ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ disable_irq_remap = 1;
+ return 0;
+ }
+diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
+index baf2686aa8eb..02125e6a9109 100644
+--- a/drivers/isdn/isdnloop/isdnloop.c
++++ b/drivers/isdn/isdnloop/isdnloop.c
+@@ -1083,8 +1083,10 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
+ spin_unlock_irqrestore(&card->isdnloop_lock, flags);
+ return -ENOMEM;
+ }
+- for (i = 0; i < 3; i++)
+- strcpy(card->s0num[i], sdef.num[i]);
++ for (i = 0; i < 3; i++) {
++ strlcpy(card->s0num[i], sdef.num[i],
++ sizeof(card->s0num[0]));
++ }
+ break;
+ case ISDN_PTYPE_1TR6:
+ if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95",
+@@ -1097,7 +1099,7 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
+ spin_unlock_irqrestore(&card->isdnloop_lock, flags);
+ return -ENOMEM;
+ }
+- strcpy(card->s0num[0], sdef.num[0]);
++ strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0]));
+ card->s0num[1][0] = '\0';
+ card->s0num[2][0] = '\0';
+ break;
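
The isdnloop hunk swaps strcpy() for strlcpy() so an oversized sdef.num[] coming from userspace can no longer overrun the fixed-size card->s0num[] buffers. strlcpy() is a kernel/BSD API, so this userspace sketch carries a minimal local equivalent; the 17-byte buffer is a hypothetical size for illustration, not the real field width.

#include <stdio.h>
#include <string.h>

/* Local stand-in for the kernel's strlcpy(): copy at most size-1 bytes
 * and always NUL-terminate. Returns the length of src. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char s0num[17];		/* hypothetical field size */

	my_strlcpy(s0num, "12345678901234567890", sizeof(s0num));
	printf("%s\n", s0num);	/* safely truncated to 16 chars + NUL */
	return 0;
}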
+diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
+index e47dcb9d1e91..5cefb479c707 100644
+--- a/drivers/isdn/mISDN/socket.c
++++ b/drivers/isdn/mISDN/socket.c
+@@ -117,7 +117,6 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ {
+ struct sk_buff *skb;
+ struct sock *sk = sock->sk;
+- struct sockaddr_mISDN *maddr;
+
+ int copied, err;
+
+@@ -135,9 +134,9 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (!skb)
+ return err;
+
+- if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
+- msg->msg_namelen = sizeof(struct sockaddr_mISDN);
+- maddr = (struct sockaddr_mISDN *)msg->msg_name;
++ if (msg->msg_name) {
++ struct sockaddr_mISDN *maddr = msg->msg_name;
++
+ maddr->family = AF_ISDN;
+ maddr->dev = _pms(sk)->dev->id;
+ if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
+@@ -150,11 +149,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ maddr->sapi = _pms(sk)->ch.addr & 0xFF;
+ maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
+ }
+- } else {
+- if (msg->msg_namelen)
+- printk(KERN_WARNING "%s: too small namelen %d\n",
+- __func__, msg->msg_namelen);
+- msg->msg_namelen = 0;
++ msg->msg_namelen = sizeof(*maddr);
+ }
+
+ copied = skb->len + MISDN_HEADER_LEN;
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index e4109f618c7b..8a0665d04567 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5214,15 +5214,18 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
+ return 0;
+ }
+
+-static int alloc_thread_groups(struct r5conf *conf, int cnt);
++static int alloc_thread_groups(struct r5conf *conf, int cnt,
++ int *group_cnt,
++ int *worker_cnt_per_group,
++ struct r5worker_group **worker_groups);
+ static ssize_t
+ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
+ {
+ struct r5conf *conf = mddev->private;
+ unsigned long new;
+ int err;
+- struct r5worker_group *old_groups;
+- int old_group_cnt;
++ struct r5worker_group *new_groups, *old_groups;
++ int group_cnt, worker_cnt_per_group;
+
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+@@ -5238,17 +5241,19 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
+ mddev_suspend(mddev);
+
+ old_groups = conf->worker_groups;
+- old_group_cnt = conf->worker_cnt_per_group;
+-
+ if (old_groups)
+ flush_workqueue(raid5_wq);
+
+- conf->worker_groups = NULL;
+- err = alloc_thread_groups(conf, new);
+- if (err) {
+- conf->worker_groups = old_groups;
+- conf->worker_cnt_per_group = old_group_cnt;
+- } else {
++ err = alloc_thread_groups(conf, new,
++ &group_cnt, &worker_cnt_per_group,
++ &new_groups);
++ if (!err) {
++ spin_lock_irq(&conf->device_lock);
++ conf->group_cnt = group_cnt;
++ conf->worker_cnt_per_group = worker_cnt_per_group;
++ conf->worker_groups = new_groups;
++ spin_unlock_irq(&conf->device_lock);
++
+ if (old_groups)
+ kfree(old_groups[0].workers);
+ kfree(old_groups);
+@@ -5278,33 +5283,36 @@ static struct attribute_group raid5_attrs_group = {
+ .attrs = raid5_attrs,
+ };
+
+-static int alloc_thread_groups(struct r5conf *conf, int cnt)
++static int alloc_thread_groups(struct r5conf *conf, int cnt,
++ int *group_cnt,
++ int *worker_cnt_per_group,
++ struct r5worker_group **worker_groups)
+ {
+ int i, j;
+ ssize_t size;
+ struct r5worker *workers;
+
+- conf->worker_cnt_per_group = cnt;
++ *worker_cnt_per_group = cnt;
+ if (cnt == 0) {
+- conf->worker_groups = NULL;
++ *group_cnt = 0;
++ *worker_groups = NULL;
+ return 0;
+ }
+- conf->group_cnt = num_possible_nodes();
++ *group_cnt = num_possible_nodes();
+ size = sizeof(struct r5worker) * cnt;
+- workers = kzalloc(size * conf->group_cnt, GFP_NOIO);
+- conf->worker_groups = kzalloc(sizeof(struct r5worker_group) *
+- conf->group_cnt, GFP_NOIO);
+- if (!conf->worker_groups || !workers) {
++ workers = kzalloc(size * *group_cnt, GFP_NOIO);
++ *worker_groups = kzalloc(sizeof(struct r5worker_group) *
++ *group_cnt, GFP_NOIO);
++ if (!*worker_groups || !workers) {
+ kfree(workers);
+- kfree(conf->worker_groups);
+- conf->worker_groups = NULL;
++ kfree(*worker_groups);
+ return -ENOMEM;
+ }
+
+- for (i = 0; i < conf->group_cnt; i++) {
++ for (i = 0; i < *group_cnt; i++) {
+ struct r5worker_group *group;
+
+- group = &conf->worker_groups[i];
++ group = &(*worker_groups)[i];
+ INIT_LIST_HEAD(&group->handle_list);
+ group->conf = conf;
+ group->workers = workers + i * cnt;
+@@ -5462,6 +5470,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
+ struct md_rdev *rdev;
+ struct disk_info *disk;
+ char pers_name[6];
++ int group_cnt, worker_cnt_per_group;
++ struct r5worker_group *new_group;
+
+ if (mddev->new_level != 5
+ && mddev->new_level != 4
+@@ -5496,7 +5506,12 @@ static struct r5conf *setup_conf(struct mddev *mddev)
+ if (conf == NULL)
+ goto abort;
+ /* Don't enable multi-threading by default*/
+- if (alloc_thread_groups(conf, 0))
++ if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
++ &new_group)) {
++ conf->group_cnt = group_cnt;
++ conf->worker_cnt_per_group = worker_cnt_per_group;
++ conf->worker_groups = new_group;
++ } else
+ goto abort;
+ spin_lock_init(&conf->device_lock);
+ seqcount_init(&conf->gen_lock);
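
The raid5 rework changes alloc_thread_groups() to return its results through out-parameters instead of writing into conf directly: the live configuration is only replaced, under conf->device_lock, once the new groups are fully built, and the old ones are freed afterwards. A minimal userspace sketch of that allocate-then-publish pattern, with a pthread mutex standing in for the kernel spinlock and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct group { int workers; };

static pthread_mutex_t conf_lock = PTHREAD_MUTEX_INITIALIZER;
static struct group *conf_groups;
static int conf_cnt;

/* Analog of alloc_thread_groups(): results go to out-parameters so the
 * live configuration is untouched if allocation fails. */
static int alloc_groups(int cnt, int *group_cnt, struct group **groups)
{
	*groups = calloc(cnt, sizeof(**groups));
	if (!*groups)
		return -1;
	*group_cnt = cnt;
	return 0;
}

static int resize_groups(int cnt)
{
	struct group *new_groups, *old_groups;
	int group_cnt;

	if (alloc_groups(cnt, &group_cnt, &new_groups))
		return -1;		/* old config still valid */

	pthread_mutex_lock(&conf_lock);	/* publish atomically */
	old_groups = conf_groups;
	conf_groups = new_groups;
	conf_cnt = group_cnt;
	pthread_mutex_unlock(&conf_lock);

	free(old_groups);		/* safe: no longer visible */
	return 0;
}

int main(void)
{
	resize_groups(4);
	printf("groups: %d\n", conf_cnt);	/* build with -pthread */
	return 0;
}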
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index e883bfe2e727..dd8057d0bae7 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3395,20 +3395,20 @@ static void bond_set_rx_mode(struct net_device *bond_dev)
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+
+- ASSERT_RTNL();
+-
++ rcu_read_lock();
+ if (USES_PRIMARY(bond->params.mode)) {
+- slave = rtnl_dereference(bond->curr_active_slave);
++ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave) {
+ dev_uc_sync(slave->dev, bond_dev);
+ dev_mc_sync(slave->dev, bond_dev);
+ }
+ } else {
+- bond_for_each_slave(bond, slave) {
++ bond_for_each_slave_rcu(bond, slave) {
+ dev_uc_sync_multiple(slave->dev, bond_dev);
+ dev_mc_sync_multiple(slave->dev, bond_dev);
+ }
+ }
++ rcu_read_unlock();
+ }
+
+ static int bond_neigh_init(struct neighbour *n)
+diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
+index c29b836749b6..b60f95b2196e 100644
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -587,8 +587,9 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ goto out;
+ }
+ if (bond->params.mode == BOND_MODE_ALB ||
+- bond->params.mode == BOND_MODE_TLB) {
+- pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n",
++ bond->params.mode == BOND_MODE_TLB ||
++ bond->params.mode == BOND_MODE_8023AD) {
++ pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
+ bond->dev->name, bond->dev->name);
+ ret = -EINVAL;
+ goto out;
+@@ -759,6 +760,8 @@ static ssize_t bonding_store_downdelay(struct device *d,
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
++ if (!rtnl_trylock())
++ return restart_syscall();
+ if (!(bond->params.miimon)) {
+ pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
+ bond->dev->name);
+@@ -792,6 +795,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
+ }
+
+ out:
++ rtnl_unlock();
+ return ret;
+ }
+ static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
+@@ -814,6 +818,8 @@ static ssize_t bonding_store_updelay(struct device *d,
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
++ if (!rtnl_trylock())
++ return restart_syscall();
+ if (!(bond->params.miimon)) {
+ pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
+ bond->dev->name);
+@@ -847,6 +853,7 @@ static ssize_t bonding_store_updelay(struct device *d,
+ }
+
+ out:
++ rtnl_unlock();
+ return ret;
+ }
+ static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
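
The downdelay/updelay stores now open with rtnl_trylock() and return restart_syscall() on contention, the usual idiom for sysfs handlers that must take the RTNL without deadlocking against paths that hold it while waiting on sysfs. A rough userspace analog of the shape, with EAGAIN standing in for the syscall restart:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

/* Analog of the bonding sysfs stores: take the outer lock with a
 * trylock and ask the caller to retry on contention, instead of
 * blocking while another path may already hold our locks. */
static int store_downdelay(int new_value)
{
	if (pthread_mutex_trylock(&rtnl))
		return -EAGAIN;		/* kernel: restart_syscall() */

	/* ... validate and apply new_value under the lock ... */
	pthread_mutex_unlock(&rtnl);
	return 0;
}

int main(void)
{
	printf("store: %d\n", store_downdelay(100));	/* -pthread */
	return 0;
}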
+diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
+index 2c210ec35d59..f2f6d85f3788 100644
+--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
+@@ -2890,6 +2890,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
+ PHY_INTERFACE_MODE_GMII);
+ if (!mp->phy)
+ err = -ENODEV;
++ else
++ phy_addr_set(mp, mp->phy->addr);
+ } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
+ mp->phy = phy_scan(mp, pd->phy_addr);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
+index a071cda2dd04..0d087b03a7b0 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
+@@ -264,6 +264,10 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+ mdev->port_cnt++;
+
++ /* Initialize time stamp mechanism */
++ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
++ mlx4_en_init_timestamp(mdev);
++
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ if (!dev->caps.comp_pool) {
+ mdev->profile.prof[i].rx_ring_num =
+@@ -301,10 +305,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
+ mdev->pndev[i] = NULL;
+ }
+
+- /* Initialize time stamp mechanism */
+- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+- mlx4_en_init_timestamp(mdev);
+-
+ return mdev;
+
+ err_mr:
+diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
+index d2e591955bdd..0095af50fb81 100644
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp)
+ le32_to_cpu(txd->opts1) & 0xffff,
+ PCI_DMA_TODEVICE);
+
+- bytes_compl += skb->len;
+- pkts_compl++;
+-
+ if (status & LastFrag) {
+ if (status & (TxError | TxFIFOUnder)) {
+ netif_dbg(cp, tx_err, cp->dev,
+@@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp)
+ netif_dbg(cp, tx_done, cp->dev,
+ "tx done, slot %d\n", tx_tail);
+ }
++ bytes_compl += skb->len;
++ pkts_compl++;
+ dev_kfree_skb_irq(skb);
+ }
+
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 3397cee89777..fb3f8dc1b8b1 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -3465,6 +3465,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
+ rtl_writephy(tp, 0x14, 0x9065);
+ rtl_writephy(tp, 0x14, 0x1065);
+
++ /* Check ALDPS bit, disable it if enabled */
++ rtl_writephy(tp, 0x1f, 0x0a43);
++ if (rtl_readphy(tp, 0x10) & 0x0004)
++ rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
++
+ rtl_writephy(tp, 0x1f, 0x0000);
+ }
+
+diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
+index 98eedb90cdc3..fc3e25ca7135 100644
+--- a/drivers/net/ethernet/smsc/smc91x.h
++++ b/drivers/net/ethernet/smsc/smc91x.h
+@@ -46,7 +46,8 @@
+ defined(CONFIG_MACH_LITTLETON) ||\
+ defined(CONFIG_MACH_ZYLONITE2) ||\
+ defined(CONFIG_ARCH_VIPER) ||\
+- defined(CONFIG_MACH_STARGATE2)
++ defined(CONFIG_MACH_STARGATE2) ||\
++ defined(CONFIG_ARCH_VERSATILE)
+
+ #include <asm/mach-types.h>
+
+@@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+ #define SMC_outl(v, a, r) writel(v, (a) + (r))
+ #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+ #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
++#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
++#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+ #define SMC_IRQ_FLAGS (-1) /* from resource */
+
+ /* We actually can't write halfwords properly if not word aligned */
+@@ -206,23 +209,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+ #define RPC_LSA_DEFAULT RPC_LED_TX_RX
+ #define RPC_LSB_DEFAULT RPC_LED_100_10
+
+-#elif defined(CONFIG_ARCH_VERSATILE)
+-
+-#define SMC_CAN_USE_8BIT 1
+-#define SMC_CAN_USE_16BIT 1
+-#define SMC_CAN_USE_32BIT 1
+-#define SMC_NOWAIT 1
+-
+-#define SMC_inb(a, r) readb((a) + (r))
+-#define SMC_inw(a, r) readw((a) + (r))
+-#define SMC_inl(a, r) readl((a) + (r))
+-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+-#define SMC_outw(v, a, r) writew(v, (a) + (r))
+-#define SMC_outl(v, a, r) writel(v, (a) + (r))
+-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+-#define SMC_IRQ_FLAGS (-1) /* from resource */
+-
+ #elif defined(CONFIG_MN10300)
+
+ /*
+diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
+index d022bf936572..ad61d26a44f3 100644
+--- a/drivers/net/ethernet/via/via-velocity.c
++++ b/drivers/net/ethernet/via/via-velocity.c
+@@ -2172,16 +2172,13 @@ static int velocity_poll(struct napi_struct *napi, int budget)
+ unsigned int rx_done;
+ unsigned long flags;
+
+- spin_lock_irqsave(&vptr->lock, flags);
+ /*
+ * Do rx and tx twice for performance (taken from the VIA
+ * out-of-tree driver).
+ */
+- rx_done = velocity_rx_srv(vptr, budget / 2);
+- velocity_tx_srv(vptr);
+- rx_done += velocity_rx_srv(vptr, budget - rx_done);
++ rx_done = velocity_rx_srv(vptr, budget);
++ spin_lock_irqsave(&vptr->lock, flags);
+ velocity_tx_srv(vptr);
+-
+ /* If budget not fully consumed, exit the polling mode */
+ if (rx_done < budget) {
+ napi_complete(napi);
+@@ -2342,6 +2339,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
+ if (ret < 0)
+ goto out_free_tmp_vptr_1;
+
++ napi_disable(&vptr->napi);
++
+ spin_lock_irqsave(&vptr->lock, flags);
+
+ netif_stop_queue(dev);
+@@ -2362,6 +2361,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
+
+ velocity_give_many_rx_descs(vptr);
+
++ napi_enable(&vptr->napi);
++
+ mac_enable_int(vptr->mac_regs);
+ netif_start_queue(dev);
+
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 9dccb1edfd2a..dc76670c2f2a 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -628,6 +628,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ const struct iovec *iv, unsigned long total_len,
+ size_t count, int noblock)
+ {
++ int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
+ struct sk_buff *skb;
+ struct macvlan_dev *vlan;
+ unsigned long len = total_len;
+@@ -670,6 +671,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+
+ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+ copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
++ if (copylen > good_linear)
++ copylen = good_linear;
+ linear = copylen;
+ if (iov_pages(iv, vnet_hdr_len + copylen, count)
+ <= MAX_SKB_FRAGS)
+@@ -678,7 +681,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+
+ if (!zerocopy) {
+ copylen = len;
+- linear = vnet_hdr.hdr_len;
++ if (vnet_hdr.hdr_len > good_linear)
++ linear = good_linear;
++ else
++ linear = vnet_hdr.hdr_len;
+ }
+
+ skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 5f66e30d9823..82ee6ed954cb 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -979,8 +979,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (error < 0)
+ goto end;
+
+- m->msg_namelen = 0;
+-
+ if (skb) {
+ total_len = min_t(size_t, total_len, skb->len);
+ error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 50e43e64d51d..6327df255404 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1366,6 +1366,8 @@ static int team_user_linkup_option_get(struct team *team,
+ return 0;
+ }
+
++static void __team_carrier_check(struct team *team);
++
+ static int team_user_linkup_option_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+ {
+@@ -1373,6 +1375,7 @@ static int team_user_linkup_option_set(struct team *team,
+
+ port->user.linkup = ctx->data.bool_val;
+ team_refresh_port_linkup(port);
++ __team_carrier_check(port->team);
+ return 0;
+ }
+
+@@ -1392,6 +1395,7 @@ static int team_user_linkup_en_option_set(struct team *team,
+
+ port->user.linkup_enabled = ctx->data.bool_val;
+ team_refresh_port_linkup(port);
++ __team_carrier_check(port->team);
+ return 0;
+ }
+
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 7cb105c103fe..782e38bfc1ee 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -981,6 +981,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ struct sk_buff *skb;
+ size_t len = total_len, align = NET_SKB_PAD, linear;
+ struct virtio_net_hdr gso = { 0 };
++ int good_linear;
+ int offset = 0;
+ int copylen;
+ bool zerocopy = false;
+@@ -1021,12 +1022,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ return -EINVAL;
+ }
+
++ good_linear = SKB_MAX_HEAD(align);
++
+ if (msg_control) {
+ /* There are 256 bytes to be copied in skb, so there is
+ * enough room for skb expand head in case it is used.
+ * The rest of the buffer is mapped from userspace.
+ */
+ copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
++ if (copylen > good_linear)
++ copylen = good_linear;
+ linear = copylen;
+ if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
+ zerocopy = true;
+@@ -1034,7 +1039,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+
+ if (!zerocopy) {
+ copylen = len;
+- linear = gso.hdr_len;
++ if (gso.hdr_len > good_linear)
++ linear = good_linear;
++ else
++ linear = gso.hdr_len;
+ }
+
+ skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
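
Both the macvtap and tun hunks clamp the linear (kmalloc'ed head) part of the skb to SKB_MAX_HEAD(align), so a guest-supplied vnet hdr_len can no longer force a huge contiguous allocation. A toy illustration of the clamp with rough stand-in values; the real constants depend on the page size and struct skb_shared_info layout:

#include <stdio.h>

/* Illustration only: rough stand-ins for the kernel constants
 * (assumes a 4 KiB page and a ~320-byte skb_shared_info). */
#define PAGE_SZ			4096
#define SHINFO_SZ		320
#define NET_IP_ALIGN		2
#define SKB_MAX_HEAD(align)	(PAGE_SZ - (align) - SHINFO_SZ)

int main(void)
{
	int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
	int hdr_len = 65535;	/* worst case from an untrusted header */
	int linear = hdr_len > good_linear ? good_linear : hdr_len;

	printf("good_linear=%d, clamped linear=%d\n", good_linear, linear);
	return 0;
}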
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 90a429b7ebad..8494bb53ebdc 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -204,9 +204,6 @@ static void intr_complete (struct urb *urb)
+ break;
+ }
+
+- if (!netif_running (dev->net))
+- return;
+-
+ status = usb_submit_urb (urb, GFP_ATOMIC);
+ if (status != 0)
+ netif_err(dev, timer, dev->net,
+diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
+index fa33b5edf931..e3eb95292a7f 100644
+--- a/drivers/net/wireless/rt2x00/rt2800.h
++++ b/drivers/net/wireless/rt2x00/rt2800.h
+@@ -52,6 +52,7 @@
+ * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
+ * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF5592 2.4G/5G 2T2R
++ * RF3070 2.4G 1T1R
+ * RF5360 2.4G 1T1R
+ * RF5370 2.4G 1T1R
+ * RF5390 2.4G 1T1R
+@@ -70,6 +71,7 @@
+ #define RF3322 0x000c
+ #define RF3053 0x000d
+ #define RF5592 0x000f
++#define RF3070 0x3070
+ #define RF3290 0x3290
+ #define RF5360 0x5360
+ #define RF5370 0x5370
+diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
+index 14007870302b..446eadeaaef6 100644
+--- a/drivers/net/wireless/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/rt2x00/rt2800lib.c
+@@ -3152,6 +3152,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+ case RF3322:
+ rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
+ break;
++ case RF3070:
+ case RF5360:
+ case RF5370:
+ case RF5372:
+@@ -3166,7 +3167,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+ rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
+ }
+
+- if (rt2x00_rf(rt2x00dev, RF3290) ||
++ if (rt2x00_rf(rt2x00dev, RF3070) ||
++ rt2x00_rf(rt2x00dev, RF3290) ||
+ rt2x00_rf(rt2x00dev, RF3322) ||
+ rt2x00_rf(rt2x00dev, RF5360) ||
+ rt2x00_rf(rt2x00dev, RF5370) ||
+@@ -4264,6 +4266,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
+ rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
+ break;
+ case RF3053:
++ case RF3070:
+ case RF3290:
+ case RF5360:
+ case RF5370:
+@@ -7024,6 +7027,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
+ case RF3022:
+ case RF3052:
+ case RF3053:
++ case RF3070:
+ case RF3290:
+ case RF3320:
+ case RF3322:
+@@ -7546,6 +7550,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ rt2x00_rf(rt2x00dev, RF2020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022) ||
++ rt2x00_rf(rt2x00dev, RF3070) ||
+ rt2x00_rf(rt2x00dev, RF3290) ||
+ rt2x00_rf(rt2x00dev, RF3320) ||
+ rt2x00_rf(rt2x00dev, RF3322) ||
+@@ -7674,6 +7679,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ case RF3320:
+ case RF3052:
+ case RF3053:
++ case RF3070:
+ case RF3290:
+ case RF5360:
+ case RF5370:
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 900da4b243ad..625585034ef4 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -39,6 +39,7 @@
+ #include <linux/udp.h>
+
+ #include <net/tcp.h>
++#include <net/ip6_checksum.h>
+
+ #include <xen/xen.h>
+ #include <xen/events.h>
+diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
+index ed0834e2b72e..ab69245f86dc 100644
+--- a/drivers/usb/musb/davinci.c
++++ b/drivers/usb/musb/davinci.c
+@@ -509,7 +509,7 @@ static u64 davinci_dmamask = DMA_BIT_MASK(32);
+
+ static int davinci_probe(struct platform_device *pdev)
+ {
+- struct resource musb_resources[2];
++ struct resource musb_resources[3];
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct platform_device *musb;
+ struct davinci_glue *glue;
+@@ -567,6 +567,15 @@ static int davinci_probe(struct platform_device *pdev)
+ musb_resources[1].end = pdev->resource[1].end;
+ musb_resources[1].flags = pdev->resource[1].flags;
+
++ /*
++ * For DM6467 3 resources are passed. A placeholder for the 3rd
++ * resource is always there, so it's safe to always copy it...
++ */
++ musb_resources[2].name = pdev->resource[2].name;
++ musb_resources[2].start = pdev->resource[2].start;
++ musb_resources[2].end = pdev->resource[2].end;
++ musb_resources[2].flags = pdev->resource[2].flags;
++
+ ret = platform_device_add_resources(musb, musb_resources,
+ ARRAY_SIZE(musb_resources));
+ if (ret) {
+diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
+index 6ad02f57c366..3dcf66f345e9 100644
+--- a/drivers/usb/wusbcore/wa-xfer.c
++++ b/drivers/usb/wusbcore/wa-xfer.c
+@@ -91,7 +91,8 @@
+ #include "wusbhc.h"
+
+ enum {
+- WA_SEGS_MAX = 255,
++ /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
++ WA_SEGS_MAX = 128,
+ };
+
+ enum wa_seg_status {
+@@ -446,7 +447,7 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
+ }
+ xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
+ xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
+- if (xfer->segs >= WA_SEGS_MAX) {
++ if (xfer->segs > WA_SEGS_MAX) {
+		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
+ (int)(urb->transfer_buffer_length / xfer->seg_size),
+ WA_SEGS_MAX);
+diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/kyro/fbdev.c
+index 6157f74ac600..ec7fc87fa5ab 100644
+--- a/drivers/video/kyro/fbdev.c
++++ b/drivers/video/kyro/fbdev.c
+@@ -625,15 +625,15 @@ static int kyrofb_ioctl(struct fb_info *info,
+ }
+ break;
+ case KYRO_IOCTL_UVSTRIDE:
+- if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(unsigned long)))
++ if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(deviceInfo.ulOverlayUVStride)))
+ return -EFAULT;
+ break;
+ case KYRO_IOCTL_STRIDE:
+- if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(unsigned long)))
++ if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(deviceInfo.ulOverlayStride)))
+ return -EFAULT;
+ break;
+ case KYRO_IOCTL_OVERLAY_OFFSET:
+- if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(unsigned long)))
++ if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(deviceInfo.ulOverlayOffset)))
+ return -EFAULT;
+ break;
+ }
+diff --git a/fs/aio.c b/fs/aio.c
+index 067e3d340c35..6efb7f6cb22e 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -36,10 +36,10 @@
+ #include <linux/eventfd.h>
+ #include <linux/blkdev.h>
+ #include <linux/compat.h>
+-#include <linux/anon_inodes.h>
+ #include <linux/migrate.h>
+ #include <linux/ramfs.h>
+ #include <linux/percpu-refcount.h>
++#include <linux/mount.h>
+
+ #include <asm/kmap_types.h>
+ #include <asm/uaccess.h>
+@@ -80,6 +80,8 @@ struct kioctx {
+ struct percpu_ref users;
+ atomic_t dead;
+
++ struct percpu_ref reqs;
++
+ unsigned long user_id;
+
+ struct __percpu kioctx_cpu *cpu;
+@@ -107,7 +109,6 @@ struct kioctx {
+ struct page **ring_pages;
+ long nr_pages;
+
+- struct rcu_head rcu_head;
+ struct work_struct free_work;
+
+ struct {
+@@ -152,12 +153,67 @@ unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio request
+ static struct kmem_cache *kiocb_cachep;
+ static struct kmem_cache *kioctx_cachep;
+
++static struct vfsmount *aio_mnt;
++
++static const struct file_operations aio_ring_fops;
++static const struct address_space_operations aio_ctx_aops;
++
++static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
++{
++ struct qstr this = QSTR_INIT("[aio]", 5);
++ struct file *file;
++ struct path path;
++ struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
++ if (IS_ERR(inode))
++ return ERR_CAST(inode);
++
++ inode->i_mapping->a_ops = &aio_ctx_aops;
++ inode->i_mapping->private_data = ctx;
++ inode->i_size = PAGE_SIZE * nr_pages;
++
++ path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
++ if (!path.dentry) {
++ iput(inode);
++ return ERR_PTR(-ENOMEM);
++ }
++ path.mnt = mntget(aio_mnt);
++
++ d_instantiate(path.dentry, inode);
++ file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
++ if (IS_ERR(file)) {
++ path_put(&path);
++ return file;
++ }
++
++ file->f_flags = O_RDWR;
++ file->private_data = ctx;
++ return file;
++}
++
++static struct dentry *aio_mount(struct file_system_type *fs_type,
++ int flags, const char *dev_name, void *data)
++{
++ static const struct dentry_operations ops = {
++ .d_dname = simple_dname,
++ };
++ return mount_pseudo(fs_type, "aio:", NULL, &ops, 0xa10a10a1);
++}
++
+ /* aio_setup
+ * Creates the slab caches used by the aio routines, panic on
+ * failure as this is done early during the boot sequence.
+ */
+ static int __init aio_setup(void)
+ {
++ static struct file_system_type aio_fs = {
++ .name = "aio",
++ .mount = aio_mount,
++ .kill_sb = kill_anon_super,
++ };
++ aio_mnt = kern_mount(&aio_fs);
++ if (IS_ERR(aio_mnt))
++ panic("Failed to create aio fs mount.");
++
+ kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+
+@@ -195,8 +251,10 @@ static void aio_free_ring(struct kioctx *ctx)
+
+ put_aio_ring_file(ctx);
+
+- if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
++ if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
+ kfree(ctx->ring_pages);
++ ctx->ring_pages = NULL;
++ }
+ }
+
+ static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
+@@ -283,16 +341,12 @@ static int aio_setup_ring(struct kioctx *ctx)
+ if (nr_pages < 0)
+ return -EINVAL;
+
+- file = anon_inode_getfile_private("[aio]", &aio_ring_fops, ctx, O_RDWR);
++ file = aio_private_file(ctx, nr_pages);
+ if (IS_ERR(file)) {
+ ctx->aio_ring_file = NULL;
+ return -EAGAIN;
+ }
+
+- file->f_inode->i_mapping->a_ops = &aio_ctx_aops;
+- file->f_inode->i_mapping->private_data = ctx;
+- file->f_inode->i_size = PAGE_SIZE * (loff_t)nr_pages;
+-
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page;
+ page = find_or_create_page(file->f_inode->i_mapping,
+@@ -313,8 +367,10 @@ static int aio_setup_ring(struct kioctx *ctx)
+ if (nr_pages > AIO_RING_PAGES) {
+ ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
+ GFP_KERNEL);
+- if (!ctx->ring_pages)
++ if (!ctx->ring_pages) {
++ put_aio_ring_file(ctx);
+ return -ENOMEM;
++ }
+ }
+
+ ctx->mmap_size = nr_pages * PAGE_SIZE;
+@@ -412,26 +468,34 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
+ return cancel(kiocb);
+ }
+
+-static void free_ioctx_rcu(struct rcu_head *head)
++static void free_ioctx(struct work_struct *work)
+ {
+- struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
++ struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+
++ pr_debug("freeing %p\n", ctx);
++
++ aio_free_ring(ctx);
+ free_percpu(ctx->cpu);
+ kmem_cache_free(kioctx_cachep, ctx);
+ }
+
++static void free_ioctx_reqs(struct percpu_ref *ref)
++{
++ struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
++
++ INIT_WORK(&ctx->free_work, free_ioctx);
++ schedule_work(&ctx->free_work);
++}
++
+ /*
+ * When this function runs, the kioctx has been removed from the "hash table"
+ * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
+ * now it's safe to cancel any that need to be.
+ */
+-static void free_ioctx(struct work_struct *work)
++static void free_ioctx_users(struct percpu_ref *ref)
+ {
+- struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+- struct aio_ring *ring;
++ struct kioctx *ctx = container_of(ref, struct kioctx, users);
+ struct kiocb *req;
+- unsigned cpu, avail;
+- DEFINE_WAIT(wait);
+
+ spin_lock_irq(&ctx->ctx_lock);
+
+@@ -445,54 +509,8 @@ static void free_ioctx(struct work_struct *work)
+
+ spin_unlock_irq(&ctx->ctx_lock);
+
+- for_each_possible_cpu(cpu) {
+- struct kioctx_cpu *kcpu = per_cpu_ptr(ctx->cpu, cpu);
+-
+- atomic_add(kcpu->reqs_available, &ctx->reqs_available);
+- kcpu->reqs_available = 0;
+- }
+-
+- while (1) {
+- prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+-
+- ring = kmap_atomic(ctx->ring_pages[0]);
+- avail = (ring->head <= ring->tail)
+- ? ring->tail - ring->head
+- : ctx->nr_events - ring->head + ring->tail;
+-
+- atomic_add(avail, &ctx->reqs_available);
+- ring->head = ring->tail;
+- kunmap_atomic(ring);
+-
+- if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1)
+- break;
+-
+- schedule();
+- }
+- finish_wait(&ctx->wait, &wait);
+-
+- WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);
+-
+- aio_free_ring(ctx);
+-
+- pr_debug("freeing %p\n", ctx);
+-
+- /*
+- * Here the call_rcu() is between the wait_event() for reqs_active to
+- * hit 0, and freeing the ioctx.
+- *
+- * aio_complete() decrements reqs_active, but it has to touch the ioctx
+- * after to issue a wakeup so we use rcu.
+- */
+- call_rcu(&ctx->rcu_head, free_ioctx_rcu);
+-}
+-
+-static void free_ioctx_ref(struct percpu_ref *ref)
+-{
+- struct kioctx *ctx = container_of(ref, struct kioctx, users);
+-
+- INIT_WORK(&ctx->free_work, free_ioctx);
+- schedule_work(&ctx->free_work);
++ percpu_ref_kill(&ctx->reqs);
++ percpu_ref_put(&ctx->reqs);
+ }
+
+ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
+@@ -551,6 +569,16 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
+ }
+ }
+
++static void aio_nr_sub(unsigned nr)
++{
++ spin_lock(&aio_nr_lock);
++ if (WARN_ON(aio_nr - nr > aio_nr))
++ aio_nr = 0;
++ else
++ aio_nr -= nr;
++ spin_unlock(&aio_nr_lock);
++}
++
+ /* ioctx_alloc
+ * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
+ */
+@@ -588,8 +616,11 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
+
+ ctx->max_reqs = nr_events;
+
+- if (percpu_ref_init(&ctx->users, free_ioctx_ref))
+- goto out_freectx;
++ if (percpu_ref_init(&ctx->users, free_ioctx_users))
++ goto err;
++
++ if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
++ goto err;
+
+ spin_lock_init(&ctx->ctx_lock);
+ spin_lock_init(&ctx->completion_lock);
+@@ -600,10 +631,10 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
+
+ ctx->cpu = alloc_percpu(struct kioctx_cpu);
+ if (!ctx->cpu)
+- goto out_freeref;
++ goto err;
+
+ if (aio_setup_ring(ctx) < 0)
+- goto out_freepcpu;
++ goto err;
+
+ atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
+ ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
+@@ -615,7 +646,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
+ if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
+ aio_nr + nr_events < aio_nr) {
+ spin_unlock(&aio_nr_lock);
+- goto out_cleanup;
++ err = -EAGAIN;
++ goto err_ctx;
+ }
+ aio_nr += ctx->max_reqs;
+ spin_unlock(&aio_nr_lock);
+@@ -624,23 +656,20 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
+
+ err = ioctx_add_table(ctx, mm);
+ if (err)
+- goto out_cleanup_put;
++ goto err_cleanup;
+
+ pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
+ ctx, ctx->user_id, mm, ctx->nr_events);
+ return ctx;
+
+-out_cleanup_put:
+- percpu_ref_put(&ctx->users);
+-out_cleanup:
+- err = -EAGAIN;
++err_cleanup:
++ aio_nr_sub(ctx->max_reqs);
++err_ctx:
+ aio_free_ring(ctx);
+-out_freepcpu:
++err:
+ free_percpu(ctx->cpu);
+-out_freeref:
++ free_percpu(ctx->reqs.pcpu_count);
+ free_percpu(ctx->users.pcpu_count);
+-out_freectx:
+- put_aio_ring_file(ctx);
+ kmem_cache_free(kioctx_cachep, ctx);
+ pr_debug("error allocating ioctx %d\n", err);
+ return ERR_PTR(err);
+@@ -675,10 +704,7 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
+ * -EAGAIN with no ioctxs actually in use (as far as userspace
+ * could tell).
+ */
+- spin_lock(&aio_nr_lock);
+- BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
+- aio_nr -= ctx->max_reqs;
+- spin_unlock(&aio_nr_lock);
++ aio_nr_sub(ctx->max_reqs);
+
+ if (ctx->mmap_size)
+ vm_munmap(ctx->mmap_base, ctx->mmap_size);
+@@ -810,6 +836,8 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
+ if (unlikely(!req))
+ goto out_put;
+
++ percpu_ref_get(&ctx->reqs);
++
+ req->ki_ctx = ctx;
+ return req;
+ out_put:
+@@ -879,12 +907,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
+ return;
+ }
+
+- /*
+- * Take rcu_read_lock() in case the kioctx is being destroyed, as we
+- * need to issue a wakeup after incrementing reqs_available.
+- */
+- rcu_read_lock();
+-
+ if (iocb->ki_list.next) {
+ unsigned long flags;
+
+@@ -959,7 +981,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+
+- rcu_read_unlock();
++ percpu_ref_put(&ctx->reqs);
+ }
+ EXPORT_SYMBOL(aio_complete);
+
+@@ -1370,6 +1392,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+ return 0;
+ out_put_req:
+ put_reqs_available(ctx, 1);
++ percpu_ref_put(&ctx->reqs);
+ kiocb_free(req);
+ return ret;
+ }
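
The aio rework replaces the RCU/wait-queue teardown with a second percpu_ref: ctx->reqs is taken in aio_get_req() and dropped in aio_complete(), and free_ioctx() now runs from free_ioctx_reqs() only after that ref hits zero, i.e. after the last in-flight request finishes. A minimal userspace sketch of the same two-stage pattern, with a plain C11 atomic standing in for percpu_ref:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for ctx->reqs: one count per in-flight request, plus the
 * initial reference that is dropped at kill time. */
static atomic_int reqs = 1;

static void free_ioctx(void)		/* kernel: runs from a workqueue */
{
	puts("all requests done, freeing ctx");
}

static void reqs_put(void)
{
	if (atomic_fetch_sub(&reqs, 1) == 1)
		free_ioctx();
}

static void submit_request(void)	/* aio_get_req() takes a ref */
{
	atomic_fetch_add(&reqs, 1);
}

static void complete_request(void)	/* aio_complete() drops it */
{
	reqs_put();
}

int main(void)
{
	submit_request();
	submit_request();
	complete_request();
	reqs_put();		/* percpu_ref_kill + put in free_ioctx_users */
	complete_request();	/* last completion triggers the free */
	return 0;
}

The point of the design change is visible in the trace: the context is freed by whichever side drops the final reference, so no thread ever has to sleep waiting for completions the way the old wait-queue loop did.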
+diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
+index 85c961849953..22f9698a1214 100644
+--- a/fs/anon_inodes.c
++++ b/fs/anon_inodes.c
+@@ -24,7 +24,6 @@
+
+ static struct vfsmount *anon_inode_mnt __read_mostly;
+ static struct inode *anon_inode_inode;
+-static const struct file_operations anon_inode_fops;
+
+ /*
+ * anon_inodefs_dname() is called from d_path().
+@@ -39,51 +38,6 @@ static const struct dentry_operations anon_inodefs_dentry_operations = {
+ .d_dname = anon_inodefs_dname,
+ };
+
+-/*
+- * nop .set_page_dirty method so that people can use .page_mkwrite on
+- * anon inodes.
+- */
+-static int anon_set_page_dirty(struct page *page)
+-{
+- return 0;
+-};
+-
+-static const struct address_space_operations anon_aops = {
+- .set_page_dirty = anon_set_page_dirty,
+-};
+-
+-/*
+- * A single inode exists for all anon_inode files. Contrary to pipes,
+- * anon_inode inodes have no associated per-instance data, so we need
+- * only allocate one of them.
+- */
+-static struct inode *anon_inode_mkinode(struct super_block *s)
+-{
+- struct inode *inode = new_inode_pseudo(s);
+-
+- if (!inode)
+- return ERR_PTR(-ENOMEM);
+-
+- inode->i_ino = get_next_ino();
+- inode->i_fop = &anon_inode_fops;
+-
+- inode->i_mapping->a_ops = &anon_aops;
+-
+- /*
+- * Mark the inode dirty from the very beginning,
+- * that way it will never be moved to the dirty
+- * list because mark_inode_dirty() will think
+- * that it already _is_ on the dirty list.
+- */
+- inode->i_state = I_DIRTY;
+- inode->i_mode = S_IRUSR | S_IWUSR;
+- inode->i_uid = current_fsuid();
+- inode->i_gid = current_fsgid();
+- inode->i_flags |= S_PRIVATE;
+- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+- return inode;
+-}
+-
+ static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+ {
+@@ -92,7 +46,7 @@ static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
+ &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
+ if (!IS_ERR(root)) {
+ struct super_block *s = root->d_sb;
+- anon_inode_inode = anon_inode_mkinode(s);
++ anon_inode_inode = alloc_anon_inode(s);
+ if (IS_ERR(anon_inode_inode)) {
+ dput(root);
+ deactivate_locked_super(s);
+@@ -134,7 +88,7 @@ struct file *anon_inode_getfile_private(const char *name,
+ if (fops->owner && !try_module_get(fops->owner))
+ return ERR_PTR(-ENOENT);
+
+- inode = anon_inode_mkinode(anon_inode_mnt->mnt_sb);
++ inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
+ if (IS_ERR(inode)) {
+ file = ERR_PTR(-ENOMEM);
+ goto err_module;
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 3a3a9b53bf5a..193e0c29fb94 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -993,3 +993,46 @@ EXPORT_SYMBOL_GPL(simple_attr_open);
+ EXPORT_SYMBOL_GPL(simple_attr_release);
+ EXPORT_SYMBOL_GPL(simple_attr_read);
+ EXPORT_SYMBOL_GPL(simple_attr_write);
++
++/*
++ * nop .set_page_dirty method so that people can use .page_mkwrite on
++ * anon inodes.
++ */
++static int anon_set_page_dirty(struct page *page)
++{
++ return 0;
++};
++
++/*
++ * A single inode exists for all anon_inode files. Contrary to pipes,
++ * anon_inode inodes have no associated per-instance data, so we need
++ * only allocate one of them.
++ */
++struct inode *alloc_anon_inode(struct super_block *s)
++{
++ static const struct address_space_operations anon_aops = {
++ .set_page_dirty = anon_set_page_dirty,
++ };
++ struct inode *inode = new_inode_pseudo(s);
++
++ if (!inode)
++ return ERR_PTR(-ENOMEM);
++
++ inode->i_ino = get_next_ino();
++ inode->i_mapping->a_ops = &anon_aops;
++
++ /*
++ * Mark the inode dirty from the very beginning,
++ * that way it will never be moved to the dirty
++ * list because mark_inode_dirty() will think
++ * that it already _is_ on the dirty list.
++ */
++ inode->i_state = I_DIRTY;
++ inode->i_mode = S_IRUSR | S_IWUSR;
++ inode->i_uid = current_fsuid();
++ inode->i_gid = current_fsgid();
++ inode->i_flags |= S_PRIVATE;
++ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
++ return inode;
++}
++EXPORT_SYMBOL(alloc_anon_inode);
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 668e8f4ccf5e..2e1e6c33841d 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -1717,6 +1717,12 @@ xfs_file_ioctl(
+ if (mp->m_flags & XFS_MOUNT_RDONLY)
+ return -XFS_ERROR(EROFS);
+
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ if (mp->m_flags & XFS_MOUNT_RDONLY)
++ return -XFS_ERROR(EROFS);
++
+ if (copy_from_user(&eofb, arg, sizeof(eofb)))
+ return -XFS_ERROR(EFAULT);
+
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 3f40547ba191..fefa7b00ba42 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2562,6 +2562,7 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
+ extern int simple_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata);
++extern struct inode *alloc_anon_inode(struct super_block *);
+
+ extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
+ extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
+diff --git a/include/linux/net.h b/include/linux/net.h
+index 4f27575ce1d6..8bd9d926b3cf 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -163,6 +163,14 @@ struct proto_ops {
+ #endif
+ int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len);
++ /* Notes for implementing recvmsg:
++ * ===============================
++ * msg->msg_namelen should get updated by the recvmsg handlers
++ * iff msg_name != NULL. It is by default 0 to prevent
++ * returning uninitialized memory to user space. The recvfrom
++ * handlers can assume that msg.msg_name is either NULL or has
++ * a minimum size of sizeof(struct sockaddr_storage).
++ */
+ int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len,
+ int flags);
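
This new proto_ops comment codifies the contract that the mISDN and pppoe hunks above were converted to: fill in the sender address and msg_namelen only when the caller supplied a name buffer, and otherwise leave the pre-zeroed msg_namelen alone so no uninitialized kernel memory reaches userspace. A userspace-flavored sketch of a handler honoring it; AF_UNSPEC is just a placeholder family here:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Illustrative analog of the recvmsg contract: only touch
 * msg_name/msg_namelen when the caller actually passed a buffer. */
static void fill_sender(struct msghdr *msg)
{
	if (msg->msg_name) {
		struct sockaddr_storage *addr = msg->msg_name;

		memset(addr, 0, sizeof(*addr));
		addr->ss_family = AF_UNSPEC;	/* placeholder family */
		msg->msg_namelen = sizeof(*addr);
	}
	/* else: leave msg_namelen as the core initialized it (0), so
	 * no uninitialized bytes are ever copied back to userspace. */
}

int main(void)
{
	struct sockaddr_storage ss;
	struct msghdr with_name = { .msg_name = &ss };
	struct msghdr without_name = { 0 };

	fill_sender(&with_name);
	fill_sender(&without_name);
	printf("namelen: %u vs %u\n",
	       (unsigned)with_name.msg_namelen,
	       (unsigned)without_name.msg_namelen);
	return 0;
}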
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 6312dd9ba449..bf9085e89fb5 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -50,9 +50,9 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
+ {
+ u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+
+- state->s1 = __seed(i, 1);
+- state->s2 = __seed(i, 7);
+- state->s3 = __seed(i, 15);
++ state->s1 = __seed(i, 2);
++ state->s2 = __seed(i, 8);
++ state->s3 = __seed(i, 16);
+ }
+
+ #ifdef CONFIG_ARCH_RANDOM
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index c2d89335f637..f66f346dd164 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -333,11 +333,6 @@ typedef unsigned int sk_buff_data_t;
+ typedef unsigned char *sk_buff_data_t;
+ #endif
+
+-#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
+- defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
+-#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
+-#endif
+-
+ /**
+ * struct sk_buff - socket buffer
+ * @next: Next buffer in list
+@@ -370,7 +365,6 @@ typedef unsigned char *sk_buff_data_t;
+ * @protocol: Packet protocol from driver
+ * @destructor: Destruct function
+ * @nfct: Associated connection, if any
+- * @nfct_reasm: netfilter conntrack re-assembly pointer
+ * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
+ * @skb_iif: ifindex of device we arrived on
+ * @tc_index: Traffic control index
+@@ -459,9 +453,6 @@ struct sk_buff {
+ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ struct nf_conntrack *nfct;
+ #endif
+-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+- struct sk_buff *nfct_reasm;
+-#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ struct nf_bridge_info *nf_bridge;
+ #endif
+@@ -2605,18 +2596,6 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
+ atomic_inc(&nfct->use);
+ }
+ #endif
+-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+-static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
+-{
+- if (skb)
+- atomic_inc(&skb->users);
+-}
+-static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
+-{
+- if (skb)
+- kfree_skb(skb);
+-}
+-#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
+ {
+@@ -2635,10 +2614,6 @@ static inline void nf_reset(struct sk_buff *skb)
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = NULL;
+ #endif
+-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+- nf_conntrack_put_reasm(skb->nfct_reasm);
+- skb->nfct_reasm = NULL;
+-#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ nf_bridge_put(skb->nf_bridge);
+ skb->nf_bridge = NULL;
+@@ -2660,10 +2635,6 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+ nf_conntrack_get(src->nfct);
+ dst->nfctinfo = src->nfctinfo;
+ #endif
+-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+- dst->nfct_reasm = src->nfct_reasm;
+- nf_conntrack_get_reasm(src->nfct_reasm);
+-#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ dst->nf_bridge = src->nf_bridge;
+ nf_bridge_get(src->nf_bridge);
+@@ -2675,9 +2646,6 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ nf_conntrack_put(dst->nfct);
+ #endif
+-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+- nf_conntrack_put_reasm(dst->nfct_reasm);
+-#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ nf_bridge_put(dst->nf_bridge);
+ #endif
+diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
+index 1855f0a22add..c557c6d096de 100644
+--- a/include/linux/vm_event_item.h
++++ b/include/linux/vm_event_item.h
+@@ -39,6 +39,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+ PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+ #ifdef CONFIG_NUMA_BALANCING
+ NUMA_PTE_UPDATES,
++ NUMA_HUGE_PTE_UPDATES,
+ NUMA_HINT_FAULTS,
+ NUMA_HINT_FAULTS_LOCAL,
+ NUMA_PAGE_MIGRATE,
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 5e5268807a1c..301f10c9b563 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -464,7 +464,7 @@ extern int compat_ip_getsockopt(struct sock *sk, int level,
+ int optname, char __user *optval, int __user *optlen);
+ extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
+
+-extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
++extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
+ extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
+ __be16 port, u32 info, u8 *payload);
+ extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index 9c4d37ec45a1..772252ddc115 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -109,7 +109,6 @@ extern int ip_vs_conn_tab_size;
+ struct ip_vs_iphdr {
+ __u32 len; /* IPv4 simply where L4 starts
+ IPv6 where L4 Transport Header starts */
+- __u32 thoff_reasm; /* Transport Header Offset in nfct_reasm skb */
+ __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
+ __s16 protocol;
+ __s32 flags;
+@@ -117,34 +116,12 @@ struct ip_vs_iphdr {
+ union nf_inet_addr daddr;
+ };
+
+-/* Dependency to module: nf_defrag_ipv6 */
+-#if defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
+-static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
+-{
+- return skb->nfct_reasm;
+-}
+-static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
+- int len, void *buffer,
+- const struct ip_vs_iphdr *ipvsh)
+-{
+- if (unlikely(ipvsh->fragoffs && skb_nfct_reasm(skb)))
+- return skb_header_pointer(skb_nfct_reasm(skb),
+- ipvsh->thoff_reasm, len, buffer);
+-
+- return skb_header_pointer(skb, offset, len, buffer);
+-}
+-#else
+-static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
+-{
+- return NULL;
+-}
+ static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
+ int len, void *buffer,
+ const struct ip_vs_iphdr *ipvsh)
+ {
+ return skb_header_pointer(skb, offset, len, buffer);
+ }
+-#endif
+
+ static inline void
+ ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr)
+@@ -171,19 +148,12 @@ ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, struct ip_vs_iphdr *iphdr)
+ (struct ipv6hdr *)skb_network_header(skb);
+ iphdr->saddr.in6 = iph->saddr;
+ iphdr->daddr.in6 = iph->daddr;
+- /* ipv6_find_hdr() updates len, flags, thoff_reasm */
+- iphdr->thoff_reasm = 0;
++ /* ipv6_find_hdr() updates len, flags */
+ iphdr->len = 0;
+ iphdr->flags = 0;
+ iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1,
+ &iphdr->fragoffs,
+ &iphdr->flags);
+- /* get proto from re-assembled packet and it's offset */
+- if (skb_nfct_reasm(skb))
+- iphdr->protocol = ipv6_find_hdr(skb_nfct_reasm(skb),
+- &iphdr->thoff_reasm,
+- -1, NULL, NULL);
+-
+ } else
+ #endif
+ {
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index bbf1c8fb8511..1f96efd30816 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -802,8 +802,10 @@ extern int compat_ipv6_getsockopt(struct sock *sk,
+ extern int ip6_datagram_connect(struct sock *sk,
+ struct sockaddr *addr, int addr_len);
+
+-extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
+-extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
++extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len);
++extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len);
+ extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+ u32 info, u8 *payload);
+ extern void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
+diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+index fd79c9a1779d..17920d847b40 100644
+--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
++++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+@@ -6,10 +6,7 @@ extern void nf_defrag_ipv6_enable(void);
+ extern int nf_ct_frag6_init(void);
+ extern void nf_ct_frag6_cleanup(void);
+ extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+-extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+- struct net_device *in,
+- struct net_device *out,
+- int (*okfn)(struct sk_buff *));
++extern void nf_ct_frag6_consume_orig(struct sk_buff *skb);
+
+ struct inet_frags_ctl;
+
+diff --git a/include/net/ping.h b/include/net/ping.h
+index 5db0224b73ac..2b496e9f9ebd 100644
+--- a/include/net/ping.h
++++ b/include/net/ping.h
+@@ -31,7 +31,8 @@
+
+ /* Compatibility glue so we can support IPv6 when it's compiled as a module */
+ struct pingv6_ops {
+- int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len);
++ int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len);
+ int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb);
+ int (*icmpv6_err_convert)(u8 type, u8 code, int *err);
+diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
+index 9b829134d422..66f925d3c07b 100644
+--- a/include/uapi/linux/pkt_sched.h
++++ b/include/uapi/linux/pkt_sched.h
+@@ -759,13 +759,14 @@ enum {
+
+ TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
+
+- TCA_FQ_FLOW_DEFAULT_RATE,/* for sockets with unspecified sk_rate,
+- * use the following rate
+- */
++ TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
+
+ TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
+
+ TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
++
++ TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
++
+ __TCA_FQ_MAX
+ };
+
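TCA_FQ_FLOW_DEFAULT_RATE is retired in place rather than deleted because uapi enum values are ABI: removing an entry would renumber every attribute after it, so the new TCA_FQ_FLOW_REFILL_DELAY is appended at the end instead. A minimal standalone sketch of that convention (illustrative C with hypothetical names, not part of the patch):

#include <stdio.h>

enum demo_attr {
	DEMO_UNSPEC,    /* 0 */
	DEMO_RATE,      /* 1 */
	DEMO_OBSOLETE,  /* 2: dead, but kept so later values stay stable */
	DEMO_DELAY,     /* 3: new entries are only ever appended */
	__DEMO_MAX
};

int main(void)
{
	/* Deleting DEMO_OBSOLETE would silently turn DEMO_DELAY into 2,
	 * breaking every binary compiled against the old numbering.
	 */
	printf("DEMO_DELAY = %d\n", DEMO_DELAY); /* prints 3 */
	return 0;
}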
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index bb2215174f05..af8d1d4f3d55 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -475,6 +475,7 @@ static void sync_cmos_clock(struct work_struct *work)
+ * called as close as possible to 500 ms before the new second starts.
+ * This code is run on a timer. If the clock is set, that timer
+ * may not expire at the correct time. Thus, we adjust...
++ * We want the clock to be within a couple of ticks from the target.
+ */
+ if (!ntp_synced()) {
+ /*
+@@ -485,7 +486,7 @@ static void sync_cmos_clock(struct work_struct *work)
+ }
+
+ getnstimeofday(&now);
+- if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) {
++ if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
+ struct timespec adjust = now;
+
+ fail = -ENODEV;
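The acceptance window for the mid-second CMOS update grows from half a tick to five ticks, so a timer that fires slightly off target no longer skips the RTC sync for a whole cycle. A standalone sketch of the widened check (illustrative userspace C; the 10 ms tick assumes HZ=100):

#include <stdio.h>
#include <stdlib.h>

#define NSEC_PER_SEC 1000000000L

/* Accept the update when the current nanosecond offset lies within
 * five ticks of the mid-second target, mirroring the patched test.
 */
static int near_mid_second(long tv_nsec, long tick_nsec)
{
	return labs(tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5;
}

int main(void)
{
	long tick_nsec = 10 * 1000 * 1000; /* 10 ms tick, assumed HZ=100 */

	printf("%d\n", near_mid_second(520000000L, tick_nsec)); /* 1: 20 ms off */
	printf("%d\n", near_mid_second(600000000L, tick_nsec)); /* 0: 100 ms off */
	return 0;
}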
+diff --git a/lib/random32.c b/lib/random32.c
+index 52280d5526be..01e8890d1089 100644
+--- a/lib/random32.c
++++ b/lib/random32.c
+@@ -141,7 +141,7 @@ void prandom_seed(u32 entropy)
+ */
+ for_each_possible_cpu (i) {
+ struct rnd_state *state = &per_cpu(net_rand_state, i);
+- state->s1 = __seed(state->s1 ^ entropy, 1);
++ state->s1 = __seed(state->s1 ^ entropy, 2);
+ }
+ }
+ EXPORT_SYMBOL(prandom_seed);
+@@ -158,9 +158,9 @@ static int __init prandom_init(void)
+ struct rnd_state *state = &per_cpu(net_rand_state,i);
+
+ #define LCG(x) ((x) * 69069) /* super-duper LCG */
+- state->s1 = __seed(LCG(i + jiffies), 1);
+- state->s2 = __seed(LCG(state->s1), 7);
+- state->s3 = __seed(LCG(state->s2), 15);
++ state->s1 = __seed(LCG(i + jiffies), 2);
++ state->s2 = __seed(LCG(state->s1), 8);
++ state->s3 = __seed(LCG(state->s2), 16);
+
+ /* "warm it up" */
+ prandom_u32_state(state);
+@@ -187,9 +187,9 @@ static int __init prandom_reseed(void)
+ u32 seeds[3];
+
+ get_random_bytes(&seeds, sizeof(seeds));
+- state->s1 = __seed(seeds[0], 1);
+- state->s2 = __seed(seeds[1], 7);
+- state->s3 = __seed(seeds[2], 15);
++ state->s1 = __seed(seeds[0], 2);
++ state->s2 = __seed(seeds[1], 8);
++ state->s3 = __seed(seeds[2], 16);
+
+ /* mix it in */
+ prandom_u32_state(state);
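The seed floors change because the LFSR113/Tausworthe recurrence degenerates when a state word falls below its per-word minimum (s1 >= 2, s2 >= 8, s3 >= 16); the old floors of 1, 7 and 15 were off by one. A standalone sketch of the clamping idiom behind __seed() (illustrative userspace C):

#include <stdio.h>
#include <stdint.h>

/* Lift the word to at least m, as the kernel's __seed() does; below
 * these minimums the generator's state can collapse.
 */
static uint32_t seed_min(uint32_t x, uint32_t m)
{
	return (x < m) ? x + m : x;
}

int main(void)
{
	printf("%u\n", seed_min(0, 2));   /* 2: lifted above the floor */
	printf("%u\n", seed_min(1, 2));   /* 3: still lifted */
	printf("%u\n", seed_min(100, 2)); /* 100: already valid */
	return 0;
}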
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index 412ba2b7326a..6c3f56f19275 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -138,6 +138,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+ pmd_t *pmd;
+ unsigned long next;
+ unsigned long pages = 0;
++ unsigned long nr_huge_updates = 0;
+ bool all_same_node;
+
+ pmd = pmd_offset(pud, addr);
+@@ -148,7 +149,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+ split_huge_page_pmd(vma, addr, pmd);
+ else if (change_huge_pmd(vma, pmd, addr, newprot,
+ prot_numa)) {
+- pages++;
++ pages += HPAGE_PMD_NR;
++ nr_huge_updates++;
+ continue;
+ }
+ /* fall through */
+@@ -168,6 +170,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+ change_pmd_protnuma(vma->vm_mm, addr, pmd);
+ } while (pmd++, addr = next, addr != end);
+
++ if (nr_huge_updates)
++ count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
++
+ return pages;
+ }
+
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 9bb314577911..5a442a723d79 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -812,6 +812,7 @@ const char * const vmstat_text[] = {
+
+ #ifdef CONFIG_NUMA_BALANCING
+ "numa_pte_updates",
++ "numa_huge_pte_updates",
+ "numa_hint_faults",
+ "numa_hint_faults_local",
+ "numa_pages_migrated",
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index 7fee50d637f9..7d424ac6e760 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1735,7 +1735,6 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
+ size_t size, int flags)
+ {
+ struct sock *sk = sock->sk;
+- struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
+ struct ddpehdr *ddp;
+ int copied = 0;
+ int offset = 0;
+@@ -1764,14 +1763,13 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
+ }
+ err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
+
+- if (!err) {
+- if (sat) {
+- sat->sat_family = AF_APPLETALK;
+- sat->sat_port = ddp->deh_sport;
+- sat->sat_addr.s_node = ddp->deh_snode;
+- sat->sat_addr.s_net = ddp->deh_snet;
+- }
+- msg->msg_namelen = sizeof(*sat);
++ if (!err && msg->msg_name) {
++ struct sockaddr_at *sat = msg->msg_name;
++ sat->sat_family = AF_APPLETALK;
++ sat->sat_port = ddp->deh_sport;
++ sat->sat_addr.s_node = ddp->deh_snode;
++ sat->sat_addr.s_net = ddp->deh_snet;
++ msg->msg_namelen = sizeof(*sat);
+ }
+
+ skb_free_datagram(sk, skb); /* Free the datagram. */
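This and the recvmsg hunks that follow all apply one pattern: write the peer address, and set msg_namelen, only when the caller supplied msg_name, otherwise leaving the zero length the socket core pre-initialised. Unconditionally reporting a length for a buffer that was never written is the kernel-stack information leak being closed here. A minimal standalone sketch of the pattern (illustrative C with a hypothetical address type):

#include <stddef.h>
#include <string.h>

struct demo_addr { int family; int port; };          /* hypothetical */
struct demo_msghdr { void *msg_name; size_t msg_namelen; };

/* Fill the source address only when asked; never report a length for
 * memory that was not written.
 */
static void demo_fill_name(struct demo_msghdr *msg, int family, int port)
{
	if (msg->msg_name) {
		struct demo_addr *a = msg->msg_name;

		memset(a, 0, sizeof(*a));
		a->family = family;
		a->port = port;
		msg->msg_namelen = sizeof(*a);
	}
}

int main(void)
{
	struct demo_addr a;
	struct demo_msghdr with = { &a, 0 }, without = { NULL, 0 };

	demo_fill_name(&with, 1, 7);    /* msg_namelen becomes sizeof(a) */
	demo_fill_name(&without, 1, 7); /* msg_namelen stays 0 */
	return 0;
}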
+diff --git a/net/atm/common.c b/net/atm/common.c
+index 737bef59ce89..7b491006eaf4 100644
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -531,8 +531,6 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ struct sk_buff *skb;
+ int copied, error = -EINVAL;
+
+- msg->msg_namelen = 0;
+-
+ if (sock->state != SS_CONNECTED)
+ return -ENOTCONN;
+
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 4b4d2b779ec1..78c474f8f615 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1636,11 +1636,11 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+
+- if (msg->msg_namelen != 0) {
+- struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
++ if (msg->msg_name) {
+ ax25_digi digi;
+ ax25_address src;
+ const unsigned char *mac = skb_mac_header(skb);
++ struct sockaddr_ax25 *sax = msg->msg_name;
+
+ memset(sax, 0, sizeof(struct full_sockaddr_ax25));
+ ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 9096137c889c..6629cdc134dc 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -221,8 +221,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags & (MSG_OOB))
+ return -EOPNOTSUPP;
+
+- msg->msg_namelen = 0;
+-
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+@@ -287,8 +285,6 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+- msg->msg_namelen = 0;
+-
+ BT_DBG("sk %p size %zu", sk, size);
+
+ lock_sock(sk);
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 9bd7d959e384..fa4bf6631425 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -752,8 +752,6 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (!skb)
+ return err;
+
+- msg->msg_namelen = 0;
+-
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 30b3721dc6d7..c1c6028e389a 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -608,7 +608,6 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
+ rfcomm_dlc_accept(d);
+- msg->msg_namelen = 0;
+ return 0;
+ }
+
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 96bd388d93a4..d021e441b6e6 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -715,7 +715,6 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ sco_conn_defer_accept(pi->conn->hcon, pi->setting);
+ sk->sk_state = BT_CONFIG;
+- msg->msg_namelen = 0;
+
+ release_sock(sk);
+ return 0;
+diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
+index c41d5fbb91d0..547504ccba69 100644
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -172,6 +172,8 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
+ del_nbp(p);
+ }
+
++ br_fdb_delete_by_port(br, NULL, 1);
++
+ del_timer_sync(&br->gc_timer);
+
+ br_sysfs_delbr(br->dev);
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index 05a41c7ec304..d6be3edb7a43 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -286,8 +286,6 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (m->msg_flags&MSG_OOB)
+ goto read_error;
+
+- m->msg_namelen = 0;
+-
+ skb = skb_recv_datagram(sk, flags, 0 , &ret);
+ if (!skb)
+ goto read_error;
+@@ -361,8 +359,6 @@ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags&MSG_OOB)
+ goto out;
+
+- msg->msg_namelen = 0;
+-
+ /*
+ * Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+diff --git a/net/compat.c b/net/compat.c
+index 89032580bd1d..dd32e34c1e2c 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
+ __get_user(kmsg->msg_flags, &umsg->msg_flags))
+ return -EFAULT;
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+- return -EINVAL;
++ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ kmsg->msg_name = compat_ptr(tmp1);
+ kmsg->msg_iov = compat_ptr(tmp2);
+ kmsg->msg_control = compat_ptr(tmp3);
+@@ -93,7 +93,8 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+ if (err < 0)
+ return err;
+ }
+- kern_msg->msg_name = kern_address;
++ if (kern_msg->msg_name)
++ kern_msg->msg_name = kern_address;
+ } else
+ kern_msg->msg_name = NULL;
+
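get_compat_msghdr() now clamps an oversized user-supplied msg_namelen to sizeof(struct sockaddr_storage) instead of failing, matching the native path, and verify_compat_iovec() only wires up the kernel address buffer when the user actually passed a name. A one-function sketch of the clamp (illustrative userspace C; 128 is the usual Linux sockaddr_storage size):

#include <stdio.h>

#define DEMO_STORAGE_LEN 128 /* assumed sizeof(struct sockaddr_storage) */

/* Never trust a user length beyond the kernel-side buffer the address
 * will be copied into; truncate rather than reject.
 */
static unsigned int clamp_namelen(unsigned int user_len)
{
	return user_len > DEMO_STORAGE_LEN ? DEMO_STORAGE_LEN : user_len;
}

int main(void)
{
	printf("%u\n", clamp_namelen(16));   /* 16: passed through */
	printf("%u\n", clamp_namelen(4096)); /* 128: clamped */
	return 0;
}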
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3430b1ed12e5..3d1387461279 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1691,13 +1691,9 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+- skb->protocol = eth_type_trans(skb, dev);
+
+- /* eth_type_trans() can set pkt_type.
+- * call skb_scrub_packet() after it to clear pkt_type _after_ calling
+- * eth_type_trans().
+- */
+ skb_scrub_packet(skb, true);
++ skb->protocol = eth_type_trans(skb, dev);
+
+ return netif_rx(skb);
+ }
+@@ -4819,7 +4815,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
+ {
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+- if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
++ if (ops->ndo_change_rx_flags)
+ ops->ndo_change_rx_flags(dev, flags);
+ }
+
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index 2e654138433c..f409e0bd35c0 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -460,7 +460,8 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
+ if (frh->action && (frh->action != rule->action))
+ continue;
+
+- if (frh->table && (frh_get_table(frh, tb) != rule->table))
++ if (frh_get_table(frh, tb) &&
++ (frh_get_table(frh, tb) != rule->table))
+ continue;
+
+ if (tb[FRA_PRIORITY] &&
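The delete path compared frh->table, an 8-bit header field, so a rule in any table above 255 — whose ID travels in the separate FRA_TABLE attribute — could never be matched for deletion; frh_get_table() prefers the attribute when it is present. A standalone sketch of that lookup order (illustrative C, hypothetical names):

#include <stdio.h>
#include <stdint.h>

/* Mirrors frh_get_table(): the 32-bit attribute wins when set (0 means
 * "not supplied"), the 8-bit header field is the fallback.
 */
static uint32_t demo_get_table(uint8_t hdr_table, uint32_t attr_table)
{
	return attr_table ? attr_table : hdr_table;
}

int main(void)
{
	printf("%u\n", demo_get_table(254, 0));   /* 254: from the header */
	printf("%u\n", demo_get_table(0, 10000)); /* 10000: from FRA_TABLE */
	return 0;
}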
+diff --git a/net/core/iovec.c b/net/core/iovec.c
+index b77eeecc0011..7d84ea1fbb20 100644
+--- a/net/core/iovec.c
++++ b/net/core/iovec.c
+@@ -48,7 +48,8 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
+ if (err < 0)
+ return err;
+ }
+- m->msg_name = address;
++ if (m->msg_name)
++ m->msg_name = address;
+ } else {
+ m->msg_name = NULL;
+ }
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 261357a66300..a797fff7f222 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2527,6 +2527,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
+ if (x) {
+ int ret;
+ __u8 *eth;
++ struct iphdr *iph;
++
+ nhead = x->props.header_len - skb_headroom(skb);
+ if (nhead > 0) {
+ ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
+@@ -2548,6 +2550,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
+ eth = (__u8 *) skb_push(skb, ETH_HLEN);
+ memcpy(eth, pkt_dev->hh, 12);
+ *(u16 *) &eth[12] = protocol;
++
++ /* Update IPv4 header len as well as checksum value */
++ iph = ip_hdr(skb);
++ iph->tot_len = htons(skb->len - ETH_HLEN);
++ ip_send_check(iph);
+ }
+ }
+ return 1;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index d81cff119f73..c28c7fed0d0b 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -580,9 +580,6 @@ static void skb_release_head_state(struct sk_buff *skb)
+ #if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ nf_conntrack_put(skb->nfct);
+ #endif
+-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+- nf_conntrack_put_reasm(skb->nfct_reasm);
+-#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ nf_bridge_put(skb->nf_bridge);
+ #endif
+@@ -2758,6 +2755,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+ struct sk_buff *segs = NULL;
+ struct sk_buff *tail = NULL;
+ struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
++ skb_frag_t *skb_frag = skb_shinfo(skb)->frags;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int doffset = skb->data - skb_mac_header(skb);
+ unsigned int offset = doffset;
+@@ -2797,16 +2795,38 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+ if (hsize > len || !sg)
+ hsize = len;
+
+- if (!hsize && i >= nfrags) {
+- BUG_ON(fskb->len != len);
++ if (!hsize && i >= nfrags && skb_headlen(fskb) &&
++ (skb_headlen(fskb) == len || sg)) {
++ BUG_ON(skb_headlen(fskb) > len);
++
++ i = 0;
++ nfrags = skb_shinfo(fskb)->nr_frags;
++ skb_frag = skb_shinfo(fskb)->frags;
++ pos += skb_headlen(fskb);
++
++ while (pos < offset + len) {
++ BUG_ON(i >= nfrags);
++
++ size = skb_frag_size(skb_frag);
++ if (pos + size > offset + len)
++ break;
++
++ i++;
++ pos += size;
++ skb_frag++;
++ }
+
+- pos += len;
+ nskb = skb_clone(fskb, GFP_ATOMIC);
+ fskb = fskb->next;
+
+ if (unlikely(!nskb))
+ goto err;
+
++ if (unlikely(pskb_trim(nskb, len))) {
++ kfree_skb(nskb);
++ goto err;
++ }
++
+ hsize = skb_end_offset(nskb);
+ if (skb_cow_head(nskb, doffset + headroom)) {
+ kfree_skb(nskb);
+@@ -2850,7 +2870,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+ nskb->data - tnl_hlen,
+ doffset + tnl_hlen);
+
+- if (fskb != skb_shinfo(skb)->frag_list)
++ if (nskb->len == len + doffset)
+ goto perform_csum_check;
+
+ if (!sg) {
+@@ -2868,8 +2888,28 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+
+ skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+
+- while (pos < offset + len && i < nfrags) {
+- *frag = skb_shinfo(skb)->frags[i];
++ while (pos < offset + len) {
++ if (i >= nfrags) {
++ BUG_ON(skb_headlen(fskb));
++
++ i = 0;
++ nfrags = skb_shinfo(fskb)->nr_frags;
++ skb_frag = skb_shinfo(fskb)->frags;
++
++ BUG_ON(!nfrags);
++
++ fskb = fskb->next;
++ }
++
++ if (unlikely(skb_shinfo(nskb)->nr_frags >=
++ MAX_SKB_FRAGS)) {
++ net_warn_ratelimited(
++ "skb_segment: too many frags: %u %u\n",
++ pos, mss);
++ goto err;
++ }
++
++ *frag = *skb_frag;
+ __skb_frag_ref(frag);
+ size = skb_frag_size(frag);
+
+@@ -2882,6 +2922,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+
+ if (pos + size <= offset + len) {
+ i++;
++ skb_frag++;
+ pos += size;
+ } else {
+ skb_frag_size_sub(frag, pos + size - (offset + len));
+@@ -2891,25 +2932,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+ frag++;
+ }
+
+- if (pos < offset + len) {
+- struct sk_buff *fskb2 = fskb;
+-
+- BUG_ON(pos + fskb->len != offset + len);
+-
+- pos += fskb->len;
+- fskb = fskb->next;
+-
+- if (fskb2->next) {
+- fskb2 = skb_clone(fskb2, GFP_ATOMIC);
+- if (!fskb2)
+- goto err;
+- } else
+- skb_get(fskb2);
+-
+- SKB_FRAG_ASSERT(nskb);
+- skb_shinfo(nskb)->frag_list = fskb2;
+- }
+-
+ skip_fraglist:
+ nskb->data_len = len - hsize;
+ nskb->len += nskb->data_len;
+diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
+index ff41b4d60d30..008f33703a33 100644
+--- a/net/ieee802154/6lowpan.c
++++ b/net/ieee802154/6lowpan.c
+@@ -957,7 +957,7 @@ lowpan_process_data(struct sk_buff *skb)
+ * Traffic class carried in-line
+ * ECN + DSCP (1 byte), Flow Label is elided
+ */
+- case 1: /* 10b */
++ case 2: /* 10b */
+ if (lowpan_fetch_skb_u8(skb, &tmp))
+ goto drop;
+
+@@ -968,7 +968,7 @@ lowpan_process_data(struct sk_buff *skb)
+ * Flow Label carried in-line
+ * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
+ */
+- case 2: /* 01b */
++ case 1: /* 01b */
+ if (lowpan_fetch_skb_u8(skb, &tmp))
+ goto drop;
+
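The case labels are swapped to match RFC 6282's IPHC TF encoding: 01 means ECN + Flow Label carried in-line with DSCP elided, while 10 means ECN + DSCP in-line with the Flow Label elided. A standalone decode of the 2-bit field (illustrative C):

#include <stdio.h>

/* RFC 6282 IPHC Traffic Class / Flow Label compression values. */
static const char *iphc_tf(unsigned int tf)
{
	switch (tf & 3) {
	case 0: return "ECN+DSCP+Flow Label in-line (4 bytes)";
	case 1: return "ECN+Flow Label in-line, DSCP elided (3 bytes)";
	case 2: return "ECN+DSCP in-line, Flow Label elided (1 byte)";
	default: return "Traffic Class and Flow Label elided";
	}
}

int main(void)
{
	printf("TF=01: %s\n", iphc_tf(1));
	printf("TF=10: %s\n", iphc_tf(2));
	return 0;
}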
+diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
+index 581a59504bd5..1865fdf5a5a5 100644
+--- a/net/ieee802154/dgram.c
++++ b/net/ieee802154/dgram.c
+@@ -315,9 +315,8 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
+ if (saddr) {
+ saddr->family = AF_IEEE802154;
+ saddr->addr = mac_cb(skb)->sa;
+- }
+- if (addr_len)
+ *addr_len = sizeof(*saddr);
++ }
+
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
+index b28e863fe0a7..19e36376d2a0 100644
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -57,7 +57,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index d9c4f113d709..23e6ab0a2dc0 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -368,7 +368,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
+ /*
+ * Handle MSG_ERRQUEUE
+ */
+-int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
++int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb, *skb2;
+@@ -405,6 +405,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ serr->addr_offset);
+ sin->sin_port = serr->port;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+
+ memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 63a6d6d6b875..254f11c24aa5 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -454,6 +454,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
+ tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
+
++ skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
++
+ if (tunnel->dev->type == ARPHRD_ETHER) {
+ skb->protocol = eth_type_trans(skb, tunnel->dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+@@ -461,8 +463,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
+ skb->dev = tunnel->dev;
+ }
+
+- skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
+-
+ gro_cells_receive(&tunnel->gro_cells, skb);
+ return 0;
+
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 6e87f853d033..26847e189c04 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -190,6 +190,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (!rt->dst.xfrm ||
+ rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
+ dev->stats.tx_carrier_errors++;
++ ip_rt_put(rt);
+ goto tx_error_icmp;
+ }
+ tdev = rt->dst.dev;
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index d7d9882d4cae..c482f7c7dd32 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -769,7 +769,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ err = PTR_ERR(rt);
+ rt = NULL;
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
+@@ -827,8 +827,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ {
+ struct inet_sock *isk = inet_sk(sk);
+ int family = sk->sk_family;
+- struct sockaddr_in *sin;
+- struct sockaddr_in6 *sin6;
+ struct sk_buff *skb;
+ int copied, err;
+
+@@ -838,19 +836,13 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len) {
+- if (family == AF_INET)
+- *addr_len = sizeof(*sin);
+- else if (family == AF_INET6 && addr_len)
+- *addr_len = sizeof(*sin6);
+- }
+-
+ if (flags & MSG_ERRQUEUE) {
+ if (family == AF_INET) {
+- return ip_recv_error(sk, msg, len);
++ return ip_recv_error(sk, msg, len, addr_len);
+ #if IS_ENABLED(CONFIG_IPV6)
+ } else if (family == AF_INET6) {
+- return pingv6_ops.ipv6_recv_error(sk, msg, len);
++ return pingv6_ops.ipv6_recv_error(sk, msg, len,
++ addr_len);
+ #endif
+ }
+ }
+@@ -874,11 +866,15 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+
+ /* Copy the address and add cmsg data. */
+ if (family == AF_INET) {
+- sin = (struct sockaddr_in *) msg->msg_name;
+- sin->sin_family = AF_INET;
+- sin->sin_port = 0 /* skb->h.uh->source */;
+- sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+- memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
++ struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
++
++ if (sin) {
++ sin->sin_family = AF_INET;
++ sin->sin_port = 0 /* skb->h.uh->source */;
++ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
++ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
++ }
+
+ if (isk->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+@@ -887,17 +883,21 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ } else if (family == AF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6hdr *ip6 = ipv6_hdr(skb);
+- sin6 = (struct sockaddr_in6 *) msg->msg_name;
+- sin6->sin6_family = AF_INET6;
+- sin6->sin6_port = 0;
+- sin6->sin6_addr = ip6->saddr;
+-
+- sin6->sin6_flowinfo = 0;
+- if (np->sndflow)
+- sin6->sin6_flowinfo = ip6_flowinfo(ip6);
+-
+- sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
+- IP6CB(skb)->iif);
++ struct sockaddr_in6 *sin6 =
++ (struct sockaddr_in6 *)msg->msg_name;
++
++ if (sin6) {
++ sin6->sin6_family = AF_INET6;
++ sin6->sin6_port = 0;
++ sin6->sin6_addr = ip6->saddr;
++ sin6->sin6_flowinfo = 0;
++ if (np->sndflow)
++ sin6->sin6_flowinfo = ip6_flowinfo(ip6);
++ sin6->sin6_scope_id =
++ ipv6_iface_scope_id(&sin6->sin6_addr,
++ IP6CB(skb)->iif);
++ *addr_len = sizeof(*sin6);
++ }
+
+ if (inet6_sk(sk)->rxopt.all)
+ pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 193db03540ad..7d3db7838e62 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -694,11 +694,8 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ if (flags & MSG_ERRQUEUE) {
+- err = ip_recv_error(sk, msg, len);
++ err = ip_recv_error(sk, msg, len, addr_len);
+ goto out;
+ }
+
+@@ -724,6 +721,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_port = 0;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 6011615e810d..62290b5124c8 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1772,8 +1772,12 @@ local_input:
+ rth->dst.error= -err;
+ rth->rt_flags &= ~RTCF_LOCAL;
+ }
+- if (do_cache)
+- rt_cache_route(&FIB_RES_NH(res), rth);
++ if (do_cache) {
++ if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
++ rth->dst.flags |= DST_NOCACHE;
++ rt_add_uncached_list(rth);
++ }
++ }
+ skb_dst_set(skb, &rth->dst);
+ err = 0;
+ goto out;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 6e5617b9f9db..be5246e1d5b6 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -806,12 +806,6 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
+ xmit_size_goal = min_t(u32, gso_size,
+ sk->sk_gso_max_size - 1 - hlen);
+
+- /* TSQ : try to have at least two segments in flight
+- * (one in NIC TX ring, another in Qdisc)
+- */
+- xmit_size_goal = min_t(u32, xmit_size_goal,
+- sysctl_tcp_limit_output_bytes >> 1);
+-
+ xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
+
+ /* We try hard to avoid divides here */
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index b14266bb91eb..5031f68b545d 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -177,7 +177,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ return err;
+ }
+
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index 52f3c6b971d2..310711433358 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -659,10 +659,13 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+ struct tcp_fastopen_cookie *cookie, bool syn_lost)
+ {
++ struct dst_entry *dst = __sk_dst_get(sk);
+ struct tcp_metrics_block *tm;
+
++ if (!dst)
++ return;
+ rcu_read_lock();
+- tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
++ tm = tcp_get_metrics(sk, dst, true);
+ if (tm) {
+ struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
+
+diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
+index 533c58a5cfb7..910ab81bc0dd 100644
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -272,33 +272,32 @@ static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *
+ {
+ const struct iphdr *iph = skb_gro_network_header(skb);
+ __wsum wsum;
+- __sum16 sum;
++
++ /* Don't bother verifying checksum if we're going to flush anyway. */
++ if (NAPI_GRO_CB(skb)->flush)
++ goto skip_csum;
++
++ wsum = skb->csum;
+
+ switch (skb->ip_summed) {
++ case CHECKSUM_NONE:
++ wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
++ 0);
++
++ /* fall through */
++
+ case CHECKSUM_COMPLETE:
+ if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
+- skb->csum)) {
++ wsum)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ }
+-flush:
++
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+-
+- case CHECKSUM_NONE:
+- wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+- skb_gro_len(skb), IPPROTO_TCP, 0);
+- sum = csum_fold(skb_checksum(skb,
+- skb_gro_offset(skb),
+- skb_gro_len(skb),
+- wsum));
+- if (sum)
+- goto flush;
+-
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- break;
+ }
+
++skip_csum:
+ return tcp_gro_receive(head, skb);
+ }
+
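Both GRO receive paths now funnel into a single verification: for CHECKSUM_NONE the checksum over the segment is computed first, then the same complete-checksum test runs, and the whole check is skipped when the segment is already marked for flushing. The property tested is the standard RFC 1071 one — with the checksum field included, a valid segment's ones-complement sum folds to zero. A standalone sketch, ignoring the TCP pseudo-header for brevity (illustrative userspace C):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Fold a 32-bit ones-complement accumulator to 16 bits and invert. */
static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint32_t sum16(const uint16_t *p, size_t n)
{
	uint32_t s = 0;

	while (n--)
		s += *p++;
	return s;
}

int main(void)
{
	uint16_t pkt[4] = { 0x1234, 0xabcd, 0x0001, 0 };

	pkt[3] = csum_fold32(sum16(pkt, 4)); /* fill the checksum field */
	/* A valid packet verifies to 0 when the checksum is included. */
	printf("%u\n", (unsigned)csum_fold32(sum16(pkt, 4)));
	return 0;
}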
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index d46f2143305c..e912634b2f05 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1875,8 +1875,12 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ * - better RTT estimation and ACK scheduling
+ * - faster recovery
+ * - high rates
++ * Alas, some drivers / subsystems require a fair amount
++ * of queued bytes to ensure line rate.
++ * One example is wifi aggregation (802.11 AMPDU)
+ */
+- limit = max(skb->truesize, sk->sk_pacing_rate >> 10);
++ limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
++ sk->sk_pacing_rate >> 10);
+
+ if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+ set_bit(TSQ_THROTTLED, &tp->tsq_flags);
+@@ -3108,7 +3112,6 @@ void tcp_send_window_probe(struct sock *sk)
+ {
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
+- tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
+ tcp_xmit_probe_skb(sk, 0);
+ }
+ }
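Capping the xmit size goal by the TSQ sysctl proved too small for links that need deep queues to reach line rate (wifi AMPDU aggregation being the cited case), so the per-socket in-flight byte limit becomes the larger of the sysctl floor and roughly one millisecond of payload at the pacing rate (rate >> 10). A standalone sketch of the computation (illustrative C; the 131072-byte floor is an assumed sysctl_tcp_limit_output_bytes default):

#include <stdio.h>

/* max(sysctl floor, ~1 ms of bytes at the pacing rate). */
static unsigned int tsq_limit(unsigned int sysctl_floor,
                              unsigned int pacing_rate)
{
	unsigned int per_ms = pacing_rate >> 10;

	return per_ms > sysctl_floor ? per_ms : sysctl_floor;
}

int main(void)
{
	/* ~10 Gbit/s pacing: 1.25e9 B/s >> 10 ~= 1.2 MB may be queued */
	printf("%u\n", tsq_limit(131072, 1250000000u));
	/* ~1 Mbit/s pacing: the sysctl floor wins */
	printf("%u\n", tsq_limit(131072, 125000u));
	return 0;
}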
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 0ca44df51ee9..5e2c2f1a075d 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -973,7 +973,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ err = PTR_ERR(rt);
+ rt = NULL;
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
+@@ -1072,6 +1072,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
+ struct udp_sock *up = udp_sk(sk);
+ int ret;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ if (!up->pending) {
+ struct msghdr msg = { .msg_flags = flags|MSG_MORE };
+
+@@ -1209,14 +1212,8 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ int is_udplite = IS_UDPLITE(sk);
+ bool slow;
+
+- /*
+- * Check any passed addresses
+- */
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ if (flags & MSG_ERRQUEUE)
+- return ip_recv_error(sk, msg, len);
++ return ip_recv_error(sk, msg, len, addr_len);
+
+ try_again:
+ skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+@@ -1276,6 +1273,7 @@ try_again:
+ sin->sin_port = udp_hdr(skb)->source;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
+index ccde54248c8c..adf998322bd2 100644
+--- a/net/ipv4/xfrm4_policy.c
++++ b/net/ipv4/xfrm4_policy.c
+@@ -104,10 +104,14 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
+ const struct iphdr *iph = ip_hdr(skb);
+ u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
+ struct flowi4 *fl4 = &fl->u.ip4;
++ int oif = 0;
++
++ if (skb_dst(skb))
++ oif = skb_dst(skb)->dev->ifindex;
+
+ memset(fl4, 0, sizeof(struct flowi4));
+ fl4->flowi4_mark = skb->mark;
+- fl4->flowi4_oif = skb_dst(skb)->dev->ifindex;
++ fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
+
+ if (!ip_is_fragment(iph)) {
+ switch (iph->protocol) {
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 7c96100b021e..8132b4457b20 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -965,10 +965,10 @@ out:
+
+ #ifdef CONFIG_SYSCTL
+ sysctl_fail:
+- ipv6_packet_cleanup();
++ pingv6_exit();
+ #endif
+ pingv6_fail:
+- pingv6_exit();
++ ipv6_packet_cleanup();
+ ipv6_packet_fail:
+ tcpv6_exit();
+ tcpv6_fail:
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 48b6bd2a9a14..c66c6df6e881 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -318,7 +318,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
+ /*
+ * Handle MSG_ERRQUEUE
+ */
+-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
++int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sock_exterr_skb *serr;
+@@ -369,6 +369,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ &sin->sin6_addr);
+ sin->sin6_scope_id = 0;
+ }
++ *addr_len = sizeof(*sin);
+ }
+
+ memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+@@ -377,6 +378,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
+ sin->sin6_family = AF_INET6;
+ sin->sin6_flowinfo = 0;
++ sin->sin6_port = 0;
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ sin->sin6_addr = ipv6_hdr(skb)->saddr;
+ if (np->rxopt.all)
+@@ -423,7 +425,8 @@ EXPORT_SYMBOL_GPL(ipv6_recv_error);
+ /*
+ * Handle IPV6_RECVPATHMTU
+ */
+-int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
++int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sk_buff *skb;
+@@ -457,6 +460,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
+ sin->sin6_port = 0;
+ sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
+ sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
++ *addr_len = sizeof(*sin);
+ }
+
+ put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
+diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
+index 46e88433ec7d..f0ccdb787100 100644
+--- a/net/ipv6/ip6_flowlabel.c
++++ b/net/ipv6/ip6_flowlabel.c
+@@ -453,8 +453,10 @@ static int mem_check(struct sock *sk)
+ if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
+ return 0;
+
++ rcu_read_lock_bh();
+ for_each_sk_fl_rcu(np, sfl)
+ count++;
++ rcu_read_unlock_bh();
+
+ if (room <= 0 ||
+ ((count >= FL_MAX_PER_SOCK ||
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 91fb4e8212f5..b6fa35e7425c 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -116,8 +116,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
+ }
+ rcu_read_unlock_bh();
+
+- IP6_INC_STATS_BH(dev_net(dst->dev),
+- ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
++ IP6_INC_STATS(dev_net(dst->dev),
++ ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -125,7 +125,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
+ static int ip6_finish_output(struct sk_buff *skb)
+ {
+ if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+- dst_allfrag(skb_dst(skb)))
++ dst_allfrag(skb_dst(skb)) ||
++ (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
+ return ip6_fragment(skb, ip6_finish_output2);
+ else
+ return ip6_finish_output2(skb);
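ip6_finish_output() gains a third reason to fragment: when netfilter reassembled the packet on input, IP6CB records the largest original fragment in frag_max_size, and the packet must not be re-emitted any larger than that. A standalone sketch of the decision (illustrative C; the GSO and allfrag conditions are omitted):

#include <stdio.h>
#include <stdbool.h>

/* Fragment when over the MTU, or when defrag recorded a maximum
 * original fragment size (0 means "was never reassembled") and the
 * packet now exceeds it.
 */
static bool must_fragment(unsigned int len, unsigned int mtu,
                          unsigned int frag_max_size)
{
	if (len > mtu)
		return true;
	return frag_max_size && len > frag_max_size;
}

int main(void)
{
	printf("%d\n", must_fragment(1400, 1500, 1280)); /* 1: refragment */
	printf("%d\n", must_fragment(1400, 1500, 0));    /* 0: fits */
	return 0;
}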
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 583b77e2f69b..c1e11b5d6ccc 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1635,6 +1635,15 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
+ return ip6_tnl_update(t, &p);
+ }
+
++static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
++{
++ struct net *net = dev_net(dev);
++ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
++
++ if (dev != ip6n->fb_tnl_dev)
++ unregister_netdevice_queue(dev, head);
++}
++
+ static size_t ip6_tnl_get_size(const struct net_device *dev)
+ {
+ return
+@@ -1699,6 +1708,7 @@ static struct rtnl_link_ops ip6_link_ops __read_mostly = {
+ .validate = ip6_tnl_validate,
+ .newlink = ip6_tnl_newlink,
+ .changelink = ip6_tnl_changelink,
++ .dellink = ip6_tnl_dellink,
+ .get_size = ip6_tnl_get_size,
+ .fill_info = ip6_tnl_fill_info,
+ };
+@@ -1715,9 +1725,9 @@ static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
+ .priority = 1,
+ };
+
+-static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
++static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
+ {
+- struct net *net = dev_net(ip6n->fb_tnl_dev);
++ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+ struct net_device *dev, *aux;
+ int h;
+ struct ip6_tnl *t;
+@@ -1785,10 +1795,8 @@ err_alloc_dev:
+
+ static void __net_exit ip6_tnl_exit_net(struct net *net)
+ {
+- struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+-
+ rtnl_lock();
+- ip6_tnl_destroy_tunnels(ip6n);
++ ip6_tnl_destroy_tunnels(net);
+ rtnl_unlock();
+ }
+
+diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+index d6e4dd8b58df..83ab37cc8e6a 100644
+--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+@@ -169,63 +169,13 @@ out:
+ return nf_conntrack_confirm(skb);
+ }
+
+-static unsigned int __ipv6_conntrack_in(struct net *net,
+- unsigned int hooknum,
+- struct sk_buff *skb,
+- const struct net_device *in,
+- const struct net_device *out,
+- int (*okfn)(struct sk_buff *))
+-{
+- struct sk_buff *reasm = skb->nfct_reasm;
+- const struct nf_conn_help *help;
+- struct nf_conn *ct;
+- enum ip_conntrack_info ctinfo;
+-
+- /* This packet is fragmented and has reassembled packet. */
+- if (reasm) {
+- /* Reassembled packet isn't parsed yet ? */
+- if (!reasm->nfct) {
+- unsigned int ret;
+-
+- ret = nf_conntrack_in(net, PF_INET6, hooknum, reasm);
+- if (ret != NF_ACCEPT)
+- return ret;
+- }
+-
+- /* Conntrack helpers need the entire reassembled packet in the
+- * POST_ROUTING hook. In case of unconfirmed connections NAT
+- * might reassign a helper, so the entire packet is also
+- * required.
+- */
+- ct = nf_ct_get(reasm, &ctinfo);
+- if (ct != NULL && !nf_ct_is_untracked(ct)) {
+- help = nfct_help(ct);
+- if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
+- nf_conntrack_get_reasm(reasm);
+- NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
+- (struct net_device *)in,
+- (struct net_device *)out,
+- okfn, NF_IP6_PRI_CONNTRACK + 1);
+- return NF_DROP_ERR(-ECANCELED);
+- }
+- }
+-
+- nf_conntrack_get(reasm->nfct);
+- skb->nfct = reasm->nfct;
+- skb->nfctinfo = reasm->nfctinfo;
+- return NF_ACCEPT;
+- }
+-
+- return nf_conntrack_in(net, PF_INET6, hooknum, skb);
+-}
+-
+ static unsigned int ipv6_conntrack_in(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+ {
+- return __ipv6_conntrack_in(dev_net(in), hooknum, skb, in, out, okfn);
++ return nf_conntrack_in(dev_net(in), PF_INET6, hooknum, skb);
+ }
+
+ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
+@@ -239,7 +189,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
+ net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
+ return NF_ACCEPT;
+ }
+- return __ipv6_conntrack_in(dev_net(out), hooknum, skb, in, out, okfn);
++ return nf_conntrack_in(dev_net(out), PF_INET6, hooknum, skb);
+ }
+
+ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index dffdc1a389c5..253566a8d55b 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -621,31 +621,16 @@ ret_orig:
+ return skb;
+ }
+
+-void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+- struct net_device *in, struct net_device *out,
+- int (*okfn)(struct sk_buff *))
++void nf_ct_frag6_consume_orig(struct sk_buff *skb)
+ {
+ struct sk_buff *s, *s2;
+- unsigned int ret = 0;
+
+ for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
+- nf_conntrack_put_reasm(s->nfct_reasm);
+- nf_conntrack_get_reasm(skb);
+- s->nfct_reasm = skb;
+-
+ s2 = s->next;
+ s->next = NULL;
+-
+- if (ret != -ECANCELED)
+- ret = NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s,
+- in, out, okfn,
+- NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+- else
+- kfree_skb(s);
+-
++ consume_skb(s);
+ s = s2;
+ }
+- nf_conntrack_put_reasm(skb);
+ }
+
+ static int nf_ct_net_init(struct net *net)
+diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+index aacd121fe8c5..581dd9ede0de 100644
+--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
++++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+@@ -75,8 +75,11 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
+ if (reasm == skb)
+ return NF_ACCEPT;
+
+- nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in,
+- (struct net_device *)out, okfn);
++ nf_ct_frag6_consume_orig(reasm);
++
++ NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
++ (struct net_device *) in, (struct net_device *) out,
++ okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+
+ return NF_STOLEN;
+ }
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 18f19df4189f..7856e962a3e6 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -57,7 +57,8 @@ static struct inet_protosw pingv6_protosw = {
+
+
+ /* Compatibility glue so we can support IPv6 when it's compiled as a module */
+-static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
++static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len)
+ {
+ return -EAFNOSUPPORT;
+ }
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index a4ed2416399e..430067cb9210 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -466,14 +466,11 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+- if (addr_len)
+- *addr_len=sizeof(*sin6);
+-
+ if (flags & MSG_ERRQUEUE)
+- return ipv6_recv_error(sk, msg, len);
++ return ipv6_recv_error(sk, msg, len, addr_len);
+
+ if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+- return ipv6_recv_rxpmtu(sk, msg, len);
++ return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+@@ -507,6 +504,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ sin6->sin6_flowinfo = 0;
+ sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
+ IP6CB(skb)->iif);
++ *addr_len = sizeof(*sin6);
+ }
+
+ sock_recv_ts_and_drops(msg, sk, skb);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 04e17b3309fb..77308af056bc 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -731,8 +731,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
+ prefix = &prefix_buf;
+ }
+
+- rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
+- dev->ifindex);
++ if (rinfo->prefix_len == 0)
++ rt = rt6_get_dflt_router(gwaddr, dev);
++ else
++ rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
++ gwaddr, dev->ifindex);
+
+ if (rt && !lifetime) {
+ ip6_del_rt(rt);
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 19269453a8ea..b43388452bf8 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1594,6 +1594,15 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
+ #endif
+ };
+
++static void ipip6_dellink(struct net_device *dev, struct list_head *head)
++{
++ struct net *net = dev_net(dev);
++ struct sit_net *sitn = net_generic(net, sit_net_id);
++
++ if (dev != sitn->fb_tunnel_dev)
++ unregister_netdevice_queue(dev, head);
++}
++
+ static struct rtnl_link_ops sit_link_ops __read_mostly = {
+ .kind = "sit",
+ .maxtype = IFLA_IPTUN_MAX,
+@@ -1605,6 +1614,7 @@ static struct rtnl_link_ops sit_link_ops __read_mostly = {
+ .changelink = ipip6_changelink,
+ .get_size = ipip6_get_size,
+ .fill_info = ipip6_fill_info,
++ .dellink = ipip6_dellink,
+ };
+
+ static struct xfrm_tunnel sit_handler __read_mostly = {
+@@ -1619,9 +1629,10 @@ static struct xfrm_tunnel ipip_handler __read_mostly = {
+ .priority = 2,
+ };
+
+-static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
++static void __net_exit sit_destroy_tunnels(struct net *net,
++ struct list_head *head)
+ {
+- struct net *net = dev_net(sitn->fb_tunnel_dev);
++ struct sit_net *sitn = net_generic(net, sit_net_id);
+ struct net_device *dev, *aux;
+ int prio;
+
+@@ -1696,11 +1707,10 @@ err_alloc_dev:
+
+ static void __net_exit sit_exit_net(struct net *net)
+ {
+- struct sit_net *sitn = net_generic(net, sit_net_id);
+ LIST_HEAD(list);
+
+ rtnl_lock();
+- sit_destroy_tunnels(sitn, &list);
++ sit_destroy_tunnels(net, &list);
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+ }
+diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
+index 2ec6bf6a0aa0..a7a2384b30f5 100644
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -37,34 +37,32 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
+ {
+ const struct ipv6hdr *iph = skb_gro_network_header(skb);
+ __wsum wsum;
+- __sum16 sum;
++
++ /* Don't bother verifying checksum if we're going to flush anyway. */
++ if (NAPI_GRO_CB(skb)->flush)
++ goto skip_csum;
++
++ wsum = skb->csum;
+
+ switch (skb->ip_summed) {
++ case CHECKSUM_NONE:
++ wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
++ wsum);
++
++ /* fall through */
++
+ case CHECKSUM_COMPLETE:
+ if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
+- skb->csum)) {
++ wsum)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ }
+-flush:
++
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+-
+- case CHECKSUM_NONE:
+- wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
+- skb_gro_len(skb),
+- IPPROTO_TCP, 0));
+- sum = csum_fold(skb_checksum(skb,
+- skb_gro_offset(skb),
+- skb_gro_len(skb),
+- wsum));
+- if (sum)
+- goto flush;
+-
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- break;
+ }
+
++skip_csum:
+ return tcp_gro_receive(head, skb);
+ }
+
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 18786098fd41..3d2758d4494e 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -374,14 +374,11 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ int is_udp4;
+ bool slow;
+
+- if (addr_len)
+- *addr_len = sizeof(struct sockaddr_in6);
+-
+ if (flags & MSG_ERRQUEUE)
+- return ipv6_recv_error(sk, msg, len);
++ return ipv6_recv_error(sk, msg, len, addr_len);
+
+ if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+- return ipv6_recv_rxpmtu(sk, msg, len);
++ return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
+
+ try_again:
+ skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+@@ -462,7 +459,7 @@ try_again:
+ ipv6_iface_scope_id(&sin6->sin6_addr,
+ IP6CB(skb)->iif);
+ }
+-
++ *addr_len = sizeof(*sin6);
+ }
+ if (is_udp4) {
+ if (inet->cmsg_flags)
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
+index 60559511bd9c..34c6fff3ae84 100644
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -88,7 +88,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+
+ /* Check if there is enough headroom to insert fragment header. */
+ tnl_hlen = skb_tnl_header_len(skb);
+- if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
++ if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
+ if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+ goto out;
+ }
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index 08ed2772b7aa..550b195bb2fc 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -135,10 +135,14 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ struct ipv6_opt_hdr *exthdr;
+ const unsigned char *nh = skb_network_header(skb);
+ u8 nexthdr = nh[IP6CB(skb)->nhoff];
++ int oif = 0;
++
++ if (skb_dst(skb))
++ oif = skb_dst(skb)->dev->ifindex;
+
+ memset(fl6, 0, sizeof(struct flowi6));
+ fl6->flowi6_mark = skb->mark;
+- fl6->flowi6_oif = skb_dst(skb)->dev->ifindex;
++ fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
+
+ fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
+ fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
+diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
+index 7a1e0fc1bd4d..e096025b477f 100644
+--- a/net/ipx/af_ipx.c
++++ b/net/ipx/af_ipx.c
+@@ -1823,8 +1823,6 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (skb->tstamp.tv64)
+ sk->sk_stamp = skb->tstamp;
+
+- msg->msg_namelen = sizeof(*sipx);
+-
+ if (sipx) {
+ sipx->sipx_family = AF_IPX;
+ sipx->sipx_port = ipx->ipx_source.sock;
+@@ -1832,6 +1830,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
+ sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net;
+ sipx->sipx_type = ipx->ipx_type;
+ sipx->sipx_zero = 0;
++ msg->msg_namelen = sizeof(*sipx);
+ }
+ rc = copied;
+
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index 0578d4fa00a9..a5e62ef57155 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1385,8 +1385,6 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
+
+ IRDA_DEBUG(4, "%s()\n", __func__);
+
+- msg->msg_namelen = 0;
+-
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+@@ -1451,8 +1449,6 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, noblock);
+
+- msg->msg_namelen = 0;
+-
+ do {
+ int chunk;
+ struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 168aff5e60de..c4b7218058b6 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1324,8 +1324,6 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int err = 0;
+ u32 offset;
+
+- msg->msg_namelen = 0;
+-
+ if ((sk->sk_state == IUCV_DISCONN) &&
+ skb_queue_empty(&iucv->backlog_skb_q) &&
+ skb_queue_empty(&sk->sk_receive_queue) &&
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 911ef03bf8fb..545f047868ad 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -3616,7 +3616,6 @@ static int pfkey_recvmsg(struct kiocb *kiocb,
+ if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
+ goto out;
+
+- msg->msg_namelen = 0;
+ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
+ if (skb == NULL)
+ goto out;
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 571db8dd2292..da1a1cee1a08 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -518,9 +518,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+ goto out;
+@@ -543,6 +540,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_port = 0;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index b8a6039314e8..e6e8408c9e36 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -665,7 +665,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ *addr_len = sizeof(*lsa);
+
+ if (flags & MSG_ERRQUEUE)
+- return ipv6_recv_error(sk, msg, len);
++ return ipv6_recv_error(sk, msg, len, addr_len);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 8c46b271064a..44441c0c5037 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -197,8 +197,6 @@ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (sk->sk_state & PPPOX_BOUND)
+ goto end;
+
+- msg->msg_namelen = 0;
+-
+ err = 0;
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 6cba486353e8..7b01b9f5846c 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -720,8 +720,6 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int target; /* Read at least this many bytes */
+ long timeo;
+
+- msg->msg_namelen = 0;
+-
+ lock_sock(sk);
+ copied = -ENOTCONN;
+ if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 74fd00c27210..3581736446d5 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1139,12 +1139,6 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
+ ip_vs_fill_iph_skb(af, skb, &iph);
+ #ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6) {
+- if (!iph.fragoffs && skb_nfct_reasm(skb)) {
+- struct sk_buff *reasm = skb_nfct_reasm(skb);
+- /* Save fw mark for coming frags */
+- reasm->ipvs_property = 1;
+- reasm->mark = skb->mark;
+- }
+ if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
+ int related;
+ int verdict = ip_vs_out_icmp_v6(skb, &related,
+@@ -1614,12 +1608,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
+
+ #ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6) {
+- if (!iph.fragoffs && skb_nfct_reasm(skb)) {
+- struct sk_buff *reasm = skb_nfct_reasm(skb);
+- /* Save fw mark for coming frags. */
+- reasm->ipvs_property = 1;
+- reasm->mark = skb->mark;
+- }
+ if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
+ int related;
+ int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
+@@ -1671,9 +1659,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
+ /* sorry, all this trouble for a no-hit :) */
+ IP_VS_DBG_PKT(12, af, pp, skb, 0,
+ "ip_vs_in: packet continues traversal as normal");
+- if (iph.fragoffs && !skb_nfct_reasm(skb)) {
++ if (iph.fragoffs) {
+ /* Fragment that couldn't be mapped to a conn entry
+- * and don't have any pointer to a reasm skb
+ * is missing module nf_defrag_ipv6
+ */
+ IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
+@@ -1756,38 +1743,6 @@ ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
+ #ifdef CONFIG_IP_VS_IPV6
+
+ /*
+- * AF_INET6 fragment handling
+- * Copy info from first fragment, to the rest of them.
+- */
+-static unsigned int
+-ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
+- const struct net_device *in,
+- const struct net_device *out,
+- int (*okfn)(struct sk_buff *))
+-{
+- struct sk_buff *reasm = skb_nfct_reasm(skb);
+- struct net *net;
+-
+- /* Skip if not a "replay" from nf_ct_frag6_output or first fragment.
+- * ipvs_property is set when checking first fragment
+- * in ip_vs_in() and ip_vs_out().
+- */
+- if (reasm)
+- IP_VS_DBG(2, "Fragment recv prop:%d\n", reasm->ipvs_property);
+- if (!reasm || !reasm->ipvs_property)
+- return NF_ACCEPT;
+-
+- net = skb_net(skb);
+- if (!net_ipvs(net)->enable)
+- return NF_ACCEPT;
+-
+- /* Copy stored fw mark, saved in ip_vs_{in,out} */
+- skb->mark = reasm->mark;
+-
+- return NF_ACCEPT;
+-}
+-
+-/*
+ * AF_INET6 handler in NF_INET_LOCAL_IN chain
+ * Schedule and forward packets from remote clients
+ */
+@@ -1924,14 +1879,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
+ .priority = 100,
+ },
+ #ifdef CONFIG_IP_VS_IPV6
+- /* After mangle & nat fetch 2:nd fragment and following */
+- {
+- .hook = ip_vs_preroute_frag6,
+- .owner = THIS_MODULE,
+- .pf = NFPROTO_IPV6,
+- .hooknum = NF_INET_PRE_ROUTING,
+- .priority = NF_IP6_PRI_NAT_DST + 1,
+- },
+ /* After packet filtering, change source only for VS/NAT */
+ {
+ .hook = ip_vs_reply6,
+diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
+index 9ef22bdce9f1..bed5f7042529 100644
+--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
+@@ -65,7 +65,6 @@ static int get_callid(const char *dptr, unsigned int dataoff,
+ static int
+ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ {
+- struct sk_buff *reasm = skb_nfct_reasm(skb);
+ struct ip_vs_iphdr iph;
+ unsigned int dataoff, datalen, matchoff, matchlen;
+ const char *dptr;
+@@ -79,15 +78,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ /* todo: IPv6 fragments:
+ * I think this only should be done for the first fragment. /HS
+ */
+- if (reasm) {
+- skb = reasm;
+- dataoff = iph.thoff_reasm + sizeof(struct udphdr);
+- } else
+- dataoff = iph.len + sizeof(struct udphdr);
++ dataoff = iph.len + sizeof(struct udphdr);
+
+ if (dataoff >= skb->len)
+ return -EINVAL;
+- /* todo: Check if this will mess-up the reasm skb !!! /HS */
+ retc = skb_linearize(skb);
+ if (retc < 0)
+ return retc;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 8df7f64c6db3..613563555515 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2335,8 +2335,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
+ }
+ #endif
+
+- msg->msg_namelen = 0;
+-
+ copied = data_skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 698814bfa7ad..53c19a35fc6d 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -1179,10 +1179,9 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
+ sax->sax25_family = AF_NETROM;
+ skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
+ AX25_ADDR_LEN);
++ msg->msg_namelen = sizeof(*sax);
+ }
+
+- msg->msg_namelen = sizeof(*sax);
+-
+ skb_free_datagram(sk, skb);
+
+ release_sock(sk);
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index d308402b67d8..824c6056bf82 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -807,8 +807,6 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ pr_debug("%p %zu\n", sk, len);
+
+- msg->msg_namelen = 0;
+-
+ lock_sock(sk);
+
+ if (sk->sk_state == LLCP_CLOSED &&
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index 313bf1bc848a..5d11f4ac3ecb 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -241,8 +241,6 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (!skb)
+ return rc;
+
+- msg->msg_namelen = 0;
+-
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 2e8286b47c28..ba2548bd85bf 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -244,11 +244,15 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
+ static void register_prot_hook(struct sock *sk)
+ {
+ struct packet_sock *po = pkt_sk(sk);
++
+ if (!po->running) {
+- if (po->fanout)
++ if (po->fanout) {
+ __fanout_link(sk, po);
+- else
++ } else {
+ dev_add_pack(&po->prot_hook);
++ rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
++ }
++
+ sock_hold(sk);
+ po->running = 1;
+ }
+@@ -266,10 +270,13 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
+ struct packet_sock *po = pkt_sk(sk);
+
+ po->running = 0;
+- if (po->fanout)
++ if (po->fanout) {
+ __fanout_unlink(sk, po);
+- else
++ } else {
+ __dev_remove_pack(&po->prot_hook);
++ RCU_INIT_POINTER(po->cached_dev, NULL);
++ }
++
+ __sock_put(sk);
+
+ if (sync) {
+@@ -432,9 +439,9 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
+
+ pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
+
+- spin_lock(&rb_queue->lock);
++ spin_lock_bh(&rb_queue->lock);
+ pkc->delete_blk_timer = 1;
+- spin_unlock(&rb_queue->lock);
++ spin_unlock_bh(&rb_queue->lock);
+
+ prb_del_retire_blk_timer(pkc);
+ }
+@@ -2052,12 +2059,24 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ return tp_len;
+ }
+
++static struct net_device *packet_cached_dev_get(struct packet_sock *po)
++{
++ struct net_device *dev;
++
++ rcu_read_lock();
++ dev = rcu_dereference(po->cached_dev);
++ if (dev)
++ dev_hold(dev);
++ rcu_read_unlock();
++
++ return dev;
++}
++
+ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ {
+ struct sk_buff *skb;
+ struct net_device *dev;
+ __be16 proto;
+- bool need_rls_dev = false;
+ int err, reserve = 0;
+ void *ph;
+ struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
+@@ -2070,7 +2089,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ mutex_lock(&po->pg_vec_lock);
+
+ if (saddr == NULL) {
+- dev = po->prot_hook.dev;
++ dev = packet_cached_dev_get(po);
+ proto = po->num;
+ addr = NULL;
+ } else {
+@@ -2084,19 +2103,17 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ proto = saddr->sll_protocol;
+ addr = saddr->sll_addr;
+ dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+- need_rls_dev = true;
+ }
+
+ err = -ENXIO;
+ if (unlikely(dev == NULL))
+ goto out;
+-
+- reserve = dev->hard_header_len;
+-
+ err = -ENETDOWN;
+ if (unlikely(!(dev->flags & IFF_UP)))
+ goto out_put;
+
++ reserve = dev->hard_header_len;
++
+ size_max = po->tx_ring.frame_size
+ - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
+
+@@ -2173,8 +2190,7 @@ out_status:
+ __packet_set_status(po, ph, status);
+ kfree_skb(skb);
+ out_put:
+- if (need_rls_dev)
+- dev_put(dev);
++ dev_put(dev);
+ out:
+ mutex_unlock(&po->pg_vec_lock);
+ return err;
+@@ -2212,7 +2228,6 @@ static int packet_snd(struct socket *sock,
+ struct sk_buff *skb;
+ struct net_device *dev;
+ __be16 proto;
+- bool need_rls_dev = false;
+ unsigned char *addr;
+ int err, reserve = 0;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+@@ -2228,7 +2243,7 @@ static int packet_snd(struct socket *sock,
+ */
+
+ if (saddr == NULL) {
+- dev = po->prot_hook.dev;
++ dev = packet_cached_dev_get(po);
+ proto = po->num;
+ addr = NULL;
+ } else {
+@@ -2240,19 +2255,17 @@ static int packet_snd(struct socket *sock,
+ proto = saddr->sll_protocol;
+ addr = saddr->sll_addr;
+ dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+- need_rls_dev = true;
+ }
+
+ err = -ENXIO;
+- if (dev == NULL)
++ if (unlikely(dev == NULL))
+ goto out_unlock;
+- if (sock->type == SOCK_RAW)
+- reserve = dev->hard_header_len;
+-
+ err = -ENETDOWN;
+- if (!(dev->flags & IFF_UP))
++ if (unlikely(!(dev->flags & IFF_UP)))
+ goto out_unlock;
+
++ if (sock->type == SOCK_RAW)
++ reserve = dev->hard_header_len;
+ if (po->has_vnet_hdr) {
+ vnet_hdr_len = sizeof(vnet_hdr);
+
+@@ -2386,15 +2399,14 @@ static int packet_snd(struct socket *sock,
+ if (err > 0 && (err = net_xmit_errno(err)) != 0)
+ goto out_unlock;
+
+- if (need_rls_dev)
+- dev_put(dev);
++ dev_put(dev);
+
+ return len;
+
+ out_free:
+ kfree_skb(skb);
+ out_unlock:
+- if (dev && need_rls_dev)
++ if (dev)
+ dev_put(dev);
+ out:
+ return err;
+@@ -2614,6 +2626,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
+ po = pkt_sk(sk);
+ sk->sk_family = PF_PACKET;
+ po->num = proto;
++ RCU_INIT_POINTER(po->cached_dev, NULL);
+
+ sk->sk_destruct = packet_sock_destruct;
+ sk_refcnt_debug_inc(sk);
+@@ -2660,7 +2673,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int copied, err;
+- struct sockaddr_ll *sll;
+ int vnet_hdr_len = 0;
+
+ err = -EINVAL;
+@@ -2744,22 +2756,10 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
+ goto out_free;
+ }
+
+- /*
+- * If the address length field is there to be filled in, we fill
+- * it in now.
++ /* You lose any data beyond the buffer you gave. If it worries
++ * a user program they can ask the device for its MTU
++ * anyway.
+ */
+-
+- sll = &PACKET_SKB_CB(skb)->sa.ll;
+- if (sock->type == SOCK_PACKET)
+- msg->msg_namelen = sizeof(struct sockaddr_pkt);
+- else
+- msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
+-
+- /*
+- * You lose any data beyond the buffer you gave. If it worries a
+- * user program they can ask the device for its MTU anyway.
+- */
+-
+ copied = skb->len;
+ if (copied > len) {
+ copied = len;
+@@ -2772,9 +2772,20 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ sock_recv_ts_and_drops(msg, sk, skb);
+
+- if (msg->msg_name)
++ if (msg->msg_name) {
++ /* If the address length field is there to be filled
++ * in, we fill it in now.
++ */
++ if (sock->type == SOCK_PACKET) {
++ msg->msg_namelen = sizeof(struct sockaddr_pkt);
++ } else {
++ struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
++ msg->msg_namelen = sll->sll_halen +
++ offsetof(struct sockaddr_ll, sll_addr);
++ }
+ memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
+ msg->msg_namelen);
++ }
+
+ if (pkt_sk(sk)->auxdata) {
+ struct tpacket_auxdata aux;
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index c4e4b4561207..1035fa2d909c 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -113,6 +113,7 @@ struct packet_sock {
+ unsigned int tp_loss:1;
+ unsigned int tp_tx_has_off:1;
+ unsigned int tp_tstamp;
++ struct net_device __rcu *cached_dev;
+ struct packet_type prot_hook ____cacheline_aligned_in_smp;
+ };
+
+diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
+index 12c30f3e643e..38946b26e471 100644
+--- a/net/phonet/datagram.c
++++ b/net/phonet/datagram.c
+@@ -139,9 +139,6 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
+ MSG_CMSG_COMPAT))
+ goto out_nofree;
+
+- if (addr_len)
+- *addr_len = sizeof(sa);
+-
+ skb = skb_recv_datagram(sk, flags, noblock, &rval);
+ if (skb == NULL)
+ goto out_nofree;
+@@ -162,8 +159,10 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
+
+ rval = (flags & MSG_TRUNC) ? skb->len : copylen;
+
+- if (msg->msg_name != NULL)
+- memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
++ if (msg->msg_name != NULL) {
++ memcpy(msg->msg_name, &sa, sizeof(sa));
++ *addr_len = sizeof(sa);
++ }
+
+ out:
+ skb_free_datagram(sk, skb);
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index 9f0f17cf6bf9..de339b24ca14 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -410,8 +410,6 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+
+ rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
+
+- msg->msg_namelen = 0;
+-
+ if (msg_flags & MSG_OOB)
+ goto out;
+
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index e98fcfbe6007..33af77246bfe 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1216,7 +1216,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+ {
+ struct sock *sk = sock->sk;
+ struct rose_sock *rose = rose_sk(sk);
+- struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
+ size_t copied;
+ unsigned char *asmptr;
+ struct sk_buff *skb;
+@@ -1252,8 +1251,11 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+
+- if (srose != NULL) {
+- memset(srose, 0, msg->msg_namelen);
++ if (msg->msg_name) {
++ struct sockaddr_rose *srose;
++
++ memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
++ srose = msg->msg_name;
+ srose->srose_family = AF_ROSE;
+ srose->srose_addr = rose->dest_addr;
+ srose->srose_call = rose->dest_call;
+diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
+index 4b48687c3890..898492a8d61b 100644
+--- a/net/rxrpc/ar-recvmsg.c
++++ b/net/rxrpc/ar-recvmsg.c
+@@ -143,10 +143,13 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ /* copy the peer address and timestamp */
+ if (!continue_call) {
+- if (msg->msg_name && msg->msg_namelen > 0)
++ if (msg->msg_name) {
++ size_t len =
++ sizeof(call->conn->trans->peer->srx);
+ memcpy(msg->msg_name,
+- &call->conn->trans->peer->srx,
+- sizeof(call->conn->trans->peer->srx));
++ &call->conn->trans->peer->srx, len);
++ msg->msg_namelen = len;
++ }
+ sock_recv_ts_and_drops(msg, &rx->sk, skb);
+ }
+
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index a9dfdda9ed1d..2e55f8189502 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -88,7 +88,7 @@ struct fq_sched_data {
+ struct fq_flow internal; /* for non classified or high prio packets */
+ u32 quantum;
+ u32 initial_quantum;
+- u32 flow_default_rate;/* rate per flow : bytes per second */
++ u32 flow_refill_delay;
+ u32 flow_max_rate; /* optional max rate per flow */
+ u32 flow_plimit; /* max packets per flow */
+ struct rb_root *fq_root;
+@@ -115,6 +115,7 @@ static struct fq_flow detached, throttled;
+ static void fq_flow_set_detached(struct fq_flow *f)
+ {
+ f->next = &detached;
++ f->age = jiffies;
+ }
+
+ static bool fq_flow_is_detached(const struct fq_flow *f)
+@@ -209,21 +210,15 @@ static void fq_gc(struct fq_sched_data *q,
+ }
+ }
+
+-static const u8 prio2band[TC_PRIO_MAX + 1] = {
+- 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
+-};
+-
+ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
+ {
+ struct rb_node **p, *parent;
+ struct sock *sk = skb->sk;
+ struct rb_root *root;
+ struct fq_flow *f;
+- int band;
+
+ /* warning: no starvation prevention... */
+- band = prio2band[skb->priority & TC_PRIO_MAX];
+- if (unlikely(band == 0))
++ if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
+ return &q->internal;
+
+ if (unlikely(!sk)) {
+@@ -372,17 +367,20 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ }
+
+ f->qlen++;
+- flow_queue_add(f, skb);
+ if (skb_is_retransmit(skb))
+ q->stat_tcp_retrans++;
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+ if (fq_flow_is_detached(f)) {
+ fq_flow_add_tail(&q->new_flows, f);
+- if (q->quantum > f->credit)
+- f->credit = q->quantum;
++ if (time_after(jiffies, f->age + q->flow_refill_delay))
++ f->credit = max_t(u32, f->credit, q->quantum);
+ q->inactive_flows--;
+ qdisc_unthrottled(sch);
+ }
++
++ /* Note: this overwrites f->age */
++ flow_queue_add(f, skb);
++
+ if (unlikely(f == &q->internal)) {
+ q->stat_internal_packets++;
+ qdisc_unthrottled(sch);
+@@ -460,7 +458,6 @@ begin:
+ fq_flow_add_tail(&q->old_flows, f);
+ } else {
+ fq_flow_set_detached(f);
+- f->age = jiffies;
+ q->inactive_flows++;
+ }
+ goto begin;
+@@ -614,6 +611,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
+ [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
+ [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
+ [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
++ [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
+ };
+
+ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+@@ -655,7 +653,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+ q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+
+ if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
+- q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);
++ pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
++ nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));
+
+ if (tb[TCA_FQ_FLOW_MAX_RATE])
+ q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
+@@ -669,6 +668,12 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+ err = -EINVAL;
+ }
+
++ if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
++		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);
++
++ q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
++ }
++
+ if (!err)
+ err = fq_resize(q, fq_log);
+
+@@ -704,7 +709,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
+ q->flow_plimit = 100;
+ q->quantum = 2 * psched_mtu(qdisc_dev(sch));
+ q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
+- q->flow_default_rate = 0;
++ q->flow_refill_delay = msecs_to_jiffies(40);
+ q->flow_max_rate = ~0U;
+ q->rate_enable = 1;
+ q->new_flows.first = NULL;
+@@ -731,15 +736,16 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
+ if (opts == NULL)
+ goto nla_put_failure;
+
+- /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore,
+- * do not bother giving its value
+- */
++ /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
++
+ if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
+ nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
+ nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
+ nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
+ nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
+ nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
++ nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
++ jiffies_to_usecs(q->flow_refill_delay)) ||
+ nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
+ goto nla_put_failure;
+
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index 1aaf1b6e51a2..6ddda282f9c7 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -21,6 +21,7 @@
+ #include <net/netlink.h>
+ #include <net/sch_generic.h>
+ #include <net/pkt_sched.h>
++#include <net/tcp.h>
+
+
+ /* Simple Token Bucket Filter.
+@@ -117,6 +118,22 @@ struct tbf_sched_data {
+ };
+
+
++/*
++ * Return length of individual segments of a gso packet,
++ * including all headers (MAC, IP, TCP/UDP)
++ */
++static unsigned int skb_gso_seglen(const struct sk_buff *skb)
++{
++ unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
++ const struct skb_shared_info *shinfo = skb_shinfo(skb);
++
++ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
++ hdr_len += tcp_hdrlen(skb);
++ else
++ hdr_len += sizeof(struct udphdr);
++ return hdr_len + shinfo->gso_size;
++}
++
+ /* GSO packet is too big, segment it so that tbf can transmit
+ * each segment in time
+ */
+@@ -136,12 +153,8 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+ while (segs) {
+ nskb = segs->next;
+ segs->next = NULL;
+- if (likely(segs->len <= q->max_size)) {
+- qdisc_skb_cb(segs)->pkt_len = segs->len;
+- ret = qdisc_enqueue(segs, q->qdisc);
+- } else {
+- ret = qdisc_reshape_fail(skb, sch);
+- }
++ qdisc_skb_cb(segs)->pkt_len = segs->len;
++ ret = qdisc_enqueue(segs, q->qdisc);
+ if (ret != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(ret))
+ sch->qstats.drops++;
+@@ -163,7 +176,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ int ret;
+
+ if (qdisc_pkt_len(skb) > q->max_size) {
+- if (skb_is_gso(skb))
++ if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
+ return tbf_segment(skb, sch);
+ return qdisc_reshape_fail(skb, sch);
+ }
+@@ -316,6 +329,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
+ if (max_size < 0)
+ goto done;
+
++ if (max_size < psched_mtu(qdisc_dev(sch)))
++		pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u)!\n",
++ max_size, qdisc_dev(sch)->name,
++ psched_mtu(qdisc_dev(sch)));
++
+ if (q->qdisc != &noop_qdisc) {
+ err = fifo_set_limit(q->qdisc, qopt->limit);
+ if (err)
+diff --git a/net/socket.c b/net/socket.c
+index c226aceee65b..e83c416708af 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -221,12 +221,13 @@ static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
+ int err;
+ int len;
+
++ BUG_ON(klen > sizeof(struct sockaddr_storage));
+ err = get_user(len, ulen);
+ if (err)
+ return err;
+ if (len > klen)
+ len = klen;
+- if (len < 0 || len > sizeof(struct sockaddr_storage))
++ if (len < 0)
+ return -EINVAL;
+ if (len) {
+ if (audit_sockaddr(klen, kaddr))
+@@ -1840,8 +1841,10 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+ msg.msg_iov = &iov;
+ iov.iov_len = size;
+ iov.iov_base = ubuf;
+- msg.msg_name = (struct sockaddr *)&address;
+- msg.msg_namelen = sizeof(address);
++ /* Save some cycles and don't copy the address if not needed */
++ msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
++ /* We assume all kernel code knows the size of sockaddr_storage */
++ msg.msg_namelen = 0;
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = sock_recvmsg(sock, &msg, size, flags);
+@@ -1970,7 +1973,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
+ if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+ return -EFAULT;
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+- return -EINVAL;
++ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ return 0;
+ }
+
+@@ -2221,16 +2224,14 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ goto out;
+ }
+
+- /*
+- * Save the user-mode address (verify_iovec will change the
+- * kernel msghdr to use the kernel address space)
++ /* Save the user-mode address (verify_iovec will change the
++ * kernel msghdr to use the kernel address space)
+ */
+-
+ uaddr = (__force void __user *)msg_sys->msg_name;
+ uaddr_len = COMPAT_NAMELEN(msg);
+- if (MSG_CMSG_COMPAT & flags) {
++ if (MSG_CMSG_COMPAT & flags)
+ err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
+- } else
++ else
+ err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
+ if (err < 0)
+ goto out_freeiov;
+@@ -2239,6 +2240,9 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ cmsg_ptr = (unsigned long)msg_sys->msg_control;
+ msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
+
++ /* We assume all kernel code knows the size of sockaddr_storage */
++ msg_sys->msg_namelen = 0;
++
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys,
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 6cc7ddd2fb7c..dffdbeac18ca 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -984,9 +984,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
+ goto exit;
+ }
+
+- /* will be updated in set_orig_addr() if needed */
+- m->msg_namelen = 0;
+-
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ restart:
+
+@@ -1095,9 +1092,6 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
+ goto exit;
+ }
+
+- /* will be updated in set_orig_addr() if needed */
+- m->msg_namelen = 0;
+-
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index c1f403bed683..01625ccc3ae6 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1754,7 +1754,6 @@ static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
+ {
+ struct unix_sock *u = unix_sk(sk);
+
+- msg->msg_namelen = 0;
+ if (u->addr) {
+ msg->msg_namelen = u->addr->len;
+ memcpy(msg->msg_name, u->addr->name, u->addr->len);
+@@ -1778,8 +1777,6 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags&MSG_OOB)
+ goto out;
+
+- msg->msg_namelen = 0;
+-
+ err = mutex_lock_interruptible(&u->readlock);
+ if (err) {
+ err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+@@ -1924,8 +1921,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+
+- msg->msg_namelen = 0;
+-
+ /* Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+ */
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 545c08b8a1d4..5adfd94c5b85 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1662,8 +1662,6 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
+ vsk = vsock_sk(sk);
+ err = 0;
+
+- msg->msg_namelen = 0;
+-
+ lock_sock(sk);
+
+ if (sk->sk_state != SS_CONNECTED) {
+diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
+index 9d6986634e0b..687360da62d9 100644
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -1746,8 +1746,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
+ if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
+ return -EOPNOTSUPP;
+
+- msg->msg_namelen = 0;
+-
+ /* Retrieve the head sk_buff from the socket's receive queue. */
+ err = 0;
+ skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 45a3ab5612c1..7622789d3750 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -1340,10 +1340,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (sx25) {
+ sx25->sx25_family = AF_X25;
+ sx25->sx25_addr = x25->dest_addr;
++ msg->msg_namelen = sizeof(*sx25);
+ }
+
+- msg->msg_namelen = sizeof(struct sockaddr_x25);
+-
+ x25_check_rbuf(sk);
+ rc = copied;
+ out_free_dgram:
diff --git a/1004_linux-3.12.5.patch b/1004_linux-3.12.5.patch
new file mode 100644
index 00000000..95a21bae
--- /dev/null
+++ b/1004_linux-3.12.5.patch
@@ -0,0 +1,2489 @@
+diff --git a/Makefile b/Makefile
+index 3b7165eb4734..986f3cdbad56 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
+index 90ce29dbe119..08a56bcfc724 100644
+--- a/arch/arm/boot/dts/armada-370-db.dts
++++ b/arch/arm/boot/dts/armada-370-db.dts
+@@ -99,22 +99,22 @@
+ spi-max-frequency = <50000000>;
+ };
+ };
++ };
+
+- pcie-controller {
++ pcie-controller {
++ status = "okay";
++ /*
++ * The two PCIe units are accessible through
++ * both standard PCIe slots and mini-PCIe
++ * slots on the board.
++ */
++ pcie@1,0 {
++ /* Port 0, Lane 0 */
++ status = "okay";
++ };
++ pcie@2,0 {
++ /* Port 1, Lane 0 */
+ status = "okay";
+- /*
+- * The two PCIe units are accessible through
+- * both standard PCIe slots and mini-PCIe
+- * slots on the board.
+- */
+- pcie@1,0 {
+- /* Port 0, Lane 0 */
+- status = "okay";
+- };
+- pcie@2,0 {
+- /* Port 1, Lane 0 */
+- status = "okay";
+- };
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
+index 1de2dae0fdae..b97ab017d4a2 100644
+--- a/arch/arm/boot/dts/armada-370-xp.dtsi
++++ b/arch/arm/boot/dts/armada-370-xp.dtsi
+@@ -117,7 +117,7 @@
+
+ coherency-fabric@20200 {
+ compatible = "marvell,coherency-fabric";
+- reg = <0x20200 0xb0>, <0x21810 0x1c>;
++ reg = <0x20200 0xb0>, <0x21010 0x1c>;
+ };
+
+ serial@12000 {
+diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+index 0358a33cba48..9dc7381d57c2 100644
+--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+@@ -47,7 +47,7 @@
+ /*
+ * MV78230 has 2 PCIe units Gen2.0: One unit can be
+ * configured as x4 or quad x1 lanes. One unit is
+- * x4/x1.
++ * x1 only.
+ */
+ pcie-controller {
+ compatible = "marvell,armada-xp-pcie";
+@@ -61,10 +61,10 @@
+
+ ranges =
+ <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */
+- 0x82000000 0 0x42000 MBUS_ID(0xf0, 0x01) 0x42000 0 0x00002000 /* Port 2.0 registers */
+ 0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000 /* Port 0.1 registers */
+ 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */
+ 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */
++ 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */
+ 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
+ 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */
+ 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
+@@ -73,8 +73,8 @@
+ 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */
+ 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
+ 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */
+- 0x82000000 0x9 0 MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */
+- 0x81000000 0x9 0 MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO */>;
++ 0x82000000 0x5 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
++ 0x81000000 0x5 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */>;
+
+ pcie@1,0 {
+ device_type = "pci";
+@@ -144,20 +144,20 @@
+ status = "disabled";
+ };
+
+- pcie@9,0 {
++ pcie@5,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
+- reg = <0x4800 0 0 0 0>;
++ assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
++ reg = <0x2800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+- ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0
+- 0x81000000 0 0 0x81000000 0x9 0 1 0>;
++ ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
++ 0x81000000 0 0 0x81000000 0x5 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &mpic 99>;
+- marvell,pcie-port = <2>;
++ interrupt-map = <0 0 0 0 &mpic 62>;
++ marvell,pcie-port = <1>;
+ marvell,pcie-lane = <0>;
+- clocks = <&gateclk 26>;
++ clocks = <&gateclk 9>;
+ status = "disabled";
+ };
+ };
+diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+index 0e82c5062243..a598ce9035f5 100644
+--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+@@ -48,7 +48,7 @@
+ /*
+ * MV78260 has 3 PCIe units Gen2.0: Two units can be
+ * configured as x4 or quad x1 lanes. One unit is
+- * x4/x1.
++ * x4 only.
+ */
+ pcie-controller {
+ compatible = "marvell,armada-xp-pcie";
+@@ -67,7 +67,9 @@
+ 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */
+ 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */
+ 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */
+- 0x82000000 0 0x82000 MBUS_ID(0xf0, 0x01) 0x82000 0 0x00002000 /* Port 3.0 registers */
++ 0x82000000 0 0x84000 MBUS_ID(0xf0, 0x01) 0x84000 0 0x00002000 /* Port 1.1 registers */
++ 0x82000000 0 0x88000 MBUS_ID(0xf0, 0x01) 0x88000 0 0x00002000 /* Port 1.2 registers */
++ 0x82000000 0 0x8c000 MBUS_ID(0xf0, 0x01) 0x8c000 0 0x00002000 /* Port 1.3 registers */
+ 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
+ 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */
+ 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
+@@ -76,10 +78,18 @@
+ 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */
+ 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
+ 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */
+- 0x82000000 0x9 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
+- 0x81000000 0x9 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */
+- 0x82000000 0xa 0 MBUS_ID(0x08, 0xf8) 0 1 0 /* Port 3.0 MEM */
+- 0x81000000 0xa 0 MBUS_ID(0x08, 0xf0) 0 1 0 /* Port 3.0 IO */>;
++
++ 0x82000000 0x5 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
++ 0x81000000 0x5 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */
++ 0x82000000 0x6 0 MBUS_ID(0x08, 0xd8) 0 1 0 /* Port 1.1 MEM */
++ 0x81000000 0x6 0 MBUS_ID(0x08, 0xd0) 0 1 0 /* Port 1.1 IO */
++ 0x82000000 0x7 0 MBUS_ID(0x08, 0xb8) 0 1 0 /* Port 1.2 MEM */
++ 0x81000000 0x7 0 MBUS_ID(0x08, 0xb0) 0 1 0 /* Port 1.2 IO */
++ 0x82000000 0x8 0 MBUS_ID(0x08, 0x78) 0 1 0 /* Port 1.3 MEM */
++ 0x81000000 0x8 0 MBUS_ID(0x08, 0x70) 0 1 0 /* Port 1.3 IO */
++
++ 0x82000000 0x9 0 MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */
++ 0x81000000 0x9 0 MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO */>;
+
+ pcie@1,0 {
+ device_type = "pci";
+@@ -105,8 +115,8 @@
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+- ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+- 0x81000000 0 0 0x81000000 0x2 0 1 0>;
++ ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
++ 0x81000000 0 0 0x81000000 0x2 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 59>;
+ marvell,pcie-port = <0>;
+@@ -149,37 +159,88 @@
+ status = "disabled";
+ };
+
+- pcie@9,0 {
++ pcie@5,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
+- reg = <0x4800 0 0 0 0>;
++ assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
++ reg = <0x2800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+- ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0
+- 0x81000000 0 0 0x81000000 0x9 0 1 0>;
++ ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
++ 0x81000000 0 0 0x81000000 0x5 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &mpic 99>;
+- marvell,pcie-port = <2>;
++ interrupt-map = <0 0 0 0 &mpic 62>;
++ marvell,pcie-port = <1>;
+ marvell,pcie-lane = <0>;
+- clocks = <&gateclk 26>;
++ clocks = <&gateclk 9>;
+ status = "disabled";
+ };
+
+- pcie@10,0 {
++ pcie@6,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x82000 0 0x2000>;
+- reg = <0x5000 0 0 0 0>;
++ assigned-addresses = <0x82000800 0 0x84000 0 0x2000>;
++ reg = <0x3000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+- ranges = <0x82000000 0 0 0x82000000 0xa 0 1 0
+- 0x81000000 0 0 0x81000000 0xa 0 1 0>;
++ ranges = <0x82000000 0 0 0x82000000 0x6 0 1 0
++ 0x81000000 0 0 0x81000000 0x6 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &mpic 103>;
+- marvell,pcie-port = <3>;
++ interrupt-map = <0 0 0 0 &mpic 63>;
++ marvell,pcie-port = <1>;
++ marvell,pcie-lane = <1>;
++ clocks = <&gateclk 10>;
++ status = "disabled";
++ };
++
++ pcie@7,0 {
++ device_type = "pci";
++ assigned-addresses = <0x82000800 0 0x88000 0 0x2000>;
++ reg = <0x3800 0 0 0 0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ #interrupt-cells = <1>;
++ ranges = <0x82000000 0 0 0x82000000 0x7 0 1 0
++ 0x81000000 0 0 0x81000000 0x7 0 1 0>;
++ interrupt-map-mask = <0 0 0 0>;
++ interrupt-map = <0 0 0 0 &mpic 64>;
++ marvell,pcie-port = <1>;
++ marvell,pcie-lane = <2>;
++ clocks = <&gateclk 11>;
++ status = "disabled";
++ };
++
++ pcie@8,0 {
++ device_type = "pci";
++ assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>;
++ reg = <0x4000 0 0 0 0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ #interrupt-cells = <1>;
++ ranges = <0x82000000 0 0 0x82000000 0x8 0 1 0
++ 0x81000000 0 0 0x81000000 0x8 0 1 0>;
++ interrupt-map-mask = <0 0 0 0>;
++ interrupt-map = <0 0 0 0 &mpic 65>;
++ marvell,pcie-port = <1>;
++ marvell,pcie-lane = <3>;
++ clocks = <&gateclk 12>;
++ status = "disabled";
++ };
++
++ pcie@9,0 {
++ device_type = "pci";
++ assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
++ reg = <0x4800 0 0 0 0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ #interrupt-cells = <1>;
++ ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0
++ 0x81000000 0 0 0x81000000 0x9 0 1 0>;
++ interrupt-map-mask = <0 0 0 0>;
++ interrupt-map = <0 0 0 0 &mpic 99>;
++ marvell,pcie-port = <2>;
+ marvell,pcie-lane = <0>;
+- clocks = <&gateclk 27>;
++ clocks = <&gateclk 26>;
+ status = "disabled";
+ };
+ };
+diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
+index 814ab67c8c29..b74879b42515 100644
+--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
++++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
+@@ -239,15 +239,6 @@
+ 0xf0 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c4_sda */
+ >;
+ };
+-};
+-
+-&omap4_pmx_wkup {
+- led_wkgpio_pins: pinmux_leds_wkpins {
+- pinctrl-single,pins = <
+- 0x1a (PIN_OUTPUT | MUX_MODE3) /* gpio_wk7 */
+- 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */
+- >;
+- };
+
+ /*
+ * wl12xx GPIO outputs for WLAN_EN, BT_EN, FM_EN, BT_WAKEUP
+@@ -267,7 +258,7 @@
+ pinctrl-single,pins = <
+ 0x38 (PIN_INPUT | MUX_MODE3) /* gpmc_ncs2.gpio_52 */
+ 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */
+- 0x108 (PIN_OUTPUT | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */
++ 0x108 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */
+ 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */
+ 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */
+ 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */
+@@ -277,6 +268,15 @@
+ };
+ };
+
++&omap4_pmx_wkup {
++ led_wkgpio_pins: pinmux_leds_wkpins {
++ pinctrl-single,pins = <
++ 0x1a (PIN_OUTPUT | MUX_MODE3) /* gpio_wk7 */
++ 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */
++ >;
++ };
++};
++
+ &i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
+index 119fc378fc52..fe88105a0421 100644
+--- a/arch/arm/configs/multi_v7_defconfig
++++ b/arch/arm/configs/multi_v7_defconfig
+@@ -132,12 +132,14 @@ CONFIG_USB_GPIO_VBUS=y
+ CONFIG_USB_ISP1301=y
+ CONFIG_USB_MXS_PHY=y
+ CONFIG_MMC=y
++CONFIG_MMC_BLOCK_MINORS=16
+ CONFIG_MMC_ARMMMCI=y
+ CONFIG_MMC_SDHCI=y
+ CONFIG_MMC_SDHCI_PLTFM=y
+ CONFIG_MMC_SDHCI_ESDHC_IMX=y
+ CONFIG_MMC_SDHCI_TEGRA=y
+ CONFIG_MMC_SDHCI_SPEAR=y
++CONFIG_MMC_SDHCI_BCM_KONA=y
+ CONFIG_MMC_OMAP=y
+ CONFIG_MMC_OMAP_HS=y
+ CONFIG_EDAC=y
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index be956dbf6bae..1571d126e9dd 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -61,7 +61,7 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ * mapping to be mapped at. This is particularly important for
+ * non-high vector CPUs.
+ */
+-#define FIRST_USER_ADDRESS PAGE_SIZE
++#define FIRST_USER_ADDRESS (PAGE_SIZE * 2)
+
+ /*
+ * Use TASK_SIZE as the ceiling argument for free_pgtables() and
+diff --git a/arch/arm/mach-at91/sama5d3.c b/arch/arm/mach-at91/sama5d3.c
+index 3ea86428ee09..a28873fe3049 100644
+--- a/arch/arm/mach-at91/sama5d3.c
++++ b/arch/arm/mach-at91/sama5d3.c
+@@ -95,19 +95,19 @@ static struct clk twi0_clk = {
+ .name = "twi0_clk",
+ .pid = SAMA5D3_ID_TWI0,
+ .type = CLK_TYPE_PERIPHERAL,
+- .div = AT91_PMC_PCR_DIV2,
++ .div = AT91_PMC_PCR_DIV8,
+ };
+ static struct clk twi1_clk = {
+ .name = "twi1_clk",
+ .pid = SAMA5D3_ID_TWI1,
+ .type = CLK_TYPE_PERIPHERAL,
+- .div = AT91_PMC_PCR_DIV2,
++ .div = AT91_PMC_PCR_DIV8,
+ };
+ static struct clk twi2_clk = {
+ .name = "twi2_clk",
+ .pid = SAMA5D3_ID_TWI2,
+ .type = CLK_TYPE_PERIPHERAL,
+- .div = AT91_PMC_PCR_DIV2,
++ .div = AT91_PMC_PCR_DIV8,
+ };
+ static struct clk mmc0_clk = {
+ .name = "mci0_clk",
+diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
+index 2739ca2c1334..e0091685fd48 100644
+--- a/arch/arm/mach-footbridge/common.c
++++ b/arch/arm/mach-footbridge/common.c
+@@ -15,6 +15,7 @@
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/spinlock.h>
++#include <video/vga.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/page.h>
+@@ -196,6 +197,8 @@ void __init footbridge_map_io(void)
+ iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc));
+ pci_map_io_early(__phys_to_pfn(DC21285_PCI_IO));
+ }
++
++ vga_base = PCIMEM_BASE;
+ }
+
+ void footbridge_restart(enum reboot_mode mode, const char *cmd)
+diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
+index 3490a24f969e..7c2fdae9a38b 100644
+--- a/arch/arm/mach-footbridge/dc21285.c
++++ b/arch/arm/mach-footbridge/dc21285.c
+@@ -18,7 +18,6 @@
+ #include <linux/irq.h>
+ #include <linux/io.h>
+ #include <linux/spinlock.h>
+-#include <video/vga.h>
+
+ #include <asm/irq.h>
+ #include <asm/mach/pci.h>
+@@ -291,7 +290,6 @@ void __init dc21285_preinit(void)
+ int cfn_mode;
+
+ pcibios_min_mem = 0x81000000;
+- vga_base = PCIMEM_BASE;
+
+ mem_size = (unsigned int)high_memory - PAGE_OFFSET;
+ for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1)
+diff --git a/arch/arm/mach-footbridge/ebsa285.c b/arch/arm/mach-footbridge/ebsa285.c
+index b08243500e2e..1a7235fb52ac 100644
+--- a/arch/arm/mach-footbridge/ebsa285.c
++++ b/arch/arm/mach-footbridge/ebsa285.c
+@@ -30,21 +30,24 @@ static const struct {
+ const char *name;
+ const char *trigger;
+ } ebsa285_leds[] = {
+- { "ebsa285:amber", "heartbeat", },
+- { "ebsa285:green", "cpu0", },
++ { "ebsa285:amber", "cpu0", },
++ { "ebsa285:green", "heartbeat", },
+ { "ebsa285:red",},
+ };
+
++static unsigned char hw_led_state;
++
+ static void ebsa285_led_set(struct led_classdev *cdev,
+ enum led_brightness b)
+ {
+ struct ebsa285_led *led = container_of(cdev,
+ struct ebsa285_led, cdev);
+
+- if (b != LED_OFF)
+- *XBUS_LEDS |= led->mask;
++ if (b == LED_OFF)
++ hw_led_state |= led->mask;
+ else
+- *XBUS_LEDS &= ~led->mask;
++ hw_led_state &= ~led->mask;
++ *XBUS_LEDS = hw_led_state;
+ }
+
+ static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
+@@ -52,18 +55,19 @@ static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
+ struct ebsa285_led *led = container_of(cdev,
+ struct ebsa285_led, cdev);
+
+- return (*XBUS_LEDS & led->mask) ? LED_FULL : LED_OFF;
++ return hw_led_state & led->mask ? LED_OFF : LED_FULL;
+ }
+
+ static int __init ebsa285_leds_init(void)
+ {
+ int i;
+
+- if (machine_is_ebsa285())
++ if (!machine_is_ebsa285())
+ return -ENODEV;
+
+- /* 3 LEDS All ON */
+- *XBUS_LEDS |= XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED;
++ /* 3 LEDS all off */
++ hw_led_state = XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED;
++ *XBUS_LEDS = hw_led_state;
+
+ for (i = 0; i < ARRAY_SIZE(ebsa285_leds); i++) {
+ struct ebsa285_led *led;
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index 0c6356255fe3..304661d21369 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -146,7 +146,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+- info.low_limit = PAGE_SIZE;
++ info.low_limit = FIRST_USER_ADDRESS;
+ info.high_limit = mm->mmap_base;
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
+index 0acb089d0f70..1046b373d1ae 100644
+--- a/arch/arm/mm/pgd.c
++++ b/arch/arm/mm/pgd.c
+@@ -87,7 +87,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+ init_pud = pud_offset(init_pgd, 0);
+ init_pmd = pmd_offset(init_pud, 0);
+ init_pte = pte_offset_map(init_pmd, 0);
+- set_pte_ext(new_pte, *init_pte, 0);
++ set_pte_ext(new_pte + 0, init_pte[0], 0);
++ set_pte_ext(new_pte + 1, init_pte[1], 0);
+ pte_unmap(init_pte);
+ pte_unmap(new_pte);
+ }
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index 5dfd248e3f1a..0d3a9d4927b5 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -61,8 +61,15 @@ static int get_offset(struct address_space *mapping)
+ return (unsigned long) mapping >> 8;
+ }
+
+-static unsigned long get_shared_area(struct address_space *mapping,
+- unsigned long addr, unsigned long len, unsigned long pgoff)
++static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
++{
++ struct address_space *mapping = filp ? filp->f_mapping : NULL;
++
++ return (get_offset(mapping) + pgoff) << PAGE_SHIFT;
++}
++
++static unsigned long get_shared_area(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff)
+ {
+ struct vm_unmapped_area_info info;
+
+@@ -71,7 +78,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
+ info.low_limit = PAGE_ALIGN(addr);
+ info.high_limit = TASK_SIZE;
+ info.align_mask = PAGE_MASK & (SHMLBA - 1);
+- info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
++ info.align_offset = shared_align_offset(filp, pgoff);
+ return vm_unmapped_area(&info);
+ }
+
+@@ -82,20 +89,18 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ return -ENOMEM;
+ if (flags & MAP_FIXED) {
+ if ((flags & MAP_SHARED) &&
+- (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
++ (addr - shared_align_offset(filp, pgoff)) & (SHMLBA - 1))
+ return -EINVAL;
+ return addr;
+ }
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+- if (filp) {
+- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
+- } else if(flags & MAP_SHARED) {
+- addr = get_shared_area(NULL, addr, len, pgoff);
+- } else {
++ if (filp || (flags & MAP_SHARED))
++ addr = get_shared_area(filp, addr, len, pgoff);
++ else
+ addr = get_unshared_area(addr, len);
+- }
++
+ return addr;
+ }
+
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index 2e4b5be31a1b..94e20dd2729f 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -55,8 +55,7 @@ struct pcc_param {
+
+ struct s390_xts_ctx {
+ u8 key[32];
+- u8 xts_param[16];
+- struct pcc_param pcc;
++ u8 pcc_key[32];
+ long enc;
+ long dec;
+ int key_len;
+@@ -591,7 +590,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ xts_ctx->enc = KM_XTS_128_ENCRYPT;
+ xts_ctx->dec = KM_XTS_128_DECRYPT;
+ memcpy(xts_ctx->key + 16, in_key, 16);
+- memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
++ memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
+ break;
+ case 48:
+ xts_ctx->enc = 0;
+@@ -602,7 +601,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ xts_ctx->enc = KM_XTS_256_ENCRYPT;
+ xts_ctx->dec = KM_XTS_256_DECRYPT;
+ memcpy(xts_ctx->key, in_key, 32);
+- memcpy(xts_ctx->pcc.key, in_key + 32, 32);
++ memcpy(xts_ctx->pcc_key, in_key + 32, 32);
+ break;
+ default:
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+@@ -621,29 +620,33 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
+ unsigned int nbytes = walk->nbytes;
+ unsigned int n;
+ u8 *in, *out;
+- void *param;
++ struct pcc_param pcc_param;
++ struct {
++ u8 key[32];
++ u8 init[16];
++ } xts_param;
+
+ if (!nbytes)
+ goto out;
+
+- memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
+- memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
+- memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
+- memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
+- param = xts_ctx->pcc.key + offset;
+- ret = crypt_s390_pcc(func, param);
++ memset(pcc_param.block, 0, sizeof(pcc_param.block));
++ memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
++ memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
++ memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
++ memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
++ ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
+ if (ret < 0)
+ return -EIO;
+
+- memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
+- param = xts_ctx->key + offset;
++ memcpy(xts_param.key, xts_ctx->key, 32);
++ memcpy(xts_param.init, pcc_param.xts, 16);
+ do {
+ /* only use complete blocks */
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+
+- ret = crypt_s390_km(func, param, out, in, n);
++ ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
+ if (ret < 0 || ret != n)
+ return -EIO;
+
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 41250fb33985..eda00f9be0cf 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -31,6 +31,9 @@ ifeq ($(CONFIG_X86_32),y)
+
+ KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
+
++ # Don't autogenerate SSE instructions
++ KBUILD_CFLAGS += -mno-sse
++
+ # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
+ # with nonstandard options
+ KBUILD_CFLAGS += -fno-pic
+@@ -57,8 +60,11 @@ else
+ KBUILD_AFLAGS += -m64
+ KBUILD_CFLAGS += -m64
+
++ # Don't autogenerate SSE instructions
++ KBUILD_CFLAGS += -mno-sse
++
+ # Use -mpreferred-stack-boundary=3 if supported.
+- KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3)
++ KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
+
+ # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
+ cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
+diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
+index ae6969a7ffd4..2e34c386d760 100644
+--- a/block/blk-cgroup.h
++++ b/block/blk-cgroup.h
+@@ -430,9 +430,9 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
+ uint64_t v;
+
+ do {
+- start = u64_stats_fetch_begin(&stat->syncp);
++ start = u64_stats_fetch_begin_bh(&stat->syncp);
+ v = stat->cnt;
+- } while (u64_stats_fetch_retry(&stat->syncp, start));
++ } while (u64_stats_fetch_retry_bh(&stat->syncp, start));
+
+ return v;
+ }
+@@ -498,9 +498,9 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
+ struct blkg_rwstat tmp;
+
+ do {
+- start = u64_stats_fetch_begin(&rwstat->syncp);
++ start = u64_stats_fetch_begin_bh(&rwstat->syncp);
+ tmp = *rwstat;
+- } while (u64_stats_fetch_retry(&rwstat->syncp, start));
++ } while (u64_stats_fetch_retry_bh(&rwstat->syncp, start));
+
+ return tmp;
+ }
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index 850246206b12..585c3b279feb 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -117,6 +117,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ lock_sock(sk);
+ sg_init_table(ctx->sgl.sg, 1);
+ sg_set_page(ctx->sgl.sg, page, size, offset);
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index a19c027b29bd..918a3b4148b8 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -381,6 +381,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ lock_sock(sk);
+ if (!ctx->more && ctx->used)
+ goto unlock;
+diff --git a/crypto/authenc.c b/crypto/authenc.c
+index ffce19de05cf..528b00bc4769 100644
+--- a/crypto/authenc.c
++++ b/crypto/authenc.c
+@@ -368,9 +368,10 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
+ if (!err) {
+ struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+- struct ablkcipher_request *abreq = aead_request_ctx(areq);
+- u8 *iv = (u8 *)(abreq + 1) +
+- crypto_ablkcipher_reqsize(ctx->enc);
++ struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
++ struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
++ + ctx->reqoff);
++ u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
+
+ err = crypto_authenc_genicv(areq, iv, 0);
+ }
+diff --git a/crypto/ccm.c b/crypto/ccm.c
+index 499c91717d93..ed009b77e67d 100644
+--- a/crypto/ccm.c
++++ b/crypto/ccm.c
+@@ -271,7 +271,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
+ }
+
+ /* compute plaintext into mac */
+- get_data_to_compute(cipher, pctx, plain, cryptlen);
++ if (cryptlen)
++ get_data_to_compute(cipher, pctx, plain, cryptlen);
+
+ out:
+ return err;
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index db6dfcfa3e2e..ab58556d347c 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -3625,6 +3625,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
+ shost->max_lun = 1;
+ shost->max_channel = 1;
+ shost->max_cmd_len = 16;
++ shost->no_write_same = 1;
+
+ /* Schedule policy is determined by ->qc_defer()
+ * callback and it needs to see every deferred qc.
+diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
+index 40cc0cf2ded6..e6939e13e338 100644
+--- a/drivers/char/i8k.c
++++ b/drivers/char/i8k.c
+@@ -664,6 +664,13 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"),
+ },
+ },
++ {
++ .ident = "Dell XPS421",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
++ },
++ },
+ { }
+ };
+
+diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
+index d75040ddd2b3..22c07fb6ab78 100644
+--- a/drivers/cpuidle/cpuidle.c
++++ b/drivers/cpuidle/cpuidle.c
+@@ -448,7 +448,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
+ */
+ void cpuidle_unregister_device(struct cpuidle_device *dev)
+ {
+- if (dev->registered == 0)
++ if (!dev || dev->registered == 0)
+ return;
+
+ cpuidle_pause_and_lock();
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index 281029daf98c..b0bb056458a3 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -1623,6 +1623,7 @@ static struct scsi_host_template scsi_driver_template = {
+ .cmd_per_lun = 1,
+ .can_queue = 1,
+ .sdev_attrs = sbp2_scsi_sysfs_attrs,
++ .no_write_same = 1,
+ };
+
+ MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
+index 5002d50e3781..743fd426f21b 100644
+--- a/drivers/firmware/efi/efi-pstore.c
++++ b/drivers/firmware/efi/efi-pstore.c
+@@ -18,14 +18,12 @@ module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
+
+ static int efi_pstore_open(struct pstore_info *psi)
+ {
+- efivar_entry_iter_begin();
+ psi->data = NULL;
+ return 0;
+ }
+
+ static int efi_pstore_close(struct pstore_info *psi)
+ {
+- efivar_entry_iter_end();
+ psi->data = NULL;
+ return 0;
+ }
+@@ -39,6 +37,12 @@ struct pstore_read_data {
+ char **buf;
+ };
+
++static inline u64 generic_id(unsigned long timestamp,
++ unsigned int part, int count)
++{
++ return (timestamp * 100 + part) * 1000 + count;
++}
++
+ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
+ {
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+@@ -57,7 +61,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
+
+ if (sscanf(name, "dump-type%u-%u-%d-%lu-%c",
+ cb_data->type, &part, &cnt, &time, &data_type) == 5) {
+- *cb_data->id = part;
++ *cb_data->id = generic_id(time, part, cnt);
+ *cb_data->count = cnt;
+ cb_data->timespec->tv_sec = time;
+ cb_data->timespec->tv_nsec = 0;
+@@ -67,7 +71,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
+ *cb_data->compressed = false;
+ } else if (sscanf(name, "dump-type%u-%u-%d-%lu",
+ cb_data->type, &part, &cnt, &time) == 4) {
+- *cb_data->id = part;
++ *cb_data->id = generic_id(time, part, cnt);
+ *cb_data->count = cnt;
+ cb_data->timespec->tv_sec = time;
+ cb_data->timespec->tv_nsec = 0;
+@@ -79,7 +83,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
+ * which doesn't support holding
+ * multiple logs, remains.
+ */
+- *cb_data->id = part;
++ *cb_data->id = generic_id(time, part, 0);
+ *cb_data->count = 0;
+ cb_data->timespec->tv_sec = time;
+ cb_data->timespec->tv_nsec = 0;
+@@ -91,19 +95,125 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
+ __efivar_entry_get(entry, &entry->var.Attributes,
+ &entry->var.DataSize, entry->var.Data);
+ size = entry->var.DataSize;
++ memcpy(*cb_data->buf, entry->var.Data,
++ (size_t)min_t(unsigned long, EFIVARS_DATA_SIZE_MAX, size));
+
+- *cb_data->buf = kmemdup(entry->var.Data, size, GFP_KERNEL);
+- if (*cb_data->buf == NULL)
+- return -ENOMEM;
+ return size;
+ }
+
++/**
++ * efi_pstore_scan_sysfs_enter
++ * @entry: scanning entry
++ * @next: next entry
++ * @head: list head
++ */
++static void efi_pstore_scan_sysfs_enter(struct efivar_entry *pos,
++ struct efivar_entry *next,
++ struct list_head *head)
++{
++ pos->scanning = true;
++ if (&next->list != head)
++ next->scanning = true;
++}
++
++/**
++ * __efi_pstore_scan_sysfs_exit
++ * @entry: deleting entry
++ * @turn_off_scanning: Check if a scanning flag should be turned off
++ */
++static inline void __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry,
++ bool turn_off_scanning)
++{
++ if (entry->deleting) {
++ list_del(&entry->list);
++ efivar_entry_iter_end();
++ efivar_unregister(entry);
++ efivar_entry_iter_begin();
++ } else if (turn_off_scanning)
++ entry->scanning = false;
++}
++
++/**
++ * efi_pstore_scan_sysfs_exit
++ * @pos: scanning entry
++ * @next: next entry
++ * @head: list head
++ * @stop: a flag indicating whether scanning should stop
++ */
++static void efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
++ struct efivar_entry *next,
++ struct list_head *head, bool stop)
++{
++ __efi_pstore_scan_sysfs_exit(pos, true);
++ if (stop)
++ __efi_pstore_scan_sysfs_exit(next, &next->list != head);
++}
++
++/**
++ * efi_pstore_sysfs_entry_iter
++ *
++ * @data: function-specific data to pass to callback
++ * @pos: entry to begin iterating from
++ *
++ * You MUST call efivar_entry_iter_begin() before this function, and
++ * efivar_entry_iter_end() afterwards.
++ *
++ * It is possible to begin iteration from an arbitrary entry within
++ * the list by passing @pos. @pos is updated on return to point to
++ * the entry following the last one passed to efi_pstore_read_func().
++ * To begin iterating from the beginning of the list @pos must be %NULL.
++ */
++static int efi_pstore_sysfs_entry_iter(void *data, struct efivar_entry **pos)
++{
++ struct efivar_entry *entry, *n;
++ struct list_head *head = &efivar_sysfs_list;
++ int size = 0;
++
++ if (!*pos) {
++ list_for_each_entry_safe(entry, n, head, list) {
++ efi_pstore_scan_sysfs_enter(entry, n, head);
++
++ size = efi_pstore_read_func(entry, data);
++ efi_pstore_scan_sysfs_exit(entry, n, head, size < 0);
++ if (size)
++ break;
++ }
++ *pos = n;
++ return size;
++ }
++
++ list_for_each_entry_safe_from((*pos), n, head, list) {
++ efi_pstore_scan_sysfs_enter((*pos), n, head);
++
++ size = efi_pstore_read_func((*pos), data);
++ efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0);
++ if (size)
++ break;
++ }
++ *pos = n;
++ return size;
++}
++
++/**
++ * efi_pstore_read
++ *
++ * This function returns the size of an NVRAM entry logged via efi_pstore_write().
++ * The meaning and behavior of efi_pstore/pstore are as follows.
++ *
++ * size > 0: Got data of an entry logged via efi_pstore_write() successfully,
++ * and the pstore filesystem will continue reading subsequent entries.
++ * size == 0: Entry was not logged via efi_pstore_write(),
++ * and the efi_pstore driver will continue reading subsequent entries.
++ * size < 0: Failed to get data of an entry logged via efi_pstore_write(),
++ * and pstore will stop reading entries.
++ */
+ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+ int *count, struct timespec *timespec,
+ char **buf, bool *compressed,
+ struct pstore_info *psi)
+ {
+ struct pstore_read_data data;
++ ssize_t size;
+
+ data.id = id;
+ data.type = type;
+@@ -112,8 +222,17 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+ data.compressed = compressed;
+ data.buf = buf;
+
+- return __efivar_entry_iter(efi_pstore_read_func, &efivar_sysfs_list, &data,
+- (struct efivar_entry **)&psi->data);
++ *data.buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
++ if (!*data.buf)
++ return -ENOMEM;
++
++ efivar_entry_iter_begin();
++ size = efi_pstore_sysfs_entry_iter(&data,
++ (struct efivar_entry **)&psi->data);
++ efivar_entry_iter_end();
++ if (size <= 0)
++ kfree(*data.buf);
++ return size;
+ }
+
+ static int efi_pstore_write(enum pstore_type_id type,
+@@ -184,9 +303,17 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data)
+ return 0;
+ }
+
++ if (entry->scanning) {
++ /*
++ * Skip deletion because this entry will be deleted
++ * after scanning is completed.
++ */
++ entry->deleting = true;
++ } else
++ list_del(&entry->list);
++
+ /* found */
+ __efivar_entry_delete(entry);
+- list_del(&entry->list);
+
+ return 1;
+ }
+@@ -199,14 +326,16 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
+ char name[DUMP_NAME_LEN];
+ efi_char16_t efi_name[DUMP_NAME_LEN];
+ int found, i;
++ unsigned int part;
+
+- sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count,
+- time.tv_sec);
++ do_div(id, 1000);
++ part = do_div(id, 100);
++ sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count, time.tv_sec);
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = name[i];
+
+- edata.id = id;
++ edata.id = part;
+ edata.type = type;
+ edata.count = count;
+ edata.time = time;
+@@ -214,10 +343,12 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
+
+ efivar_entry_iter_begin();
+ found = __efivar_entry_iter(efi_pstore_erase_func, &efivar_sysfs_list, &edata, &entry);
+- efivar_entry_iter_end();
+
+- if (found)
++ if (found && !entry->scanning) {
++ efivar_entry_iter_end();
+ efivar_unregister(entry);
++ } else
++ efivar_entry_iter_end();
+
+ return 0;
+ }
+diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
+index 8a7432a4b413..8c5a61ae03ea 100644
+--- a/drivers/firmware/efi/efivars.c
++++ b/drivers/firmware/efi/efivars.c
+@@ -383,12 +383,16 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ else if (__efivar_entry_delete(entry))
+ err = -EIO;
+
+- efivar_entry_iter_end();
+-
+- if (err)
++ if (err) {
++ efivar_entry_iter_end();
+ return err;
++ }
+
+- efivar_unregister(entry);
++ if (!entry->scanning) {
++ efivar_entry_iter_end();
++ efivar_unregister(entry);
++ } else
++ efivar_entry_iter_end();
+
+ /* It's dead Jim.... */
+ return count;
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 391c67b182d9..b22659cccca4 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -683,8 +683,16 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
+ if (!found)
+ return NULL;
+
+- if (remove)
+- list_del(&entry->list);
++ if (remove) {
++ if (entry->scanning) {
++ /*
++ * The entry will be deleted
++ * after scanning is completed.
++ */
++ entry->deleting = true;
++ } else
++ list_del(&entry->list);
++ }
+
+ return entry;
+ }
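Together with the scanning/deleting fields added to struct efivar_entry further down, the hunks above implement a deferred-deletion handshake: a deleter that finds the entry mid-scan only marks it deleting, and the scanner unlinks and frees it when its scan step completes. The single-threaded sketch below models that handshake with hypothetical scan_enter()/scan_exit()/request_delete() helpers; the efivar_entry_iter_begin()/end() locking that protects the real flags is deliberately elided.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
        int id;
        bool scanning;
        bool deleting;
        struct entry *next;
};

/* Deleter: never unlink an entry a scanner is still using. */
static void request_delete(struct entry **pp)
{
        struct entry *e = *pp;

        if (e->scanning) {
                e->deleting = true;     /* deferred to the scanner   */
        } else {
                *pp = e->next;          /* safe to unlink right away */
                free(e);
        }
}

/* Scanner: mirrors efi_pstore_scan_sysfs_enter()/_exit(). */
static void scan_enter(struct entry *e)
{
        e->scanning = true;
}

static void scan_exit(struct entry **pp)
{
        struct entry *e = *pp;

        e->scanning = false;
        if (e->deleting) {              /* a delete raced with us */
                *pp = e->next;
                free(e);
        }
}

int main(void)
{
        struct entry *head = calloc(1, sizeof(*head));

        head->id = 1;
        scan_enter(head);
        request_delete(&head);          /* deferred: scan in progress  */
        printf("after delete request: head=%p\n", (void *)head);
        scan_exit(&head);               /* scanner performs the unlink */
        printf("after scan exit:      head=%p\n", (void *)head);
        return 0;
}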
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index a0b33a216d4a..2aa3ca215bd6 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -69,10 +69,14 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+ u32 val;
+ struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
++ u32 out_mask, out_shadow;
+
+- val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR);
++ out_mask = in_be32(mm->regs + GPIO_DIR);
+
+- return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio);
++ val = in_be32(mm->regs + GPIO_DAT) & ~out_mask;
++ out_shadow = mpc8xxx_gc->data & out_mask;
++
++ return (val | out_shadow) & mpc8xxx_gpio2mask(gpio);
+ }
+
+ static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
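The fixed mpc8572_gpio_get() reads input pins from the data register but output pins from the driver's shadow word, and, as the actual fix, masks the shadow with the direction register so a stale shadow bit on a pin that has since become an input can no longer leak into the result. A small pure-function model of that merge, with the register values passed in as plain integers:

#include <stdint.h>
#include <stdio.h>

static uint32_t gpio_get(uint32_t dat, uint32_t dir_out_mask,
                         uint32_t shadow, uint32_t pin_mask)
{
        uint32_t val = dat & ~dir_out_mask;             /* input pins  */
        uint32_t out_shadow = shadow & dir_out_mask;    /* output pins */

        return (val | out_shadow) & pin_mask;
}

int main(void)
{
        /* pin 0 is an output (shadow = 1), pin 1 an input (dat = 1) */
        printf("%u\n", !!gpio_get(0x2, 0x1, 0x1, 0x1)); /* 1: shadow */
        printf("%u\n", !!gpio_get(0x2, 0x1, 0x1, 0x2)); /* 1: dat    */
        return 0;
}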
+diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
+index 38b523a1ece0..a11ff74a5127 100644
+--- a/drivers/input/Kconfig
++++ b/drivers/input/Kconfig
+@@ -80,7 +80,7 @@ config INPUT_MATRIXKMAP
+ comment "Userland interfaces"
+
+ config INPUT_MOUSEDEV
+- tristate "Mouse interface" if EXPERT
++ tristate "Mouse interface"
+ default y
+ help
+ Say Y here if you want your mouse to be accessible as char devices
+diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
+index c1edd39bc5ba..1fa37e6d004e 100644
+--- a/drivers/input/keyboard/Kconfig
++++ b/drivers/input/keyboard/Kconfig
+@@ -2,7 +2,7 @@
+ # Input core configuration
+ #
+ menuconfig INPUT_KEYBOARD
+- bool "Keyboards" if EXPERT || !X86
++ bool "Keyboards"
+ default y
+ help
+ Say Y here, and a list of supported keyboards will be displayed.
+@@ -67,7 +67,7 @@ config KEYBOARD_ATARI
+ module will be called atakbd.
+
+ config KEYBOARD_ATKBD
+- tristate "AT keyboard" if EXPERT || !X86
++ tristate "AT keyboard"
+ default y
+ select SERIO
+ select SERIO_LIBPS2
+diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
+index 33b3e88fe4a2..f6578647e550 100644
+--- a/drivers/input/serio/Kconfig
++++ b/drivers/input/serio/Kconfig
+@@ -2,7 +2,7 @@
+ # Input core configuration
+ #
+ config SERIO
+- tristate "Serial I/O support" if EXPERT || !X86
++ tristate "Serial I/O support"
+ default y
+ help
+ Say Yes here if you have any input device that uses serial I/O to
+@@ -19,7 +19,7 @@ config SERIO
+ if SERIO
+
+ config SERIO_I8042
+- tristate "i8042 PC Keyboard controller" if EXPERT || !X86
++ tristate "i8042 PC Keyboard controller"
+ default y
+ depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
+ (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
+@@ -170,7 +170,7 @@ config SERIO_MACEPS2
+ module will be called maceps2.
+
+ config SERIO_LIBPS2
+- tristate "PS/2 driver library" if EXPERT
++ tristate "PS/2 driver library"
+ depends on SERIO_I8042 || SERIO_I8042=n
+ help
+ Say Y here if you are using a driver for device connected
+diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
+index 0e8df41aaf14..2cf2bbc0b927 100644
+--- a/drivers/misc/enclosure.c
++++ b/drivers/misc/enclosure.c
+@@ -198,6 +198,13 @@ static void enclosure_remove_links(struct enclosure_component *cdev)
+ {
+ char name[ENCLOSURE_NAME_SIZE];
+
++ /*
++ * In odd circumstances, like multipath devices, something else may
++ * already have removed the links, so check for this condition first.
++ */
++ if (!cdev->dev->kobj.sd)
++ return;
++
+ enclosure_link_name(cdev, name);
+ sysfs_remove_link(&cdev->dev->kobj, name);
+ sysfs_remove_link(&cdev->cdev.kobj, "device");
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index 6a203b6e8346..66f411a6e8ea 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -109,8 +109,12 @@
+ #define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
+ #define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
+
+-#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
++#define MEI_DEV_ID_LPT_H 0x8C3A /* Lynx Point H */
++#define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */
+ #define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
++#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */
++
++#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
+ /*
+ * MEI HW Section
+ */
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 1b3844e82379..e637318b79ba 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -76,8 +76,11 @@ static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
+- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_H)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_HR)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_WPT_LP)},
+
+ /* required last entry */
+ {0, }
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index e3fc07cf2f62..e59c42b446a9 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -712,22 +712,31 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
+ return 0;
+ }
+
+-static int c_can_get_berr_counter(const struct net_device *dev,
+- struct can_berr_counter *bec)
++static int __c_can_get_berr_counter(const struct net_device *dev,
++ struct can_berr_counter *bec)
+ {
+ unsigned int reg_err_counter;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+- c_can_pm_runtime_get_sync(priv);
+-
+ reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
+ bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
+ ERR_CNT_REC_SHIFT;
+ bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
+
++ return 0;
++}
++
++static int c_can_get_berr_counter(const struct net_device *dev,
++ struct can_berr_counter *bec)
++{
++ struct c_can_priv *priv = netdev_priv(dev);
++ int err;
++
++ c_can_pm_runtime_get_sync(priv);
++ err = __c_can_get_berr_counter(dev, bec);
+ c_can_pm_runtime_put_sync(priv);
+
+- return 0;
++ return err;
+ }
+
+ /*
+@@ -872,7 +881,7 @@ static int c_can_handle_state_change(struct net_device *dev,
+ if (unlikely(!skb))
+ return 0;
+
+- c_can_get_berr_counter(dev, &bec);
++ __c_can_get_berr_counter(dev, &bec);
+ reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
+ rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
+ ERR_CNT_RP_SHIFT;
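The c_can fix above splits the error-counter read into a bare __c_can_get_berr_counter(), which assumes the device is already powered, and a c_can_get_berr_counter() wrapper that takes and drops the runtime-PM reference. Callers that already hold the reference, such as the state-change handler, use the bare helper and avoid a nested runtime-PM transaction. A minimal model with a counter standing in for pm_runtime_get_sync()/put_sync():

#include <stdio.h>

static int pm_refs;

static void pm_get(void) { pm_refs++; }
static void pm_put(void) { pm_refs--; }

static int __read_counter(int *val)
{
        /* caller guarantees pm_refs > 0, i.e. the hardware is awake */
        *val = 42;
        return 0;
}

static int read_counter(int *val)
{
        int err;

        pm_get();
        err = __read_counter(val);
        pm_put();
        return err;
}

int main(void)
{
        int v;

        read_counter(&v);               /* plain caller                */

        pm_get();                       /* e.g. a caller in IRQ context */
        __read_counter(&v);             /* reuse the held PM reference  */
        pm_put();

        printf("v=%d refs=%d\n", v, pm_refs);
        return 0;
}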
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 8f5ce747feb5..c1c3b132fed5 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -1020,13 +1020,13 @@ static int flexcan_probe(struct platform_device *pdev)
+ dev_err(&pdev->dev, "no ipg clock defined\n");
+ return PTR_ERR(clk_ipg);
+ }
+- clock_freq = clk_get_rate(clk_ipg);
+
+ clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(clk_per)) {
+ dev_err(&pdev->dev, "no per clock defined\n");
+ return PTR_ERR(clk_per);
+ }
++ clock_freq = clk_get_rate(clk_per);
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index 7164a999f50f..f17c3018b7c7 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -494,20 +494,20 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ uint8_t isrc, status;
+ int n = 0;
+
+- /* Shared interrupts and IRQ off? */
+- if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
+- return IRQ_NONE;
+-
+ if (priv->pre_irq)
+ priv->pre_irq(priv);
+
++ /* Shared interrupts and IRQ off? */
++ if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
++ goto out;
++
+ while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
+ (n < SJA1000_MAX_IRQ)) {
+- n++;
++
+ status = priv->read_reg(priv, SJA1000_SR);
+ /* check for absent controller due to hw unplug */
+ if (status == 0xFF && sja1000_is_absent(priv))
+- return IRQ_NONE;
++ goto out;
+
+ if (isrc & IRQ_WUI)
+ netdev_warn(dev, "wakeup interrupt\n");
+@@ -535,7 +535,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ status = priv->read_reg(priv, SJA1000_SR);
+ /* check for absent controller */
+ if (status == 0xFF && sja1000_is_absent(priv))
+- return IRQ_NONE;
++ goto out;
+ }
+ }
+ if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
+@@ -543,8 +543,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ if (sja1000_err(dev, isrc, status))
+ break;
+ }
++ n++;
+ }
+-
++out:
+ if (priv->post_irq)
+ priv->post_irq(priv);
+
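The sja1000 rework replaces the early returns with jumps to a single out label, so post_irq() now runs on every exit path once pre_irq() has run, including the shared-interrupt and hot-unplug bailouts that previously skipped it. The same single-exit shape in miniature; the hook bodies and error conditions here are stand-ins:

#include <stdio.h>

static void pre_irq(void)  { printf("pre\n");  }
static void post_irq(void) { printf("post\n"); }

static int handle_irq(int irq_off, int absent)
{
        int handled = 0;

        pre_irq();

        if (irq_off)            /* shared IRQ, ours is masked */
                goto out;
        if (absent)             /* device was hot-unplugged   */
                goto out;

        handled = 1;            /* real work would go here    */
out:
        post_irq();             /* runs on every path         */
        return handled;
}

int main(void)
{
        printf("-> %d\n", handle_irq(1, 0));    /* masked: 0  */
        printf("-> %d\n", handle_irq(0, 1));    /* absent: 0  */
        printf("-> %d\n", handle_irq(0, 0));    /* handled: 1 */
        return 0;
}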
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 12d961c4ebca..cd76d2a6e014 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -6848,12 +6848,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
+ pci_unmap_single(tp->pdev, dma_addr, skb_size,
+ PCI_DMA_FROMDEVICE);
+
+- skb = build_skb(data, frag_size);
+- if (!skb) {
+- tg3_frag_free(frag_size != 0, data);
+- goto drop_it_no_recycle;
+- }
+- skb_reserve(skb, TG3_RX_OFFSET(tp));
+ /* Ensure that the update to the data happens
+ * after the usage of the old DMA mapping.
+ */
+@@ -6861,6 +6855,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
+
+ ri->data = NULL;
+
++ skb = build_skb(data, frag_size);
++ if (!skb) {
++ tg3_frag_free(frag_size != 0, data);
++ goto drop_it_no_recycle;
++ }
++ skb_reserve(skb, TG3_RX_OFFSET(tp));
+ } else {
+ tg3_recycle_rx(tnapi, tpr, opaque_key,
+ desc_idx, *post_ptr);
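The tg3 reorder reads as a lifetime fix: ri->data is now retired before build_skb() is attempted, so when the skb build fails and tg3_frag_free() releases the buffer, the ring entry no longer holds a dangling pointer to it. A stripped-down model of that clear-before-consume ordering; the names are hypothetical and the memory barrier from the original code is elided:

#include <stdio.h>
#include <stdlib.h>

struct ring_entry { void *data; };

static void *build_packet(void *data, int fail)
{
        if (fail) {
                free(data);             /* consumer frees on failure */
                return NULL;
        }
        return data;                    /* ownership transferred     */
}

static void *receive(struct ring_entry *ri, int fail)
{
        void *data = ri->data;
        void *pkt;

        ri->data = NULL;                /* retire the ring slot first */
        pkt = build_packet(data, fail);
        if (!pkt)
                return NULL;            /* ri->data is already clean  */
        return pkt;
}

int main(void)
{
        struct ring_entry ri = { .data = malloc(64) };

        if (!receive(&ri, 1))
                printf("drop; ring slot=%p (no dangling pointer)\n",
                       ri.data);
        return 0;
}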
+diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
+index da442b81370a..1fef5240e6ad 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
+@@ -433,27 +433,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
++ txq_id = info->hw_queue;
++
+ if (is_agg)
+ txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
+ else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+ /*
+- * Send this frame after DTIM -- there's a special queue
+- * reserved for this for contexts that support AP mode.
+- */
+- txq_id = ctx->mcast_queue;
+-
+- /*
+ * The microcode will clear the more data
+ * bit in the last frame it transmits.
+ */
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+- } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+- txq_id = IWL_AUX_QUEUE;
+- else
+- txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
++ }
+
+- WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
+ WARN_ON_ONCE(is_agg &&
+ priv->queue_to_mac80211[txq_id] != info->hw_queue);
+
+diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
+index a39ee38a9414..2bd5c5f7db08 100644
+--- a/drivers/pnp/driver.c
++++ b/drivers/pnp/driver.c
+@@ -197,6 +197,11 @@ static int pnp_bus_freeze(struct device *dev)
+ return __pnp_bus_suspend(dev, PMSG_FREEZE);
+ }
+
++static int pnp_bus_poweroff(struct device *dev)
++{
++ return __pnp_bus_suspend(dev, PMSG_HIBERNATE);
++}
++
+ static int pnp_bus_resume(struct device *dev)
+ {
+ struct pnp_dev *pnp_dev = to_pnp_dev(dev);
+@@ -234,9 +239,14 @@ static int pnp_bus_resume(struct device *dev)
+ }
+
+ static const struct dev_pm_ops pnp_bus_dev_pm_ops = {
++ /* Suspend callbacks */
+ .suspend = pnp_bus_suspend,
+- .freeze = pnp_bus_freeze,
+ .resume = pnp_bus_resume,
++ /* Hibernate callbacks */
++ .freeze = pnp_bus_freeze,
++ .thaw = pnp_bus_resume,
++ .poweroff = pnp_bus_poweroff,
++ .restore = pnp_bus_resume,
+ };
+
+ struct bus_type pnp_bus_type = {
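The dev_pm_ops table above wires hibernation up properly: freeze/thaw bracket writing the image, poweroff runs before powering down, and restore runs after the image is read back, with pnp_bus_poweroff() differing from the freeze path only in the PMSG_* message it forwards to __pnp_bus_suspend(). A compact userspace model of that callback wiring, with toy function pointers in place of the kernel types:

#include <stdio.h>

enum pm_msg { PMSG_SUSPEND, PMSG_FREEZE, PMSG_HIBERNATE };

static int bus_do_suspend(enum pm_msg msg)
{
        printf("suspend path, msg=%d\n", msg);
        return 0;
}

static int bus_suspend(void)  { return bus_do_suspend(PMSG_SUSPEND); }
static int bus_freeze(void)   { return bus_do_suspend(PMSG_FREEZE); }
static int bus_poweroff(void) { return bus_do_suspend(PMSG_HIBERNATE); }

static int bus_resume(void)
{
        printf("resume path\n");
        return 0;
}

struct pm_ops {
        int (*suspend)(void);
        int (*resume)(void);
        /* hibernate callbacks */
        int (*freeze)(void);
        int (*thaw)(void);
        int (*poweroff)(void);
        int (*restore)(void);
};

static const struct pm_ops bus_pm_ops = {
        .suspend  = bus_suspend,
        .resume   = bus_resume,
        .freeze   = bus_freeze,
        .thaw     = bus_resume,         /* resume doubles as thaw    */
        .poweroff = bus_poweroff,
        .restore  = bus_resume,         /* ...and as restore         */
};

int main(void)
{
        bus_pm_ops.freeze();            /* before writing the image  */
        bus_pm_ops.poweroff();          /* before powering down      */
        bus_pm_ops.restore();           /* after reading it back     */
        return 0;
}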
+diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
+index 5e1e12c0cf42..0a7325361d29 100644
+--- a/drivers/scsi/3w-9xxx.c
++++ b/drivers/scsi/3w-9xxx.c
+@@ -2025,7 +2025,8 @@ static struct scsi_host_template driver_template = {
+ .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = twa_host_attrs,
+- .emulated = 1
++ .emulated = 1,
++ .no_write_same = 1,
+ };
+
+ /* This function will probe and initialize a card */
+diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
+index c845bdbeb6c0..4de346017e9f 100644
+--- a/drivers/scsi/3w-sas.c
++++ b/drivers/scsi/3w-sas.c
+@@ -1600,7 +1600,8 @@ static struct scsi_host_template driver_template = {
+ .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = twl_host_attrs,
+- .emulated = 1
++ .emulated = 1,
++ .no_write_same = 1,
+ };
+
+ /* This function will probe and initialize a card */
+diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
+index b9276d10b25c..752624e6bc00 100644
+--- a/drivers/scsi/3w-xxxx.c
++++ b/drivers/scsi/3w-xxxx.c
+@@ -2279,7 +2279,8 @@ static struct scsi_host_template driver_template = {
+ .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = tw_host_attrs,
+- .emulated = 1
++ .emulated = 1,
++ .no_write_same = 1,
+ };
+
+ /* This function will probe and initialize a card */
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index f0d432c139d0..4921ed19a027 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -1081,6 +1081,7 @@ static struct scsi_host_template aac_driver_template = {
+ #endif
+ .use_clustering = ENABLE_CLUSTERING,
+ .emulated = 1,
++ .no_write_same = 1,
+ };
+
+ static void __aac_shutdown(struct aac_dev * aac)
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index 33c52bc2c7b4..278c9fa62067 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -137,6 +137,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
+ .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = arcmsr_host_attrs,
++ .no_write_same = 1,
+ };
+ static struct pci_device_id arcmsr_device_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
+diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
+index 94d5d0102f7d..42bcb970445a 100644
+--- a/drivers/scsi/bfa/bfa_fcs.h
++++ b/drivers/scsi/bfa/bfa_fcs.h
+@@ -296,6 +296,7 @@ wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
+ struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
+ u16 vf_id, wwn_t lpwwn);
+
++void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname);
+ void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
+ struct bfa_lport_info_s *port_info);
+ void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port,
+diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
+index 2f61a5af3658..f5e4e61a0fd7 100644
+--- a/drivers/scsi/bfa/bfa_fcs_lport.c
++++ b/drivers/scsi/bfa/bfa_fcs_lport.c
+@@ -1097,6 +1097,17 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
+ bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
+ }
+
++void
++bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port,
++ char *symname)
++{
++ strcpy(port->port_cfg.sym_name.symname, symname);
++
++ if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
++ bfa_fcs_lport_ns_util_send_rspn_id(
++ BFA_FCS_GET_NS_FROM_PORT(port), NULL);
++}
++
+ /*
+ * fcs_lport_api
+ */
+@@ -5140,9 +5151,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
+ u8 *psymbl = &symbl[0];
+ int len;
+
+- if (!bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
+- return;
+-
+ /* Avoid sending RSPN in the following states. */
+ if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) ||
+ bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) ||
+diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
+index e9a681d31223..40be670a1cbc 100644
+--- a/drivers/scsi/bfa/bfad_attr.c
++++ b/drivers/scsi/bfa/bfad_attr.c
+@@ -593,11 +593,8 @@ bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
+ return;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+- if (strlen(sym_name) > 0) {
+- strcpy(fcs_vport->lport.port_cfg.sym_name.symname, sym_name);
+- bfa_fcs_lport_ns_util_send_rspn_id(
+- BFA_FCS_GET_NS_FROM_PORT((&fcs_vport->lport)), NULL);
+- }
++ if (strlen(sym_name) > 0)
++ bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ }
+
+diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
+index 6d55b4e7e792..aec3d4da276f 100644
+--- a/drivers/scsi/gdth.c
++++ b/drivers/scsi/gdth.c
+@@ -4686,6 +4686,7 @@ static struct scsi_host_template gdth_template = {
+ .cmd_per_lun = GDTH_MAXC_P_L,
+ .unchecked_isa_dma = 1,
+ .use_clustering = ENABLE_CLUSTERING,
++ .no_write_same = 1,
+ };
+
+ #ifdef CONFIG_ISA
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index df0c3c71ea43..3cafe0d784b8 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -388,6 +388,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
+ shost->unchecked_isa_dma = sht->unchecked_isa_dma;
+ shost->use_clustering = sht->use_clustering;
+ shost->ordered_tag = sht->ordered_tag;
++ shost->no_write_same = sht->no_write_same;
+
+ if (sht->supported_mode == MODE_UNKNOWN)
+ /* means we didn't set it ... default to INITIATOR */
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 891c86b66253..0eb09403680c 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -562,6 +562,7 @@ static struct scsi_host_template hpsa_driver_template = {
+ .sdev_attrs = hpsa_sdev_attrs,
+ .shost_attrs = hpsa_shost_attrs,
+ .max_sectors = 8192,
++ .no_write_same = 1,
+ };
+
+
+@@ -1289,7 +1290,7 @@ static void complete_scsi_command(struct CommandList *cp)
+ "has check condition: aborted command: "
+ "ASC: 0x%x, ASCQ: 0x%x\n",
+ cp, asc, ascq);
+- cmd->result = DID_SOFT_ERROR << 16;
++ cmd->result |= DID_SOFT_ERROR << 16;
+ break;
+ }
+ /* Must be some other type of check condition */
+@@ -4926,7 +4927,7 @@ reinit_after_soft_reset:
+ hpsa_hba_inquiry(h);
+ hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
+ start_controller_lockup_detector(h);
+- return 1;
++ return 0;
+
+ clean4:
+ hpsa_free_sg_chain_blocks(h);
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 36ac1c34ce97..573f4128b6b6 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -6305,7 +6305,8 @@ static struct scsi_host_template driver_template = {
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = ipr_ioa_attrs,
+ .sdev_attrs = ipr_dev_attrs,
+- .proc_name = IPR_NAME
++ .proc_name = IPR_NAME,
++ .no_write_same = 1,
+ };
+
+ /**
+diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
+index 8d5ea8a1e5a6..52a216f21ae5 100644
+--- a/drivers/scsi/ips.c
++++ b/drivers/scsi/ips.c
+@@ -374,6 +374,7 @@ static struct scsi_host_template ips_driver_template = {
+ .sg_tablesize = IPS_MAX_SG,
+ .cmd_per_lun = 3,
+ .use_clustering = ENABLE_CLUSTERING,
++ .no_write_same = 1,
+ };
+
+
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 161c98efade9..d2895836f9fa 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -211,7 +211,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+ qc->tf.nsect = 0;
+ }
+
+- ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis);
++ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
+ task->uldd_task = qc;
+ if (ata_is_atapi(qc->tf.protocol)) {
+ memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
+diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
+index 90c95a3385d1..816db12ef5d5 100644
+--- a/drivers/scsi/megaraid.c
++++ b/drivers/scsi/megaraid.c
+@@ -4244,6 +4244,7 @@ static struct scsi_host_template megaraid_template = {
+ .eh_device_reset_handler = megaraid_reset,
+ .eh_bus_reset_handler = megaraid_reset,
+ .eh_host_reset_handler = megaraid_reset,
++ .no_write_same = 1,
+ };
+
+ static int
+diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
+index 515c9629e9fe..8844d5c179af 100644
+--- a/drivers/scsi/megaraid/megaraid_mbox.c
++++ b/drivers/scsi/megaraid/megaraid_mbox.c
+@@ -367,6 +367,7 @@ static struct scsi_host_template megaraid_template_g = {
+ .eh_host_reset_handler = megaraid_reset_handler,
+ .change_queue_depth = megaraid_change_queue_depth,
+ .use_clustering = ENABLE_CLUSTERING,
++ .no_write_same = 1,
+ .sdev_attrs = megaraid_sdev_attrs,
+ .shost_attrs = megaraid_shost_attrs,
+ };
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 3020921a4746..a59a5526a318 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -2148,6 +2148,7 @@ static struct scsi_host_template megasas_template = {
+ .bios_param = megasas_bios_param,
+ .use_clustering = ENABLE_CLUSTERING,
+ .change_queue_depth = megasas_change_queue_depth,
++ .no_write_same = 1,
+ };
+
+ /**
+diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
+index 1eb7b0280a45..a38f71bbe7b6 100644
+--- a/drivers/scsi/pmcraid.c
++++ b/drivers/scsi/pmcraid.c
+@@ -4314,6 +4314,7 @@ static struct scsi_host_template pmcraid_host_template = {
+ .this_id = -1,
+ .sg_tablesize = PMCRAID_MAX_IOADLS,
+ .max_sectors = PMCRAID_IOA_MAX_SECTORS,
++ .no_write_same = 1,
+ .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = pmcraid_host_attrs,
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 5693f6d7eddb..2634d691ec17 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2638,6 +2638,12 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
+ {
+ struct scsi_device *sdev = sdkp->device;
+
++ if (sdev->host->no_write_same) {
++ sdev->no_write_same = 1;
++
++ return;
++ }
++
+ if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
+ sdev->no_report_opcodes = 1;
+
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 1a28f5632797..17d740427240 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1697,6 +1697,7 @@ static struct scsi_host_template scsi_driver = {
+ .use_clustering = DISABLE_CLUSTERING,
+ /* Make sure we don't get an sg segment crossing a page boundary */
+ .dma_boundary = PAGE_SIZE-1,
++ .no_write_same = 1,
+ };
+
+ enum {
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index c1a50674c1e3..28361f7783cd 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1077,6 +1077,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
+ static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
+ { "INT33C0", 0 },
+ { "INT33C1", 0 },
++ { "INT3430", 0 },
++ { "INT3431", 0 },
+ { "80860F0E", 0 },
+ { },
+ };
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index ff582933e94c..4d6f430087d0 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -810,7 +810,8 @@ static void process_echoes(struct tty_struct *tty)
+ struct n_tty_data *ldata = tty->disc_data;
+ size_t echoed;
+
+- if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_tail)
++ if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
++ ldata->echo_commit == ldata->echo_tail)
+ return;
+
+ mutex_lock(&ldata->output_lock);
+@@ -825,7 +826,8 @@ static void flush_echoes(struct tty_struct *tty)
+ {
+ struct n_tty_data *ldata = tty->disc_data;
+
+- if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_head)
++ if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
++ ldata->echo_commit == ldata->echo_head)
+ return;
+
+ mutex_lock(&ldata->output_lock);
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 3e7560f004f8..e8404319ca68 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1515,6 +1515,8 @@ static int acm_reset_resume(struct usb_interface *intf)
+
+ static const struct usb_device_id acm_ids[] = {
+ /* quirky and broken devices */
++ { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
++ .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
+ { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index b21d553c245b..dccb4db98ea1 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -2115,6 +2115,20 @@ static void ftdi_set_termios(struct tty_struct *tty,
+ termios->c_cflag |= CRTSCTS;
+ }
+
++ /*
++ * All FTDI UART chips are limited to CS7/8. We won't pretend to
++ * support CS5/6; revert the CSIZE setting instead.
++ */
++ if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) {
++ dev_warn(ddev, "requested CSIZE setting not supported\n");
++
++ termios->c_cflag &= ~CSIZE;
++ if (old_termios)
++ termios->c_cflag |= old_termios->c_cflag & CSIZE;
++ else
++ termios->c_cflag |= CS8;
++ }
++
+ cflag = termios->c_cflag;
+
+ if (!old_termios)
+@@ -2151,19 +2165,16 @@ no_skip:
+ } else {
+ urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
+ }
+- if (cflag & CSIZE) {
+- switch (cflag & CSIZE) {
+- case CS7:
+- urb_value |= 7;
+- dev_dbg(ddev, "Setting CS7\n");
+- break;
+- case CS8:
+- urb_value |= 8;
+- dev_dbg(ddev, "Setting CS8\n");
+- break;
+- default:
+- dev_err(ddev, "CSIZE was set but not CS7-CS8\n");
+- }
++ switch (cflag & CSIZE) {
++ case CS7:
++ urb_value |= 7;
++ dev_dbg(ddev, "Setting CS7\n");
++ break;
++ default:
++ case CS8:
++ urb_value |= 8;
++ dev_dbg(ddev, "Setting CS8\n");
++ break;
+ }
+
+ /* This is needed by the break command since it uses the same command
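The new FTDI guard normalises CSIZE before the switch below ever sees it: CS5/CS6 requests are rejected with a warning and reverted to the previous setting, or to CS8 when no previous termios exists, which is why CS8 can safely share the default: case. A toy model of that clamp, using small integers in place of the real CS* bit values:

#include <stdio.h>

enum { CS5 = 5, CS6 = 6, CS7 = 7, CS8 = 8 };

static int clamp_csize(int requested, const int *old)
{
        if (requested != CS7 && requested != CS8) {
                fprintf(stderr, "requested CSIZE setting not supported\n");
                return old ? *old : CS8;        /* revert or default */
        }
        return requested;
}

int main(void)
{
        int old = CS7;

        printf("%d\n", clamp_csize(CS5, &old)); /* 7: reverted  */
        printf("%d\n", clamp_csize(CS6, NULL)); /* 8: defaulted */
        printf("%d\n", clamp_csize(CS8, &old)); /* 8: accepted  */
        return 0;
}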
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index e5bdd987b9e8..a69da83604c0 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1813,25 +1813,25 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
+ iflag = tty->termios.c_iflag;
+
+ /* Change the number of bits */
+- if (cflag & CSIZE) {
+- switch (cflag & CSIZE) {
+- case CS5:
+- lData = LCR_BITS_5;
+- break;
++ switch (cflag & CSIZE) {
++ case CS5:
++ lData = LCR_BITS_5;
++ break;
+
+- case CS6:
+- lData = LCR_BITS_6;
+- break;
++ case CS6:
++ lData = LCR_BITS_6;
++ break;
+
+- case CS7:
+- lData = LCR_BITS_7;
+- break;
+- default:
+- case CS8:
+- lData = LCR_BITS_8;
+- break;
+- }
++ case CS7:
++ lData = LCR_BITS_7;
++ break;
++
++ default:
++ case CS8:
++ lData = LCR_BITS_8;
++ break;
+ }
++
+ /* Change the Parity bit */
+ if (cflag & PARENB) {
+ if (cflag & PARODD) {
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 1e6de4cd079d..1e3318dfa1cb 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -361,23 +361,21 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ 0, 0, buf, 7, 100);
+ dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
+
+- if (C_CSIZE(tty)) {
+- switch (C_CSIZE(tty)) {
+- case CS5:
+- buf[6] = 5;
+- break;
+- case CS6:
+- buf[6] = 6;
+- break;
+- case CS7:
+- buf[6] = 7;
+- break;
+- default:
+- case CS8:
+- buf[6] = 8;
+- }
+- dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
++ switch (C_CSIZE(tty)) {
++ case CS5:
++ buf[6] = 5;
++ break;
++ case CS6:
++ buf[6] = 6;
++ break;
++ case CS7:
++ buf[6] = 7;
++ break;
++ default:
++ case CS8:
++ buf[6] = 8;
+ }
++ dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
+
+ /* For reference buf[0]:buf[3] baud rate value */
+ pl2303_encode_baudrate(tty, port, &buf[0]);
+diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
+index 4abac28b5992..5b793c352267 100644
+--- a/drivers/usb/serial/spcp8x5.c
++++ b/drivers/usb/serial/spcp8x5.c
+@@ -348,22 +348,20 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
+ }
+
+ /* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */
+- if (cflag & CSIZE) {
+- switch (cflag & CSIZE) {
+- case CS5:
+- buf[1] |= SET_UART_FORMAT_SIZE_5;
+- break;
+- case CS6:
+- buf[1] |= SET_UART_FORMAT_SIZE_6;
+- break;
+- case CS7:
+- buf[1] |= SET_UART_FORMAT_SIZE_7;
+- break;
+- default:
+- case CS8:
+- buf[1] |= SET_UART_FORMAT_SIZE_8;
+- break;
+- }
++ switch (cflag & CSIZE) {
++ case CS5:
++ buf[1] |= SET_UART_FORMAT_SIZE_5;
++ break;
++ case CS6:
++ buf[1] |= SET_UART_FORMAT_SIZE_6;
++ break;
++ case CS7:
++ buf[1] |= SET_UART_FORMAT_SIZE_7;
++ break;
++ default:
++ case CS8:
++ buf[1] |= SET_UART_FORMAT_SIZE_8;
++ break;
+ }
+
+ /* Set Stop bit2 : 0:1bit 1:2bit */
+diff --git a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
+index e6d56f714ae4..d94f35dbd536 100644
+--- a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
++++ b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
+@@ -526,6 +526,8 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
++ mutex_lock(&ddata->mutex);
++
+ dev_dbg(&ddata->spi->dev, "%s\n", __func__);
+
+ in->ops.sdi->set_timings(in, &ddata->videomode);
+@@ -614,10 +616,7 @@ static int acx565akm_enable(struct omap_dss_device *dssdev)
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+- mutex_lock(&ddata->mutex);
+ r = acx565akm_panel_power_on(dssdev);
+- mutex_unlock(&ddata->mutex);
+-
+ if (r)
+ return r;
+
+diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
+index c4d2298893b1..255e8287d99a 100644
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -921,9 +921,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+ ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+ &kmap_ops[i] : NULL);
+ if (ret)
+- return ret;
++ goto out;
+ }
+
++ out:
+ if (lazy)
+ arch_leave_lazy_mmu_mode();
+
+@@ -954,9 +955,10 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+ ret = m2p_remove_override(pages[i], kmap_ops ?
+ &kmap_ops[i] : NULL);
+ if (ret)
+- return ret;
++ goto out;
+ }
+
++ out:
+ if (lazy)
+ arch_leave_lazy_mmu_mode();
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 3b115653d422..9be8021c70d8 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -4938,11 +4938,17 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
+
+ trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
+ switch (task->tk_status) {
+- case -NFS4ERR_STALE_STATEID:
+- case -NFS4ERR_EXPIRED:
+ case 0:
+ renew_lease(data->res.server, data->timestamp);
+ break;
++ case -NFS4ERR_ADMIN_REVOKED:
++ case -NFS4ERR_DELEG_REVOKED:
++ case -NFS4ERR_BAD_STATEID:
++ case -NFS4ERR_OLD_STATEID:
++ case -NFS4ERR_STALE_STATEID:
++ case -NFS4ERR_EXPIRED:
++ task->tk_status = 0;
++ break;
+ default:
+ if (nfs4_async_handle_error(task, data->res.server, NULL) ==
+ -EAGAIN) {
+diff --git a/fs/pipe.c b/fs/pipe.c
+index d2c45e14e6d8..0e0752ef2715 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -726,11 +726,25 @@ pipe_poll(struct file *filp, poll_table *wait)
+ return mask;
+ }
+
++static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
++{
++ int kill = 0;
++
++ spin_lock(&inode->i_lock);
++ if (!--pipe->files) {
++ inode->i_pipe = NULL;
++ kill = 1;
++ }
++ spin_unlock(&inode->i_lock);
++
++ if (kill)
++ free_pipe_info(pipe);
++}
++
+ static int
+ pipe_release(struct inode *inode, struct file *file)
+ {
+- struct pipe_inode_info *pipe = inode->i_pipe;
+- int kill = 0;
++ struct pipe_inode_info *pipe = file->private_data;
+
+ __pipe_lock(pipe);
+ if (file->f_mode & FMODE_READ)
+@@ -743,17 +757,9 @@ pipe_release(struct inode *inode, struct file *file)
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
+ }
+- spin_lock(&inode->i_lock);
+- if (!--pipe->files) {
+- inode->i_pipe = NULL;
+- kill = 1;
+- }
+- spin_unlock(&inode->i_lock);
+ __pipe_unlock(pipe);
+
+- if (kill)
+- free_pipe_info(pipe);
+-
++ put_pipe_info(inode, pipe);
+ return 0;
+ }
+
+@@ -1014,7 +1020,6 @@ static int fifo_open(struct inode *inode, struct file *filp)
+ {
+ struct pipe_inode_info *pipe;
+ bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
+- int kill = 0;
+ int ret;
+
+ filp->f_version = 0;
+@@ -1130,15 +1135,9 @@ err_wr:
+ goto err;
+
+ err:
+- spin_lock(&inode->i_lock);
+- if (!--pipe->files) {
+- inode->i_pipe = NULL;
+- kill = 1;
+- }
+- spin_unlock(&inode->i_lock);
+ __pipe_unlock(pipe);
+- if (kill)
+- free_pipe_info(pipe);
++
++ put_pipe_info(inode, pipe);
+ return ret;
+ }
+
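put_pipe_info() factors out a standard teardown shape: the reference count is dropped and the inode's pointer detached under the lock, but the free itself happens only after the lock is released. A userspace model with a pthread mutex standing in for inode->i_lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pipe {
        int files;              /* references, protected by the lock */
};

struct inode {
        pthread_mutex_t lock;
        struct pipe *i_pipe;
};

static void put_pipe(struct inode *inode, struct pipe *pipe)
{
        int kill = 0;

        pthread_mutex_lock(&inode->lock);
        if (--pipe->files == 0) {
                inode->i_pipe = NULL;   /* detach while locked   */
                kill = 1;
        }
        pthread_mutex_unlock(&inode->lock);

        if (kill)
                free(pipe);             /* free outside the lock */
}

int main(void)
{
        struct inode inode = { PTHREAD_MUTEX_INITIALIZER, NULL };

        inode.i_pipe = calloc(1, sizeof(*inode.i_pipe));
        inode.i_pipe->files = 2;

        put_pipe(&inode, inode.i_pipe); /* still referenced */
        put_pipe(&inode, inode.i_pipe); /* last ref: freed  */
        printf("i_pipe=%p\n", (void *)inode.i_pipe);
        return 0;
}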
+diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
+index 13621cc8cf4c..6a626a507b8c 100644
+--- a/include/crypto/scatterwalk.h
++++ b/include/crypto/scatterwalk.h
+@@ -36,6 +36,7 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
+ {
+ sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
+ sg1[num - 1].page_link &= ~0x02;
++ sg1[num - 1].page_link |= 0x01;
+ }
+
+ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
+@@ -43,7 +44,7 @@ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
+ if (sg_is_last(sg))
+ return NULL;
+
+- return (++sg)->length ? sg : (void *)sg_page(sg);
++ return (++sg)->length ? sg : sg_chain_ptr(sg);
+ }
+
+ static inline void scatterwalk_crypto_chain(struct scatterlist *head,
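The scatterwalk fix turns the last slot of sg1 into a proper chain entry: in a scatterlist the low two bits of page_link carry metadata, and the old code cleared the "last entry" bit (0x02) without setting the "chain pointer" bit (0x01), so walkers could not recognise the link. A toy model of that tagging scheme, assuming, as the real scatterlist code does, that the table pointers are aligned enough to leave the low bits free:

#include <stdint.h>
#include <stdio.h>

#define SG_CHAIN        0x01UL
#define SG_END          0x02UL

struct sg {
        uintptr_t page_link;    /* pointer with flags in the low bits */
};

static void sg_chain_to(struct sg *slot, struct sg *next)
{
        slot->page_link = (uintptr_t)next;
        slot->page_link &= ~SG_END;     /* no longer the last entry */
        slot->page_link |= SG_CHAIN;    /* ...but a chain pointer   */
}

static struct sg *sg_chain_ptr(const struct sg *slot)
{
        return (struct sg *)(slot->page_link & ~(SG_CHAIN | SG_END));
}

int main(void)
{
        struct sg a[2], b[2];

        sg_chain_to(&a[1], b);
        printf("chained=%d next-ok=%d\n",
               !!(a[1].page_link & SG_CHAIN), sg_chain_ptr(&a[1]) == b);
        return 0;
}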
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 5f8f176154f7..094ddd0f5d1c 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -782,6 +782,8 @@ struct efivar_entry {
+ struct efi_variable var;
+ struct list_head list;
+ struct kobject kobj;
++ bool scanning;
++ bool deleting;
+ };
+
+ extern struct list_head efivar_sysfs_list;
+@@ -840,6 +842,8 @@ void efivar_run_worker(void);
+ #if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE)
+ int efivars_sysfs_init(void);
+
++#define EFIVARS_DATA_SIZE_MAX 1024
++
+ #endif /* CONFIG_EFI_VARS */
+
+ #endif /* _LINUX_EFI_H */
+diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
+index 755243572219..50769a72166b 100644
+--- a/include/scsi/scsi_host.h
++++ b/include/scsi/scsi_host.h
+@@ -475,6 +475,9 @@ struct scsi_host_template {
+ */
+ unsigned ordered_tag:1;
+
++ /* True if the controller does not support WRITE SAME */
++ unsigned no_write_same:1;
++
+ /*
+ * Countdown for host blocking with no commands outstanding.
+ */
+@@ -674,6 +677,9 @@ struct Scsi_Host {
+ /* Don't resume host in EH */
+ unsigned eh_noresume:1;
+
++ /* The controller does not support WRITE SAME */
++ unsigned no_write_same:1;
++
+ /*
+ * Optional work queue to be utilized by the transport
+ */
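The WRITE SAME series threads a single bit through three layers: drivers set no_write_same in their scsi_host_template, scsi_host_alloc() copies it onto the Scsi_Host, and sd_read_write_same() finally marks the device so the block layer never issues the command. A minimal model of that flag propagation, with toy struct names standing in for the SCSI types:

#include <stdio.h>

struct host_template { unsigned no_write_same:1; };
struct host          { unsigned no_write_same:1; };
struct device_model  { unsigned no_write_same:1; };

static void host_alloc(struct host *h, const struct host_template *t)
{
        h->no_write_same = t->no_write_same;    /* scsi_host_alloc()    */
}

static void read_write_same(struct device_model *d, const struct host *h)
{
        if (h->no_write_same)
                d->no_write_same = 1;           /* sd_read_write_same() */
}

int main(void)
{
        struct host_template tmpl = { .no_write_same = 1 };
        struct host h;
        struct device_model dev = { 0 };

        host_alloc(&h, &tmpl);
        read_write_same(&dev, &h);
        printf("WRITE SAME %s\n", dev.no_write_same ? "disabled" : "enabled");
        return 0;
}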
+diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
+index 27a72d5d4b00..9e600b418467 100644
+--- a/include/sound/soc-dapm.h
++++ b/include/sound/soc-dapm.h
+@@ -104,7 +104,8 @@ struct device;
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = 1}
+ #define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \
+-{ .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, \
++{ .id = snd_soc_dapm_mux, .name = wname, \
++ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = 1}
+ #define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \
+ { .id = snd_soc_dapm_virt_mux, .name = wname, \
+diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
+index cb228bf21760..abcd6ca86cb7 100644
+--- a/kernel/irq/pm.c
++++ b/kernel/irq/pm.c
+@@ -50,7 +50,7 @@ static void resume_irqs(bool want_early)
+ bool is_early = desc->action &&
+ desc->action->flags & IRQF_EARLY_RESUME;
+
+- if (is_early != want_early)
++ if (!is_early && want_early)
+ continue;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 947ba25a95a0..5cf6c7097a71 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1347,7 +1347,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
+ tk->xtime_nsec -= remainder;
+ tk->xtime_nsec += 1ULL << tk->shift;
+ tk->ntp_error += remainder << tk->ntp_error_shift;
+-
++ tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
+ }
+ #else
+ #define old_vsyscall_fixup(tk)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 5e2c2f1a075d..f60b1eec3f87 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1075,6 +1075,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ if (!up->pending) {
+ struct msghdr msg = { .msg_flags = flags|MSG_MORE };
+
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index a52d2a1a5e83..f684a4f8c797 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -345,6 +345,9 @@ static int patch_ad1986a(struct hda_codec *codec)
+ */
+ spec->gen.multiout.no_share_stream = 1;
+
++ /* AD1986A can't manage the dynamic pin on/off smoothly */
++ spec->gen.auto_mute_via_amp = 1;
++
+ snd_hda_pick_fixup(codec, ad1986a_fixup_models, ad1986a_fixup_tbl,
+ ad1986a_fixups);
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+@@ -961,6 +964,7 @@ static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ spec->gen.vmaster_mute.hook = ad1884_vmaster_hp_gpio_hook;
++ spec->gen.own_eapd_ctl = 1;
+ snd_hda_sequence_write_cache(codec, gpio_init_verbs);
+ break;
+ case HDA_FIXUP_ACT_PROBE:
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f0f54829ecee..21b948af6ed0 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1769,6 +1769,7 @@ enum {
+ ALC889_FIXUP_DAC_ROUTE,
+ ALC889_FIXUP_MBP_VREF,
+ ALC889_FIXUP_IMAC91_VREF,
++ ALC889_FIXUP_MBA21_VREF,
+ ALC882_FIXUP_INV_DMIC,
+ ALC882_FIXUP_NO_PRIMARY_HP,
+ ALC887_FIXUP_ASUS_BASS,
+@@ -1872,17 +1873,13 @@ static void alc889_fixup_mbp_vref(struct hda_codec *codec,
+ }
+ }
+
+-/* Set VREF on speaker pins on imac91 */
+-static void alc889_fixup_imac91_vref(struct hda_codec *codec,
+- const struct hda_fixup *fix, int action)
++static void alc889_fixup_mac_pins(struct hda_codec *codec,
++ const hda_nid_t *nids, int num_nids)
+ {
+ struct alc_spec *spec = codec->spec;
+- static hda_nid_t nids[2] = { 0x18, 0x1a };
+ int i;
+
+- if (action != HDA_FIXUP_ACT_INIT)
+- return;
+- for (i = 0; i < ARRAY_SIZE(nids); i++) {
++ for (i = 0; i < num_nids; i++) {
+ unsigned int val;
+ val = snd_hda_codec_get_pin_target(codec, nids[i]);
+ val |= AC_PINCTL_VREF_50;
+@@ -1891,6 +1888,26 @@ static void alc889_fixup_imac91_vref(struct hda_codec *codec,
+ spec->gen.keep_vref_in_automute = 1;
+ }
+
++/* Set VREF on speaker pins on imac91 */
++static void alc889_fixup_imac91_vref(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ static hda_nid_t nids[2] = { 0x18, 0x1a };
++
++ if (action == HDA_FIXUP_ACT_INIT)
++ alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
++}
++
++/* Set VREF on speaker pins on mba21 */
++static void alc889_fixup_mba21_vref(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ static hda_nid_t nids[2] = { 0x18, 0x19 };
++
++ if (action == HDA_FIXUP_ACT_INIT)
++ alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
++}
++
+ /* Don't take HP output as primary
+ * Strangely, the speaker output doesn't work on Vaio Z and some Vaio
+ * all-in-one desktop PCs (for example VGC-LN51JGB) through DAC 0x05
+@@ -2087,6 +2104,12 @@ static const struct hda_fixup alc882_fixups[] = {
+ .chained = true,
+ .chain_id = ALC882_FIXUP_GPIO1,
+ },
++ [ALC889_FIXUP_MBA21_VREF] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc889_fixup_mba21_vref,
++ .chained = true,
++ .chain_id = ALC889_FIXUP_MBP_VREF,
++ },
+ [ALC882_FIXUP_INV_DMIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic_0x12,
+@@ -2151,7 +2174,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3200, "iMac 7,1 Aluminum", ALC882_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x106b, 0x3400, "MacBookAir 1,1", ALC889_FIXUP_MBP_VREF),
+- SND_PCI_QUIRK(0x106b, 0x3500, "MacBookAir 2,1", ALC889_FIXUP_MBP_VREF),
++ SND_PCI_QUIRK(0x106b, 0x3500, "MacBookAir 2,1", ALC889_FIXUP_MBA21_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_FIXUP_MACPRO_GPIO),
+@@ -3231,6 +3254,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
+ alc_write_coef_idx(codec, 0x18, 0x7388);
+ break;
+ case 0x10ec0668:
++ alc_write_coef_idx(codec, 0x11, 0x0001);
+ alc_write_coef_idx(codec, 0x15, 0x0d60);
+ alc_write_coef_idx(codec, 0xc3, 0x0000);
+ break;
+@@ -3253,6 +3277,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
+ alc_write_coef_idx(codec, 0x18, 0x7388);
+ break;
+ case 0x10ec0668:
++ alc_write_coef_idx(codec, 0x11, 0x0001);
+ alc_write_coef_idx(codec, 0x15, 0x0d50);
+ alc_write_coef_idx(codec, 0xc3, 0x0000);
+ break;
+@@ -3982,6 +4007,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
++ SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS),
+ SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -4266,6 +4292,7 @@ enum {
+ ALC861_FIXUP_AMP_VREF_0F,
+ ALC861_FIXUP_NO_JACK_DETECT,
+ ALC861_FIXUP_ASUS_A6RP,
++ ALC660_FIXUP_ASUS_W7J,
+ };
+
+ /* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */
+@@ -4315,10 +4342,22 @@ static const struct hda_fixup alc861_fixups[] = {
+ .v.func = alc861_fixup_asus_amp_vref_0f,
+ .chained = true,
+ .chain_id = ALC861_FIXUP_NO_JACK_DETECT,
++ },
++ [ALC660_FIXUP_ASUS_W7J] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ /* ASUS W7J needs a magic pin setup on unused NID 0x10
++ * for enabling outputs
++ */
++ {0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24},
++ { }
++ },
+ }
+ };
+
+ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
++ SND_PCI_QUIRK(0x1043, 0x1253, "ASUS W7J", ALC660_FIXUP_ASUS_W7J),
++ SND_PCI_QUIRK(0x1043, 0x1263, "ASUS Z35HL", ALC660_FIXUP_ASUS_W7J),
+ SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
+ SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
+ SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
+diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
+index 456bb8c6d759..bc7472c968e3 100644
+--- a/sound/soc/codecs/wm8731.c
++++ b/sound/soc/codecs/wm8731.c
+@@ -447,10 +447,10 @@ static int wm8731_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ iface |= 0x0001;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+- iface |= 0x0003;
++ iface |= 0x0013;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+- iface |= 0x0013;
++ iface |= 0x0003;
+ break;
+ default:
+ return -EINVAL;
+diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
+index 253c88bb7a4c..4f05fb88bddf 100644
+--- a/sound/soc/codecs/wm8990.c
++++ b/sound/soc/codecs/wm8990.c
+@@ -1259,6 +1259,8 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
+
+ /* disable POBCTRL, SOFT_ST and BUFDCOPEN */
+ snd_soc_write(codec, WM8990_ANTIPOP2, 0x0);
++
++ codec->cache_sync = 1;
+ break;
+ }
+
diff --git a/1005_linux-3.12.6.patch b/1005_linux-3.12.6.patch
new file mode 100644
index 00000000..d2aa621d
--- /dev/null
+++ b/1005_linux-3.12.6.patch
@@ -0,0 +1,4485 @@
+diff --git a/Makefile b/Makefile
+index 986f3cdbad56..2b23383311ff 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
+index f244f5f02365..9d1bfe445c3c 100644
+--- a/arch/arm/boot/dts/sun6i-a31.dtsi
++++ b/arch/arm/boot/dts/sun6i-a31.dtsi
+@@ -193,7 +193,10 @@
+ pio: pinctrl@01c20800 {
+ compatible = "allwinner,sun6i-a31-pinctrl";
+ reg = <0x01c20800 0x400>;
+- interrupts = <0 11 1>, <0 15 1>, <0 16 1>, <0 17 1>;
++ interrupts = <0 11 4>,
++ <0 15 4>,
++ <0 16 4>,
++ <0 17 4>;
+ clocks = <&apb1_gates 5>;
+ gpio-controller;
+ interrupt-controller;
+@@ -212,11 +215,11 @@
+ timer@01c20c00 {
+ compatible = "allwinner,sun4i-timer";
+ reg = <0x01c20c00 0xa0>;
+- interrupts = <0 18 1>,
+- <0 19 1>,
+- <0 20 1>,
+- <0 21 1>,
+- <0 22 1>;
++ interrupts = <0 18 4>,
++ <0 19 4>,
++ <0 20 4>,
++ <0 21 4>,
++ <0 22 4>;
+ clocks = <&osc24M>;
+ };
+
+@@ -228,7 +231,7 @@
+ uart0: serial@01c28000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28000 0x400>;
+- interrupts = <0 0 1>;
++ interrupts = <0 0 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb2_gates 16>;
+@@ -238,7 +241,7 @@
+ uart1: serial@01c28400 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28400 0x400>;
+- interrupts = <0 1 1>;
++ interrupts = <0 1 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb2_gates 17>;
+@@ -248,7 +251,7 @@
+ uart2: serial@01c28800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28800 0x400>;
+- interrupts = <0 2 1>;
++ interrupts = <0 2 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb2_gates 18>;
+@@ -258,7 +261,7 @@
+ uart3: serial@01c28c00 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28c00 0x400>;
+- interrupts = <0 3 1>;
++ interrupts = <0 3 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb2_gates 19>;
+@@ -268,7 +271,7 @@
+ uart4: serial@01c29000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c29000 0x400>;
+- interrupts = <0 4 1>;
++ interrupts = <0 4 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb2_gates 20>;
+@@ -278,7 +281,7 @@
+ uart5: serial@01c29400 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c29400 0x400>;
+- interrupts = <0 5 1>;
++ interrupts = <0 5 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb2_gates 21>;
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 94f6b05f9e24..92f7b15dd221 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu);
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ struct stackframe frame;
++ unsigned long stack_page;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+@@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
+ frame.sp = thread_saved_sp(p);
+ frame.lr = 0; /* recovered from the stack */
+ frame.pc = thread_saved_pc(p);
++ stack_page = (unsigned long)task_stack_page(p);
+ do {
+- int ret = unwind_frame(&frame);
+- if (ret < 0)
++ if (frame.sp < stack_page ||
++ frame.sp >= stack_page + THREAD_SIZE ||
++ unwind_frame(&frame) < 0)
+ return 0;
+ if (!in_sched_functions(frame.pc))
+ return frame.pc;
+diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
+index 00f79e59985b..af4e8c8a5422 100644
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
+ high = ALIGN(low, THREAD_SIZE);
+
+ /* check current frame pointer is within bounds */
+- if (fp < (low + 12) || fp + 4 >= high)
++ if (fp < low + 12 || fp > high - 4)
+ return -EINVAL;
+
+ /* restore the registers from the stack frame */
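The one-line unwind_frame() change reads as an overflow-safety rewrite: with fp near the top of the address space, fp + 4 wraps around and the old comparison lets a wildly out-of-bounds frame pointer through, while fp > high - 4 keeps the arithmetic on the side that cannot wrap, since high is page-aligned and well above 4. Demonstrated with 32-bit values, where unsigned wraparound is well defined:

#include <stdint.h>
#include <stdio.h>

static int in_bounds_wrapping(uint32_t fp, uint32_t low, uint32_t high)
{
        return !(fp < low + 12 || fp + 4 >= high);      /* old check */
}

static int in_bounds_safe(uint32_t fp, uint32_t low, uint32_t high)
{
        return !(fp < low + 12 || fp > high - 4);       /* new check */
}

int main(void)
{
        uint32_t low = 0xc0000000u, high = 0xc0002000u;
        uint32_t bogus_fp = 0xfffffffeu;                /* fp + 4 wraps */

        printf("old: %d (wrap lets it through)\n",
               in_bounds_wrapping(bogus_fp, low, high));
        printf("new: %d\n", in_bounds_safe(bogus_fp, low, high));
        return 0;
}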
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 8fcda140358d..65ed63f68ef8 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -503,9 +503,10 @@ static inline int
+ __do_cache_op(unsigned long start, unsigned long end)
+ {
+ int ret;
+- unsigned long chunk = PAGE_SIZE;
+
+ do {
++ unsigned long chunk = min(PAGE_SIZE, end - start);
++
+ if (signal_pending(current)) {
+ struct thread_info *ti = current_thread_info();
+
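Moving the chunk computation inside the loop lets __do_cache_op() clamp the final chunk to min(PAGE_SIZE, end - start), so a range that is not a whole number of pages no longer overruns end. The same clamped loop with a byte counter in place of the cache-maintenance call:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

static unsigned long process_range(unsigned long start, unsigned long end)
{
        unsigned long done = 0;

        while (start < end) {
                unsigned long chunk = min_ul(PAGE_SIZE, end - start);

                done += chunk;          /* cache maintenance goes here */
                start += chunk;
        }
        return done;
}

int main(void)
{
        /* 2.5 pages: the last chunk is clamped to half a page */
        printf("%lu\n", process_range(0, 2 * PAGE_SIZE + PAGE_SIZE / 2));
        return 0;
}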
+diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
+index 8e63ccdb0de3..8e44973b0139 100644
+--- a/arch/arm/mach-highbank/highbank.c
++++ b/arch/arm/mach-highbank/highbank.c
+@@ -17,12 +17,15 @@
+ #include <linux/clkdev.h>
+ #include <linux/clocksource.h>
+ #include <linux/dma-mapping.h>
++#include <linux/input.h>
+ #include <linux/io.h>
+ #include <linux/irqchip.h>
++#include <linux/mailbox.h>
+ #include <linux/of.h>
+ #include <linux/of_irq.h>
+ #include <linux/of_platform.h>
+ #include <linux/of_address.h>
++#include <linux/reboot.h>
+ #include <linux/amba/bus.h>
+ #include <linux/clk-provider.h>
+
+@@ -153,6 +156,24 @@ static struct notifier_block highbank_platform_nb = {
+ .notifier_call = highbank_platform_notifier,
+ };
+
++static int hb_keys_notifier(struct notifier_block *nb, unsigned long event, void *data)
++{
++ u32 key = *(u32 *)data;
++
++ if (event != 0x1000)
++ return 0;
++
++ if (key == KEY_POWER)
++ orderly_poweroff(false);
++ else if (key == 0xffff)
++ ctrl_alt_del();
++
++ return 0;
++}
++static struct notifier_block hb_keys_nb = {
++ .notifier_call = hb_keys_notifier,
++};
++
+ static void __init highbank_init(void)
+ {
+ pm_power_off = highbank_power_off;
+@@ -161,6 +182,8 @@ static void __init highbank_init(void)
+ bus_register_notifier(&platform_bus_type, &highbank_platform_nb);
+ bus_register_notifier(&amba_bustype, &highbank_amba_nb);
+
++ pl320_ipc_register_notifier(&hb_keys_nb);
++
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ }
+
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 3d5db8c83b3c..832adb1a6dd2 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -399,7 +399,7 @@ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v)
+ }
+
+ /**
+- * _set_softreset: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v
++ * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v
+ * @oh: struct omap_hwmod *
+ * @v: pointer to register contents to modify
+ *
+@@ -427,6 +427,36 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
+ }
+
+ /**
++ * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v
++ * @oh: struct omap_hwmod *
++ * @v: pointer to register contents to modify
++ *
++ * Clear the SOFTRESET bit in @v for hwmod @oh. Returns -EINVAL upon
++ * error or 0 upon success.
++ */
++static int _clear_softreset(struct omap_hwmod *oh, u32 *v)
++{
++ u32 softrst_mask;
++
++ if (!oh->class->sysc ||
++ !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
++ return -EINVAL;
++
++ if (!oh->class->sysc->sysc_fields) {
++ WARN(1,
++ "omap_hwmod: %s: sysc_fields absent for sysconfig class\n",
++ oh->name);
++ return -EINVAL;
++ }
++
++ softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
++
++ *v &= ~softrst_mask;
++
++ return 0;
++}
++
++/**
+ * _wait_softreset_complete - wait for an OCP softreset to complete
+ * @oh: struct omap_hwmod * to wait on
+ *
+@@ -1911,6 +1941,12 @@ static int _ocp_softreset(struct omap_hwmod *oh)
+ ret = _set_softreset(oh, &v);
+ if (ret)
+ goto dis_opt_clks;
++
++ _write_sysconfig(v, oh);
++ ret = _clear_softreset(oh, &v);
++ if (ret)
++ goto dis_opt_clks;
++
+ _write_sysconfig(v, oh);
+
+ if (oh->class->sysc->srst_udelay)
+@@ -3159,6 +3195,11 @@ int omap_hwmod_softreset(struct omap_hwmod *oh)
+ goto error;
+ _write_sysconfig(v, oh);
+
++ ret = _clear_softreset(oh, &v);
++ if (ret)
++ goto error;
++ _write_sysconfig(v, oh);
++
+ error:
+ return ret;
+ }
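_ocp_softreset() and omap_hwmod_softreset() now pulse the bit explicitly: set SOFTRESET and write the register, then clear it and write again, since not every OMAP IP block clears the bit on its own. A tiny register model of that sequence; the shift value here is made up, as the real one comes from the per-IP sysc_fields:

#include <stdint.h>
#include <stdio.h>

#define SOFTRESET_MASK  (1u << 1)       /* hypothetical srst_shift of 1 */

static uint32_t sysconfig;              /* stands in for the MMIO reg   */

static void write_sysconfig(uint32_t v)
{
        sysconfig = v;
        printf("SYSCONFIG <- 0x%08x\n", v);
}

static void ocp_softreset(void)
{
        uint32_t v = sysconfig;

        v |= SOFTRESET_MASK;            /* _set_softreset()   */
        write_sysconfig(v);

        v &= ~SOFTRESET_MASK;           /* _clear_softreset() */
        write_sysconfig(v);
}

int main(void)
{
        sysconfig = 0x00000010;
        ocp_softreset();
        return 0;
}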
+diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+index 0c3a427da544..f234cbec0cb9 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+@@ -1943,7 +1943,8 @@ static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = {
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
+- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
++ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
++ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+@@ -2021,15 +2022,7 @@ static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
+ * hence HWMOD_SWSUP_MSTANDBY
+ */
+
+- /*
+- * During system boot; If the hwmod framework resets the module
+- * the module will have smart idle settings; which can lead to deadlock
+- * (above Errata Id:i660); so, dont reset the module during boot;
+- * Use HWMOD_INIT_NO_RESET.
+- */
+-
+- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
+- HWMOD_INIT_NO_RESET,
++ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
+ };
+
+ /*
+diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
+index 0d5dd646f61f..263b15249b5b 100644
+--- a/arch/arm/mach-pxa/reset.c
++++ b/arch/arm/mach-pxa/reset.c
+@@ -13,6 +13,7 @@
+
+ #include <mach/regs-ost.h>
+ #include <mach/reset.h>
++#include <mach/smemc.h>
+
+ unsigned int reset_status;
+ EXPORT_SYMBOL(reset_status);
+@@ -81,6 +82,12 @@ static void do_hw_reset(void)
+ writel_relaxed(OSSR_M3, OSSR);
+ /* ... in 100 ms */
+ writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3);
++ /*
++ * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71);
++ * we put SDRAM into self-refresh to prevent that.
++ */
++ while (1)
++ writel_relaxed(MDREFR_SLFRSH, MDREFR);
+ }
+
+ void pxa_restart(enum reboot_mode mode, const char *cmd)
+@@ -104,4 +111,3 @@ void pxa_restart(enum reboot_mode mode, const char *cmd)
+ break;
+ }
+ }
+-
+diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
+index 0206b915a6f6..ef5557b807ed 100644
+--- a/arch/arm/mach-pxa/tosa.c
++++ b/arch/arm/mach-pxa/tosa.c
+@@ -425,57 +425,57 @@ static struct platform_device tosa_power_device = {
+ * Tosa Keyboard
+ */
+ static const uint32_t tosakbd_keymap[] = {
+- KEY(0, 2, KEY_W),
+- KEY(0, 6, KEY_K),
+- KEY(0, 7, KEY_BACKSPACE),
+- KEY(0, 8, KEY_P),
+- KEY(1, 1, KEY_Q),
+- KEY(1, 2, KEY_E),
+- KEY(1, 3, KEY_T),
+- KEY(1, 4, KEY_Y),
+- KEY(1, 6, KEY_O),
+- KEY(1, 7, KEY_I),
+- KEY(1, 8, KEY_COMMA),
+- KEY(2, 1, KEY_A),
+- KEY(2, 2, KEY_D),
+- KEY(2, 3, KEY_G),
+- KEY(2, 4, KEY_U),
+- KEY(2, 6, KEY_L),
+- KEY(2, 7, KEY_ENTER),
+- KEY(2, 8, KEY_DOT),
+- KEY(3, 1, KEY_Z),
+- KEY(3, 2, KEY_C),
+- KEY(3, 3, KEY_V),
+- KEY(3, 4, KEY_J),
+- KEY(3, 5, TOSA_KEY_ADDRESSBOOK),
+- KEY(3, 6, TOSA_KEY_CANCEL),
+- KEY(3, 7, TOSA_KEY_CENTER),
+- KEY(3, 8, TOSA_KEY_OK),
+- KEY(3, 9, KEY_LEFTSHIFT),
+- KEY(4, 1, KEY_S),
+- KEY(4, 2, KEY_R),
+- KEY(4, 3, KEY_B),
+- KEY(4, 4, KEY_N),
+- KEY(4, 5, TOSA_KEY_CALENDAR),
+- KEY(4, 6, TOSA_KEY_HOMEPAGE),
+- KEY(4, 7, KEY_LEFTCTRL),
+- KEY(4, 8, TOSA_KEY_LIGHT),
+- KEY(4, 10, KEY_RIGHTSHIFT),
+- KEY(5, 1, KEY_TAB),
+- KEY(5, 2, KEY_SLASH),
+- KEY(5, 3, KEY_H),
+- KEY(5, 4, KEY_M),
+- KEY(5, 5, TOSA_KEY_MENU),
+- KEY(5, 7, KEY_UP),
+- KEY(5, 11, TOSA_KEY_FN),
+- KEY(6, 1, KEY_X),
+- KEY(6, 2, KEY_F),
+- KEY(6, 3, KEY_SPACE),
+- KEY(6, 4, KEY_APOSTROPHE),
+- KEY(6, 5, TOSA_KEY_MAIL),
+- KEY(6, 6, KEY_LEFT),
+- KEY(6, 7, KEY_DOWN),
+- KEY(6, 8, KEY_RIGHT),
++ KEY(0, 1, KEY_W),
++ KEY(0, 5, KEY_K),
++ KEY(0, 6, KEY_BACKSPACE),
++ KEY(0, 7, KEY_P),
++ KEY(1, 0, KEY_Q),
++ KEY(1, 1, KEY_E),
++ KEY(1, 2, KEY_T),
++ KEY(1, 3, KEY_Y),
++ KEY(1, 5, KEY_O),
++ KEY(1, 6, KEY_I),
++ KEY(1, 7, KEY_COMMA),
++ KEY(2, 0, KEY_A),
++ KEY(2, 1, KEY_D),
++ KEY(2, 2, KEY_G),
++ KEY(2, 3, KEY_U),
++ KEY(2, 5, KEY_L),
++ KEY(2, 6, KEY_ENTER),
++ KEY(2, 7, KEY_DOT),
++ KEY(3, 0, KEY_Z),
++ KEY(3, 1, KEY_C),
++ KEY(3, 2, KEY_V),
++ KEY(3, 3, KEY_J),
++ KEY(3, 4, TOSA_KEY_ADDRESSBOOK),
++ KEY(3, 5, TOSA_KEY_CANCEL),
++ KEY(3, 6, TOSA_KEY_CENTER),
++ KEY(3, 7, TOSA_KEY_OK),
++ KEY(3, 8, KEY_LEFTSHIFT),
++ KEY(4, 0, KEY_S),
++ KEY(4, 1, KEY_R),
++ KEY(4, 2, KEY_B),
++ KEY(4, 3, KEY_N),
++ KEY(4, 4, TOSA_KEY_CALENDAR),
++ KEY(4, 5, TOSA_KEY_HOMEPAGE),
++ KEY(4, 6, KEY_LEFTCTRL),
++ KEY(4, 7, TOSA_KEY_LIGHT),
++ KEY(4, 9, KEY_RIGHTSHIFT),
++ KEY(5, 0, KEY_TAB),
++ KEY(5, 1, KEY_SLASH),
++ KEY(5, 2, KEY_H),
++ KEY(5, 3, KEY_M),
++ KEY(5, 4, TOSA_KEY_MENU),
++ KEY(5, 6, KEY_UP),
++ KEY(5, 10, TOSA_KEY_FN),
++ KEY(6, 0, KEY_X),
++ KEY(6, 1, KEY_F),
++ KEY(6, 2, KEY_SPACE),
++ KEY(6, 3, KEY_APOSTROPHE),
++ KEY(6, 4, TOSA_KEY_MAIL),
++ KEY(6, 5, KEY_LEFT),
++ KEY(6, 6, KEY_DOWN),
++ KEY(6, 7, KEY_RIGHT),
+ };
+
+ static struct matrix_keymap_data tosakbd_keymap_data = {
+diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
+index d57e66845c86..2e9d83673ef6 100644
+--- a/arch/arm64/include/asm/pgtable-hwdef.h
++++ b/arch/arm64/include/asm/pgtable-hwdef.h
+@@ -43,7 +43,7 @@
+ * Section
+ */
+ #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
+-#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2)
++#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
+ #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
+ #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
+ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
+diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
+index 27b2386f738a..842846c1b711 100644
+--- a/arch/powerpc/include/asm/pgalloc-32.h
++++ b/arch/powerpc/include/asm/pgalloc-32.h
+@@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+ {
+- struct page *page = page_address(table);
+-
+ tlb_flush_pgtable(tlb, address);
+- pgtable_page_dtor(page);
+- pgtable_free_tlb(tlb, page, 0);
++ pgtable_page_dtor(table);
++ pgtable_free_tlb(tlb, page_address(table), 0);
+ }
+ #endif /* _ASM_POWERPC_PGALLOC_32_H */
+diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
+index f65e27b09bd3..256d6f8a26a8 100644
+--- a/arch/powerpc/include/asm/pgalloc-64.h
++++ b/arch/powerpc/include/asm/pgalloc-64.h
+@@ -144,11 +144,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+ {
+- struct page *page = page_address(table);
+-
+ tlb_flush_pgtable(tlb, address);
+- pgtable_page_dtor(page);
+- pgtable_free_tlb(tlb, page, 0);
++ pgtable_page_dtor(table);
++ pgtable_free_tlb(tlb, page_address(table), 0);
+ }
+
+ #else /* if CONFIG_PPC_64K_PAGES */
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index eda00f9be0cf..57d021507120 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -31,8 +31,8 @@ ifeq ($(CONFIG_X86_32),y)
+
+ KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
+
+- # Don't autogenerate SSE instructions
+- KBUILD_CFLAGS += -mno-sse
++ # Don't autogenerate MMX or SSE instructions
++ KBUILD_CFLAGS += -mno-mmx -mno-sse
+
+ # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
+ # with nonstandard options
+@@ -60,8 +60,8 @@ else
+ KBUILD_AFLAGS += -m64
+ KBUILD_CFLAGS += -m64
+
+- # Don't autogenerate SSE instructions
+- KBUILD_CFLAGS += -mno-sse
++ # Don't autogenerate MMX or SSE instructions
++ KBUILD_CFLAGS += -mno-mmx -mno-sse
+
+ # Use -mpreferred-stack-boundary=3 if supported.
+ KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 379814bc41e3..6cf0111783d3 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -53,18 +53,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
+
+ # How to compile the 16-bit code. Note we always compile for -march=i386,
+ # that way we can complain to the user if the CPU is insufficient.
+-KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
++KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
+ -DDISABLE_BRANCH_PROFILING \
+ -Wall -Wstrict-prototypes \
+ -march=i386 -mregparm=3 \
+ -include $(srctree)/$(src)/code16gcc.h \
+ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
++ -mno-mmx -mno-sse \
+ $(call cc-option, -ffreestanding) \
+ $(call cc-option, -fno-toplevel-reorder,\
+- $(call cc-option, -fno-unit-at-a-time)) \
++ $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+-KBUILD_CFLAGS += $(call cc-option, -m32)
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index dcd90df10ab4..c8a6792e7842 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -13,6 +13,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+ cflags-$(CONFIG_X86_32) := -march=i386
+ cflags-$(CONFIG_X86_64) := -mcmodel=small
+ KBUILD_CFLAGS += $(cflags-y)
++KBUILD_CFLAGS += -mno-mmx -mno-sse
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
+
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 5439117d5c4c..dec48bfaddb8 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -143,6 +143,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
+ return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
+ }
+
++#define KVM_X2APIC_CID_BITS 0
++
+ static void recalculate_apic_map(struct kvm *kvm)
+ {
+ struct kvm_apic_map *new, *old = NULL;
+@@ -180,7 +182,8 @@ static void recalculate_apic_map(struct kvm *kvm)
+ if (apic_x2apic_mode(apic)) {
+ new->ldr_bits = 32;
+ new->cid_shift = 16;
+- new->cid_mask = new->lid_mask = 0xffff;
++ new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
++ new->lid_mask = 0xffff;
+ } else if (kvm_apic_sw_enabled(apic) &&
+ !new->cid_mask /* flat mode */ &&
+ kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
+@@ -841,7 +844,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
+ ASSERT(apic != NULL);
+
+ /* if initial count is 0, current count should also be 0 */
+- if (kvm_apic_get_reg(apic, APIC_TMICT) == 0)
++ if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
++ apic->lapic_timer.period == 0)
+ return 0;
+
+ remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
+@@ -1691,7 +1695,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
+ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
+ {
+ u32 data;
+- void *vapic;
+
+ if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
+ apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
+@@ -1699,9 +1702,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
+ if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
+ return;
+
+- vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
+- data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
+- kunmap_atomic(vapic);
++ kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
++ sizeof(u32));
+
+ apic_set_tpr(vcpu->arch.apic, data & 0xff);
+ }
+@@ -1737,7 +1739,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
+ u32 data, tpr;
+ int max_irr, max_isr;
+ struct kvm_lapic *apic = vcpu->arch.apic;
+- void *vapic;
+
+ apic_sync_pv_eoi_to_guest(vcpu, apic);
+
+@@ -1753,18 +1754,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
+ max_isr = 0;
+ data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
+
+- vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
+- *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
+- kunmap_atomic(vapic);
++ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
++ sizeof(u32));
+ }
+
+-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
++int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+ {
+- vcpu->arch.apic->vapic_addr = vapic_addr;
+- if (vapic_addr)
++ if (vapic_addr) {
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
++ &vcpu->arch.apic->vapic_cache,
++ vapic_addr, sizeof(u32)))
++ return -EINVAL;
+ __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
+- else
++ } else {
+ __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
++ }
++
++ vcpu->arch.apic->vapic_addr = vapic_addr;
++ return 0;
+ }
+
+ int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
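Taken together, the lapic.c hunks replace a permanently mapped vapic page with a gfn_to_hva_cache: the guest address is validated and translated once when userspace sets it, and each sync is then a cached guest read or write with no page pinning or kmap_atomic(). A condensed sketch of that access pattern, using only the helpers the patch itself calls:

/*
 * Sketch only: "vapic_cache_example" is a made-up name; the helpers
 * (kvm_gfn_to_hva_cache_init, kvm_read/write_guest_cached) are the
 * ones used in the hunks above.
 */
static int vapic_cache_example(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct gfn_to_hva_cache cache;
	u32 data;

	/* Validate and translate once, at "set address" time. */
	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &cache, gpa, sizeof(u32)))
		return -EINVAL;

	/* Fast path on every sync: no struct page reference held. */
	kvm_read_guest_cached(vcpu->kvm, &cache, &data, sizeof(u32));
	data &= 0xff;
	kvm_write_guest_cached(vcpu->kvm, &cache, &data, sizeof(u32));
	return 0;
}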
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index c730ac9fe801..c8b0d0d2da5c 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -34,7 +34,7 @@ struct kvm_lapic {
+ */
+ void *regs;
+ gpa_t vapic_addr;
+- struct page *vapic_page;
++ struct gfn_to_hva_cache vapic_cache;
+ unsigned long pending_events;
+ unsigned int sipi_vector;
+ };
+@@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
+ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
+ void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
+
+-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
++int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
+ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e5ca72a5cdb6..eb9b9c9fc3d9 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3192,8 +3192,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ r = -EFAULT;
+ if (copy_from_user(&va, argp, sizeof va))
+ goto out;
+- r = 0;
+- kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
++ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+ break;
+ }
+ case KVM_X86_SETUP_MCE: {
+@@ -5718,36 +5717,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
+ !kvm_event_needs_reinjection(vcpu);
+ }
+
+-static int vapic_enter(struct kvm_vcpu *vcpu)
+-{
+- struct kvm_lapic *apic = vcpu->arch.apic;
+- struct page *page;
+-
+- if (!apic || !apic->vapic_addr)
+- return 0;
+-
+- page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+- if (is_error_page(page))
+- return -EFAULT;
+-
+- vcpu->arch.apic->vapic_page = page;
+- return 0;
+-}
+-
+-static void vapic_exit(struct kvm_vcpu *vcpu)
+-{
+- struct kvm_lapic *apic = vcpu->arch.apic;
+- int idx;
+-
+- if (!apic || !apic->vapic_addr)
+- return;
+-
+- idx = srcu_read_lock(&vcpu->kvm->srcu);
+- kvm_release_page_dirty(apic->vapic_page);
+- mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+- srcu_read_unlock(&vcpu->kvm->srcu, idx);
+-}
+-
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+ {
+ int max_irr, tpr;
+@@ -6047,11 +6016,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
+ struct kvm *kvm = vcpu->kvm;
+
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+- r = vapic_enter(vcpu);
+- if (r) {
+- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+- return r;
+- }
+
+ r = 1;
+ while (r > 0) {
+@@ -6110,8 +6074,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
+
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+
+- vapic_exit(vcpu);
+-
+ return r;
+ }
+
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index c7e22ab29a5a..220fa52b9bd0 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -768,13 +768,6 @@ void __init efi_init(void)
+
+ set_bit(EFI_MEMMAP, &x86_efi_facility);
+
+-#ifdef CONFIG_X86_32
+- if (efi_is_native()) {
+- x86_platform.get_wallclock = efi_get_time;
+- x86_platform.set_wallclock = efi_set_rtc_mmss;
+- }
+-#endif
+-
+ #if EFI_DEBUG
+ print_efi_memmap();
+ #endif
+diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
+index 0f92173a12b6..efe4d7220397 100644
+--- a/arch/x86/platform/uv/tlb_uv.c
++++ b/arch/x86/platform/uv/tlb_uv.c
+@@ -1070,12 +1070,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+ unsigned long status;
+
+ bcp = &per_cpu(bau_control, cpu);
+- stat = bcp->statp;
+- stat->s_enters++;
+
+ if (bcp->nobau)
+ return cpumask;
+
++ stat = bcp->statp;
++ stat->s_enters++;
++
+ if (bcp->busy) {
+ descriptor_status =
+ read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
+diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
+index 88692871823f..9cac82588cbc 100644
+--- a/arch/x86/realmode/rm/Makefile
++++ b/arch/x86/realmode/rm/Makefile
+@@ -73,9 +73,10 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
+ -march=i386 -mregparm=3 \
+ -include $(srctree)/$(src)/../../boot/code16gcc.h \
+ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
++ -mno-mmx -mno-sse \
+ $(call cc-option, -ffreestanding) \
+ $(call cc-option, -fno-toplevel-reorder,\
+- $(call cc-option, -fno-unit-at-a-time)) \
++ $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index 585c3b279feb..850246206b12 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -117,9 +117,6 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
+- if (flags & MSG_SENDPAGE_NOTLAST)
+- flags |= MSG_MORE;
+-
+ lock_sock(sk);
+ sg_init_table(ctx->sgl.sg, 1);
+ sg_set_page(ctx->sgl.sg, page, size, offset);
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 918a3b4148b8..a19c027b29bd 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -381,9 +381,6 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
+- if (flags & MSG_SENDPAGE_NOTLAST)
+- flags |= MSG_MORE;
+-
+ lock_sock(sk);
+ if (!ctx->more && ctx->used)
+ goto unlock;
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 333aa1bca13d..f535670b42d1 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -1429,6 +1429,20 @@ static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+ POSTING_READ(DPLL(pipe));
+ }
+
++static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
++{
++ u32 val = 0;
++
++ /* Make sure the pipe isn't still relying on us */
++ assert_pipe_disabled(dev_priv, pipe);
++
++ /* Leave integrated clock source enabled */
++ if (pipe == PIPE_B)
++ val = DPLL_INTEGRATED_CRI_CLK_VLV;
++ I915_WRITE(DPLL(pipe), val);
++ POSTING_READ(DPLL(pipe));
++}
++
+ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
+ {
+ u32 port_mask;
+@@ -3824,7 +3838,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
+- i9xx_disable_pll(dev_priv, pipe);
++ if (IS_VALLEYVIEW(dev))
++ vlv_disable_pll(dev_priv, pipe);
++ else
++ i9xx_disable_pll(dev_priv, pipe);
+
+ intel_crtc->active = false;
+ intel_update_fbc(dev);
+@@ -4553,9 +4570,9 @@ static void vlv_update_pll(struct intel_crtc *crtc)
+ /* Enable DPIO clock input */
+ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
+ DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+- if (pipe)
++ /* We should never disable this, set it here for state tracking */
++ if (pipe == PIPE_B)
+ dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+-
+ dpll |= DPLL_VCO_ENABLE;
+ crtc->config.dpll_hw_state.dpll = dpll;
+
+@@ -5015,6 +5032,32 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
+ I915_READ(LVDS) & LVDS_BORDER_ENABLE;
+ }
+
++static void vlv_crtc_clock_get(struct intel_crtc *crtc,
++ struct intel_crtc_config *pipe_config)
++{
++ struct drm_device *dev = crtc->base.dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int pipe = pipe_config->cpu_transcoder;
++ intel_clock_t clock;
++ u32 mdiv;
++ int refclk = 100000;
++
++ mutex_lock(&dev_priv->dpio_lock);
++ mdiv = vlv_dpio_read(dev_priv, DPIO_DIV(pipe));
++ mutex_unlock(&dev_priv->dpio_lock);
++
++ clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
++ clock.m2 = mdiv & DPIO_M2DIV_MASK;
++ clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
++ clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
++ clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
++
++ clock.vco = refclk * clock.m1 * clock.m2 / clock.n;
++ clock.dot = 2 * clock.vco / (clock.p1 * clock.p2);
++
++ pipe_config->adjusted_mode.clock = clock.dot / 10;
++}
++
+ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+ {
+@@ -5546,7 +5589,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
+ uint16_t postoff = 0;
+
+ if (intel_crtc->config.limited_color_range)
+- postoff = (16 * (1 << 13) / 255) & 0x1fff;
++ postoff = (16 * (1 << 12) / 255) & 0x1fff;
+
+ I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
+@@ -6062,7 +6105,7 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+
+ /* Make sure we're not on PC8 state before disabling PC8, otherwise
+ * we'll hang the machine! */
+- dev_priv->uncore.funcs.force_wake_get(dev_priv);
++ gen6_gt_force_wake_get(dev_priv);
+
+ if (val & LCPLL_POWER_DOWN_ALLOW) {
+ val &= ~LCPLL_POWER_DOWN_ALLOW;
+@@ -6093,7 +6136,7 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+ DRM_ERROR("Switching back to LCPLL failed\n");
+ }
+
+- dev_priv->uncore.funcs.force_wake_put(dev_priv);
++ gen6_gt_force_wake_put(dev_priv);
+ }
+
+ void hsw_enable_pc8_work(struct work_struct *__work)
+@@ -9832,7 +9875,7 @@ static void intel_init_display(struct drm_device *dev)
+ dev_priv->display.update_plane = ironlake_update_plane;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+- dev_priv->display.get_clock = i9xx_crtc_clock_get;
++ dev_priv->display.get_clock = vlv_crtc_clock_get;
+ dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_enable = valleyview_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
+@@ -10088,12 +10131,19 @@ static void i915_disable_vga(struct drm_device *dev)
+
+ void intel_modeset_init_hw(struct drm_device *dev)
+ {
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
+ intel_init_power_well(dev);
+
+ intel_prepare_ddi(dev);
+
+ intel_init_clock_gating(dev);
+
++ /* Enable the CRI clock source so we can get at the display */
++ if (IS_VALLEYVIEW(dev))
++ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
++ DPLL_INTEGRATED_CRI_CLK_VLV);
++
+ mutex_lock(&dev->struct_mutex);
+ intel_enable_gt_powersave(dev);
+ mutex_unlock(&dev->struct_mutex);
+diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
+index 0652ee0a2098..f685035dbe39 100644
+--- a/drivers/gpu/drm/radeon/atombios_i2c.c
++++ b/drivers/gpu/drm/radeon/atombios_i2c.c
+@@ -44,7 +44,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
+ PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+ unsigned char *base;
+- u16 out;
++ u16 out = cpu_to_le16(0);
+
+ memset(&args, 0, sizeof(args));
+
+@@ -55,11 +55,14 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
+ DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
+ return -EINVAL;
+ }
+- args.ucRegIndex = buf[0];
+- if (num > 1) {
++ if (buf == NULL)
++ args.ucRegIndex = 0;
++ else
++ args.ucRegIndex = buf[0];
++ if (num)
+ num--;
++ if (num)
+ memcpy(&out, &buf[1], num);
+- }
+ args.lpI2CDataOut = cpu_to_le16(out);
+ } else {
+ if (num > ATOM_MAX_HW_I2C_READ) {
+@@ -96,14 +99,14 @@ int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct i2c_msg *p;
+ int i, remaining, current_count, buffer_offset, max_bytes, ret;
+- u8 buf = 0, flags;
++ u8 flags;
+
+ /* check for bus probe */
+ p = &msgs[0];
+ if ((num == 1) && (p->len == 0)) {
+ ret = radeon_process_i2c_ch(i2c,
+ p->addr, HW_I2C_WRITE,
+- &buf, 1);
++ NULL, 0);
+ if (ret)
+ return ret;
+ else
+diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
+index 615c5b290e78..7f3b0d9aaada 100644
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -304,9 +304,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+ WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+ }
+- } else if (ASIC_IS_DCE3(rdev)) {
++ } else {
+ 	/* according to the reg specs, this should be DCE3.2 only, but in
+- * practice it seems to cover DCE3.0/3.1 as well.
++ * practice it seems to cover DCE2.0/3.0/3.1 as well.
+ */
+ if (dig->dig_encoder == 0) {
+ WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+@@ -317,10 +317,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+ }
+- } else {
+- /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
+- WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
+- AUDIO_DTO_MODULE(clock / 10));
+ }
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index f79ee184ffd5..5c39bf7c3d88 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -2918,7 +2918,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
+ mpll_param->dll_speed = args.ucDllSpeed;
+ mpll_param->bwcntl = args.ucBWCntl;
+ mpll_param->vco_mode =
+- (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0;
++ (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
+ mpll_param->yclk_sel =
+ (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
+ mpll_param->qdr =
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index d96f7cbca0a1..fe0ec2cb2084 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -3887,8 +3887,15 @@ static int si_mc_init(struct radeon_device *rdev)
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+ /* size in MB on si */
+- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
++ tmp = RREG32(CONFIG_MEMSIZE);
++ /* some boards may have garbage in the upper 16 bits */
++ if (tmp & 0xffff0000) {
++ DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
++ if (tmp & 0xffff)
++ tmp &= 0xffff;
++ }
++ rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
++ rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ si_vram_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
+diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
+index 8bf646183bac..f738800c70cf 100644
+--- a/drivers/gpu/drm/udl/udl_gem.c
++++ b/drivers/gpu/drm/udl/udl_gem.c
+@@ -132,6 +132,12 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
+
+ static void udl_gem_put_pages(struct udl_gem_object *obj)
+ {
++ if (obj->base.import_attach) {
++ drm_free_large(obj->pages);
++ obj->pages = NULL;
++ return;
++ }
++
+ drm_gem_put_pages(&obj->base, obj->pages, false, false);
+ obj->pages = NULL;
+ }
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index c08b5c14fece..aedfe50d557a 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1725,6 +1725,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 9480b425b254..aeeea796f595 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -489,6 +489,7 @@
+ #define USB_VENDOR_ID_KYE 0x0458
+ #define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
+ #define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138
++#define USB_DEVICE_ID_GENIUS_MANTICORE 0x0153
+ #define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018
+ #define USB_DEVICE_ID_KYE_GPEN_560 0x5003
+ #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
+diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
+index 73845120295e..d645caa690dd 100644
+--- a/drivers/hid/hid-kye.c
++++ b/drivers/hid/hid-kye.c
+@@ -342,6 +342,10 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
+ "Genius Gx Imperator Keyboard");
+ break;
++ case USB_DEVICE_ID_GENIUS_MANTICORE:
++ rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
++ "Genius Manticore Keyboard");
++ break;
+ }
+ return rdesc;
+ }
+@@ -439,6 +443,8 @@ static const struct hid_device_id kye_devices[] = {
+ USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+ USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
++ USB_DEVICE_ID_GENIUS_MANTICORE) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, kye_devices);
+diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
+index 2dc37c7c6947..7d68a08baaa8 100644
+--- a/drivers/hwmon/hih6130.c
++++ b/drivers/hwmon/hih6130.c
+@@ -43,6 +43,7 @@
+ * @last_update: time of last update (jiffies)
+ * @temperature: cached temperature measurement value
+ * @humidity: cached humidity measurement value
++ * @write_length: length for I2C measurement request
+ */
+ struct hih6130 {
+ struct device *hwmon_dev;
+@@ -51,6 +52,7 @@ struct hih6130 {
+ unsigned long last_update;
+ int temperature;
+ int humidity;
++ size_t write_length;
+ };
+
+ /**
+@@ -121,8 +123,15 @@ static int hih6130_update_measurements(struct i2c_client *client)
+ */
+ if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) {
+
+- /* write to slave address, no data, to request a measurement */
+- ret = i2c_master_send(client, tmp, 0);
++ /*
++ * Write to slave address to request a measurement.
++		 * According to the datasheet it should carry no data, but
++		 * for systems with I2C bus drivers that do not allow zero-
++		 * length packets we write one dummy byte so that sensor
++		 * measurements still work on them.
++ */
++ tmp[0] = 0;
++ ret = i2c_master_send(client, tmp, hih6130->write_length);
+ if (ret < 0)
+ goto out;
+
+@@ -252,6 +261,9 @@ static int hih6130_probe(struct i2c_client *client,
+ goto fail_remove_sysfs;
+ }
+
++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
++ hih6130->write_length = 1;
++
+ return 0;
+
+ fail_remove_sysfs:
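The driver used to request a measurement with a zero-length I2C write, which some bus drivers reject outright; adapters that lack I2C_FUNC_SMBUS_QUICK now get a one-byte dummy write instead. The fallback in isolation, as a rough sketch (function name is illustrative):

static int request_measurement(struct i2c_client *client)
{
	char dummy = 0;
	int len = 0;	/* zero-length write per the datasheet */

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
		len = 1;	/* adapter cannot do zero-length transfers */

	return i2c_master_send(client, &dummy, len);
}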
+diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
+index 6cf6bff79003..a2f3b4a365e4 100644
+--- a/drivers/hwmon/lm78.c
++++ b/drivers/hwmon/lm78.c
+@@ -94,6 +94,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
+ {
+ if (rpm <= 0)
+ return 255;
++ if (rpm > 1350000)
++ return 1;
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ }
+
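The new rpm > 1350000 guard in lm78 (and the identical one in sis5595 below) exists because the intermediate 1350000 + rpm * div / 2 product can overflow a 32-bit long for absurd user input before clamp_val() ever runs; anything above the highest encodable speed maps straight to register value 1. A standalone, runnable version of the conversion (clamp_val re-stated as a plain macro for illustration):

#include <stdio.h>

#define clamp_val(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

static unsigned char fan_to_reg(long rpm, int div)
{
	if (rpm <= 0)
		return 255;		/* stopped or invalid reading */
	if (rpm > 1350000)
		return 1;		/* guard: product below would overflow 32-bit long */
	return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}

int main(void)
{
	printf("%u\n", fan_to_reg(4500, 2));	/* typical fan: 150 */
	printf("%u\n", fan_to_reg(2000000, 2));	/* out of range: 1 via the guard */
	return 0;
}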
+diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
+index 1404e6319deb..72a889702f0d 100644
+--- a/drivers/hwmon/sis5595.c
++++ b/drivers/hwmon/sis5595.c
+@@ -141,6 +141,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
+ {
+ if (rpm <= 0)
+ return 255;
++ if (rpm > 1350000)
++ return 1;
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ }
+
+diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
+index 0e7017841f7d..aee14e2192f8 100644
+--- a/drivers/hwmon/vt8231.c
++++ b/drivers/hwmon/vt8231.c
+@@ -145,7 +145,7 @@ static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 };
+ */
+ static inline u8 FAN_TO_REG(long rpm, int div)
+ {
+- if (rpm == 0)
++ if (rpm <= 0 || rpm > 1310720)
+ return 0;
+ return clamp_val(1310720 / (rpm * div), 1, 255);
+ }
+diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
+index edb06cda5a68..6ed76ceb9270 100644
+--- a/drivers/hwmon/w83l786ng.c
++++ b/drivers/hwmon/w83l786ng.c
+@@ -481,9 +481,11 @@ store_pwm(struct device *dev, struct device_attribute *attr,
+ if (err)
+ return err;
+ val = clamp_val(val, 0, 255);
++ val = DIV_ROUND_CLOSEST(val, 0x11);
+
+ mutex_lock(&data->update_lock);
+- data->pwm[nr] = val;
++ data->pwm[nr] = val * 0x11;
++ val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0;
+ w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val);
+ mutex_unlock(&data->update_lock);
+ return count;
+@@ -510,7 +512,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
+ mutex_lock(&data->update_lock);
+ reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG);
+ data->pwm_enable[nr] = val;
+- reg &= ~(0x02 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
++ reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
+ reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr];
+ w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg);
+ mutex_unlock(&data->update_lock);
+@@ -776,9 +778,10 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev)
+ ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1)
+ ? 0 : 1;
+ data->pwm_enable[i] =
+- ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 2) + 1;
+- data->pwm[i] = w83l786ng_read_value(client,
+- W83L786NG_REG_PWM[i]);
++ ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1;
++ data->pwm[i] =
++ (w83l786ng_read_value(client, W83L786NG_REG_PWM[i])
++ & 0x0f) * 0x11;
+ }
+
+
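The PWM fix stores the duty cycle in the register's low nibble: user values 0-255 are rounded to one of 16 hardware steps on write and expanded back by 0x11 on read, so sysfs round-trips stay consistent. A runnable model of that encoding:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	for (unsigned val = 0; val <= 255; val += 51) {
		unsigned reg = DIV_ROUND_CLOSEST(val, 0x11);	/* 4-bit step, 0..15 */
		printf("user %3u -> reg 0x%x -> reported %3u\n",
		       val, reg, reg * 0x11);	/* what sysfs reads back */
	}
	return 0;
}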
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 8551dcaf24db..597e9b8fc18d 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1313,6 +1313,7 @@ static int elantech_set_properties(struct elantech_data *etd)
+ break;
+ case 6:
+ case 7:
++ case 8:
+ etd->hw_version = 4;
+ break;
+ default:
+diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
+index ae4b6b903629..5f87bed05467 100644
+--- a/drivers/input/touchscreen/usbtouchscreen.c
++++ b/drivers/input/touchscreen/usbtouchscreen.c
+@@ -106,6 +106,7 @@ struct usbtouch_device_info {
+ struct usbtouch_usb {
+ unsigned char *data;
+ dma_addr_t data_dma;
++ int data_size;
+ unsigned char *buffer;
+ int buf_len;
+ struct urb *irq;
+@@ -1521,7 +1522,7 @@ static int usbtouch_reset_resume(struct usb_interface *intf)
+ static void usbtouch_free_buffers(struct usb_device *udev,
+ struct usbtouch_usb *usbtouch)
+ {
+- usb_free_coherent(udev, usbtouch->type->rept_size,
++ usb_free_coherent(udev, usbtouch->data_size,
+ usbtouch->data, usbtouch->data_dma);
+ kfree(usbtouch->buffer);
+ }
+@@ -1566,7 +1567,20 @@ static int usbtouch_probe(struct usb_interface *intf,
+ if (!type->process_pkt)
+ type->process_pkt = usbtouch_process_pkt;
+
+- usbtouch->data = usb_alloc_coherent(udev, type->rept_size,
++ usbtouch->data_size = type->rept_size;
++ if (type->get_pkt_len) {
++ /*
++ * When dealing with variable-length packets we should
++ * not request more than wMaxPacketSize bytes at once
++ * as we do not know if there is more data coming or
++ * we filled exactly wMaxPacketSize bytes and there is
++ * nothing else.
++ */
++ usbtouch->data_size = min(usbtouch->data_size,
++ usb_endpoint_maxp(endpoint));
++ }
++
++ usbtouch->data = usb_alloc_coherent(udev, usbtouch->data_size,
+ GFP_KERNEL, &usbtouch->data_dma);
+ if (!usbtouch->data)
+ goto out_free;
+@@ -1626,12 +1640,12 @@ static int usbtouch_probe(struct usb_interface *intf,
+ if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT)
+ usb_fill_int_urb(usbtouch->irq, udev,
+ usb_rcvintpipe(udev, endpoint->bEndpointAddress),
+- usbtouch->data, type->rept_size,
++ usbtouch->data, usbtouch->data_size,
+ usbtouch_irq, usbtouch, endpoint->bInterval);
+ else
+ usb_fill_bulk_urb(usbtouch->irq, udev,
+ usb_rcvbulkpipe(udev, endpoint->bEndpointAddress),
+- usbtouch->data, type->rept_size,
++ usbtouch->data, usbtouch->data_size,
+ usbtouch_irq, usbtouch);
+
+ usbtouch->irq->dev = udev;
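For touchscreen protocols with a get_pkt_len callback the device sends variable-length packets, so the URB must not request more than the endpoint's wMaxPacketSize: a larger read could span two packets with no way to split them. The sizing rule in isolation, a sketch using the same usb_endpoint_maxp() helper the patch calls:

static int pick_transfer_size(const struct usbtouch_device_info *type,
			      struct usb_endpoint_descriptor *endpoint)
{
	int size = type->rept_size;

	/* Variable-length protocol: never read past a single packet. */
	if (type->get_pkt_len)
		size = min(size, usb_endpoint_maxp(endpoint));

	return size;
}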
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index 181c9ba929cd..0046a619527d 100644
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -392,7 +392,7 @@ struct arm_smmu_domain {
+ struct arm_smmu_cfg root_cfg;
+ phys_addr_t output_mask;
+
+- spinlock_t lock;
++ struct mutex lock;
+ };
+
+ static DEFINE_SPINLOCK(arm_smmu_devices_lock);
+@@ -897,7 +897,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
+ goto out_free_domain;
+ smmu_domain->root_cfg.pgd = pgd;
+
+- spin_lock_init(&smmu_domain->lock);
++ mutex_init(&smmu_domain->lock);
+ domain->priv = smmu_domain;
+ return 0;
+
+@@ -1134,7 +1134,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ * Sanity check the domain. We don't currently support domains
+ * that cross between different SMMU chains.
+ */
+- spin_lock(&smmu_domain->lock);
++ mutex_lock(&smmu_domain->lock);
+ if (!smmu_domain->leaf_smmu) {
+ /* Now that we have a master, we can finalise the domain */
+ ret = arm_smmu_init_domain_context(domain, dev);
+@@ -1149,7 +1149,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ dev_name(device_smmu->dev));
+ goto err_unlock;
+ }
+- spin_unlock(&smmu_domain->lock);
++ mutex_unlock(&smmu_domain->lock);
+
+ /* Looks ok, so add the device to the domain */
+ master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
+@@ -1159,7 +1159,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ return arm_smmu_domain_add_master(smmu_domain, master);
+
+ err_unlock:
+- spin_unlock(&smmu_domain->lock);
++ mutex_unlock(&smmu_domain->lock);
+ return ret;
+ }
+
+@@ -1388,7 +1388,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
+ if (paddr & ~output_mask)
+ return -ERANGE;
+
+- spin_lock(&smmu_domain->lock);
++ mutex_lock(&smmu_domain->lock);
+ pgd += pgd_index(iova);
+ end = iova + size;
+ do {
+@@ -1404,7 +1404,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
+ } while (pgd++, iova != end);
+
+ out_unlock:
+- spin_unlock(&smmu_domain->lock);
++ mutex_unlock(&smmu_domain->lock);
+
+ /* Ensure new page tables are visible to the hardware walker */
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+@@ -1443,44 +1443,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+ {
+- pgd_t *pgd;
+- pud_t *pud;
+- pmd_t *pmd;
+- pte_t *pte;
++ pgd_t *pgdp, pgd;
++ pud_t pud;
++ pmd_t pmd;
++ pte_t pte;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+- struct arm_smmu_device *smmu = root_cfg->smmu;
+
+- spin_lock(&smmu_domain->lock);
+- pgd = root_cfg->pgd;
+- if (!pgd)
+- goto err_unlock;
++ pgdp = root_cfg->pgd;
++ if (!pgdp)
++ return 0;
+
+- pgd += pgd_index(iova);
+- if (pgd_none_or_clear_bad(pgd))
+- goto err_unlock;
++ pgd = *(pgdp + pgd_index(iova));
++ if (pgd_none(pgd))
++ return 0;
+
+- pud = pud_offset(pgd, iova);
+- if (pud_none_or_clear_bad(pud))
+- goto err_unlock;
++ pud = *pud_offset(&pgd, iova);
++ if (pud_none(pud))
++ return 0;
+
+- pmd = pmd_offset(pud, iova);
+- if (pmd_none_or_clear_bad(pmd))
+- goto err_unlock;
++ pmd = *pmd_offset(&pud, iova);
++ if (pmd_none(pmd))
++ return 0;
+
+- pte = pmd_page_vaddr(*pmd) + pte_index(iova);
++ pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
+ if (pte_none(pte))
+- goto err_unlock;
+-
+- spin_unlock(&smmu_domain->lock);
+- return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
++ return 0;
+
+-err_unlock:
+- spin_unlock(&smmu_domain->lock);
+- dev_warn(smmu->dev,
+- "invalid (corrupt?) page tables detected for iova 0x%llx\n",
+- (unsigned long long)iova);
+- return -EINVAL;
++ return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
+ }
+
+ static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 173cbb20d104..54bdd923316f 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1717,6 +1717,11 @@ static int __init dm_bufio_init(void)
+ {
+ __u64 mem;
+
++ dm_bufio_allocated_kmem_cache = 0;
++ dm_bufio_allocated_get_free_pages = 0;
++ dm_bufio_allocated_vmalloc = 0;
++ dm_bufio_current_allocated = 0;
++
+ memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
+ memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
+
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index 496d5f3646a5..2f91d6d4a2cc 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -20,6 +20,7 @@
+ struct delay_c {
+ struct timer_list delay_timer;
+ struct mutex timer_lock;
++ struct workqueue_struct *kdelayd_wq;
+ struct work_struct flush_expired_bios;
+ struct list_head delayed_bios;
+ atomic_t may_delay;
+@@ -45,14 +46,13 @@ struct dm_delay_info {
+
+ static DEFINE_MUTEX(delayed_bios_lock);
+
+-static struct workqueue_struct *kdelayd_wq;
+ static struct kmem_cache *delayed_cache;
+
+ static void handle_delayed_timer(unsigned long data)
+ {
+ struct delay_c *dc = (struct delay_c *)data;
+
+- queue_work(kdelayd_wq, &dc->flush_expired_bios);
++ queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
+ }
+
+ static void queue_timeout(struct delay_c *dc, unsigned long expires)
+@@ -191,6 +191,12 @@ out:
+ goto bad_dev_write;
+ }
+
++ dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
++ if (!dc->kdelayd_wq) {
++ DMERR("Couldn't start kdelayd");
++ goto bad_queue;
++ }
++
+ setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
+
+ INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
+@@ -203,6 +209,8 @@ out:
+ ti->private = dc;
+ return 0;
+
++bad_queue:
++ mempool_destroy(dc->delayed_pool);
+ bad_dev_write:
+ if (dc->dev_write)
+ dm_put_device(ti, dc->dev_write);
+@@ -217,7 +225,7 @@ static void delay_dtr(struct dm_target *ti)
+ {
+ struct delay_c *dc = ti->private;
+
+- flush_workqueue(kdelayd_wq);
++ destroy_workqueue(dc->kdelayd_wq);
+
+ dm_put_device(ti, dc->dev_read);
+
+@@ -350,12 +358,6 @@ static int __init dm_delay_init(void)
+ {
+ int r = -ENOMEM;
+
+- kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
+- if (!kdelayd_wq) {
+- DMERR("Couldn't start kdelayd");
+- goto bad_queue;
+- }
+-
+ delayed_cache = KMEM_CACHE(dm_delay_info, 0);
+ if (!delayed_cache) {
+ DMERR("Couldn't create delayed bio cache.");
+@@ -373,8 +375,6 @@ static int __init dm_delay_init(void)
+ bad_register:
+ kmem_cache_destroy(delayed_cache);
+ bad_memcache:
+- destroy_workqueue(kdelayd_wq);
+-bad_queue:
+ return r;
+ }
+
+@@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void)
+ {
+ dm_unregister_target(&delay_target);
+ kmem_cache_destroy(delayed_cache);
+- destroy_workqueue(kdelayd_wq);
+ }
+
+ /* Module hooks */
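The workqueue moves from a module-global created at module_init time to a per-target instance allocated in the constructor, so each delay target owns, flushes, and destroys exactly its own "kdelayd". The ownership pattern, reduced to a sketch (struct and function names here are illustrative):

struct delay_instance {
	struct workqueue_struct *wq;
	struct work_struct flush_work;
};

static int delay_instance_init(struct delay_instance *di, work_func_t fn)
{
	di->wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
	if (!di->wq)
		return -ENOMEM;
	INIT_WORK(&di->flush_work, fn);
	return 0;
}

static void delay_instance_exit(struct delay_instance *di)
{
	destroy_workqueue(di->wq);	/* drains this target's pending work only */
}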
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index aec57d76db5d..944690bafd93 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -66,6 +66,18 @@ struct dm_snapshot {
+
+ atomic_t pending_exceptions_count;
+
++ /* Protected by "lock" */
++ sector_t exception_start_sequence;
++
++ /* Protected by kcopyd single-threaded callback */
++ sector_t exception_complete_sequence;
++
++ /*
++ * A list of pending exceptions that completed out of order.
++ * Protected by kcopyd single-threaded callback.
++ */
++ struct list_head out_of_order_list;
++
+ mempool_t *pending_pool;
+
+ struct dm_exception_table pending;
+@@ -173,6 +185,14 @@ struct dm_snap_pending_exception {
+ */
+ int started;
+
++ /* There was copying error. */
++ int copy_error;
++
++ /* A sequence number, it is used for in-order completion. */
++ sector_t exception_sequence;
++
++ struct list_head out_of_order_entry;
++
+ /*
+ * For writing a complete chunk, bypassing the copy.
+ */
+@@ -1094,6 +1114,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ s->valid = 1;
+ s->active = 0;
+ atomic_set(&s->pending_exceptions_count, 0);
++ s->exception_start_sequence = 0;
++ s->exception_complete_sequence = 0;
++ INIT_LIST_HEAD(&s->out_of_order_list);
+ init_rwsem(&s->lock);
+ INIT_LIST_HEAD(&s->list);
+ spin_lock_init(&s->pe_lock);
+@@ -1443,6 +1466,19 @@ static void commit_callback(void *context, int success)
+ pending_complete(pe, success);
+ }
+
++static void complete_exception(struct dm_snap_pending_exception *pe)
++{
++ struct dm_snapshot *s = pe->snap;
++
++ if (unlikely(pe->copy_error))
++ pending_complete(pe, 0);
++
++ else
++ /* Update the metadata if we are persistent */
++ s->store->type->commit_exception(s->store, &pe->e,
++ commit_callback, pe);
++}
++
+ /*
+ * Called when the copy I/O has finished. kcopyd actually runs
+ * this code so don't block.
+@@ -1452,13 +1488,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
+ struct dm_snap_pending_exception *pe = context;
+ struct dm_snapshot *s = pe->snap;
+
+- if (read_err || write_err)
+- pending_complete(pe, 0);
++ pe->copy_error = read_err || write_err;
+
+- else
+- /* Update the metadata if we are persistent */
+- s->store->type->commit_exception(s->store, &pe->e,
+- commit_callback, pe);
++ if (pe->exception_sequence == s->exception_complete_sequence) {
++ s->exception_complete_sequence++;
++ complete_exception(pe);
++
++ while (!list_empty(&s->out_of_order_list)) {
++ pe = list_entry(s->out_of_order_list.next,
++ struct dm_snap_pending_exception, out_of_order_entry);
++ if (pe->exception_sequence != s->exception_complete_sequence)
++ break;
++ s->exception_complete_sequence++;
++ list_del(&pe->out_of_order_entry);
++ complete_exception(pe);
++ }
++ } else {
++ struct list_head *lh;
++ struct dm_snap_pending_exception *pe2;
++
++ list_for_each_prev(lh, &s->out_of_order_list) {
++ pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
++ if (pe2->exception_sequence < pe->exception_sequence)
++ break;
++ }
++ list_add(&pe->out_of_order_entry, lh);
++ }
+ }
+
+ /*
+@@ -1553,6 +1608,8 @@ __find_pending_exception(struct dm_snapshot *s,
+ return NULL;
+ }
+
++ pe->exception_sequence = s->exception_start_sequence++;
++
+ dm_insert_exception(&s->pending, &pe->e);
+
+ return pe;
+@@ -2192,7 +2249,7 @@ static struct target_type origin_target = {
+
+ static struct target_type snapshot_target = {
+ .name = "snapshot",
+- .version = {1, 11, 1},
++ .version = {1, 12, 0},
+ .module = THIS_MODULE,
+ .ctr = snapshot_ctr,
+ .dtr = snapshot_dtr,
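kcopyd can finish chunk copies in any order, but exceptions must be committed in issue order: each pending exception gets a sequence number, a completion matching exception_complete_sequence is committed immediately (then any now-eligible parked entries are drained), and everything else waits on the sorted out_of_order_list. The reordering logic as a self-contained, runnable model:

#include <stdio.h>
#include <stdlib.h>

struct pending { unsigned seq; struct pending *next; };

static unsigned complete_seq;	/* next sequence we may commit */
static struct pending *parked;	/* out-of-order arrivals, sorted ascending */

static void commit(unsigned seq) { printf("commit %u\n", seq); }

static void on_completion(unsigned seq)
{
	if (seq != complete_seq) {	/* arrived early: park it in order */
		struct pending **p = &parked, *n = malloc(sizeof(*n));
		while (*p && (*p)->seq < seq)
			p = &(*p)->next;
		n->seq = seq;
		n->next = *p;
		*p = n;
		return;
	}
	commit(complete_seq++);
	while (parked && parked->seq == complete_seq) {	/* drain stragglers */
		struct pending *n = parked;
		parked = n->next;
		commit(complete_seq++);
		free(n);
	}
}

int main(void)
{
	unsigned order[] = { 2, 0, 1, 4, 3 };	/* arbitrary completion order */
	for (unsigned i = 0; i < 5; i++)
		on_completion(order[i]);	/* commits print strictly 0..4 */
	return 0;
}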
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index 3d404c1371ed..28a90122a5a8 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -964,6 +964,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+
+ int __init dm_statistics_init(void)
+ {
++ shared_memory_amount = 0;
+ dm_stat_need_rcu_barrier = 0;
+ return 0;
+ }
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 41d907b58f7e..20a8cc0df7c6 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -200,6 +200,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
+
+ num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
+
++ if (!num_targets) {
++ kfree(t);
++ return -ENOMEM;
++ }
++
+ if (alloc_targets(t, num_targets)) {
+ kfree(t);
+ return -ENOMEM;
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 60bce435f4fa..8a30ad54bd46 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1697,6 +1697,14 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
+ up_write(&pmd->root_lock);
+ }
+
++void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
++{
++ down_write(&pmd->root_lock);
++ pmd->read_only = false;
++ dm_bm_set_read_write(pmd->bm);
++ up_write(&pmd->root_lock);
++}
++
+ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+ dm_block_t threshold,
+ dm_sm_threshold_fn fn,
+diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
+index 845ebbe589a9..7bcc0e1d6238 100644
+--- a/drivers/md/dm-thin-metadata.h
++++ b/drivers/md/dm-thin-metadata.h
+@@ -193,6 +193,7 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_siz
+ * that nothing is changing.
+ */
+ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
++void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd);
+
+ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+ dm_block_t threshold,
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 2c0cf511ec23..ee29037ffc2e 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -640,7 +640,9 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
+ */
+ r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
+ if (r) {
+- DMERR_LIMIT("dm_thin_insert_block() failed");
++ DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d",
++ dm_device_name(pool->pool_md), r);
++ set_pool_mode(pool, PM_READ_ONLY);
+ cell_error(pool, m->cell);
+ goto out;
+ }
+@@ -881,32 +883,23 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
+ }
+ }
+
+-static int commit(struct pool *pool)
+-{
+- int r;
+-
+- r = dm_pool_commit_metadata(pool->pmd);
+- if (r)
+- DMERR_LIMIT("%s: commit failed: error = %d",
+- dm_device_name(pool->pool_md), r);
+-
+- return r;
+-}
+-
+ /*
+ * A non-zero return indicates read_only or fail_io mode.
+ * Many callers don't care about the return value.
+ */
+-static int commit_or_fallback(struct pool *pool)
++static int commit(struct pool *pool)
+ {
+ int r;
+
+ if (get_pool_mode(pool) != PM_WRITE)
+ return -EINVAL;
+
+- r = commit(pool);
+- if (r)
++ r = dm_pool_commit_metadata(pool->pmd);
++ if (r) {
++ DMERR_LIMIT("%s: dm_pool_commit_metadata failed: error = %d",
++ dm_device_name(pool->pool_md), r);
+ set_pool_mode(pool, PM_READ_ONLY);
++ }
+
+ return r;
+ }
+@@ -943,7 +936,9 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
+ * Try to commit to see if that will free up some
+ * more space.
+ */
+- (void) commit_or_fallback(pool);
++ r = commit(pool);
++ if (r)
++ return r;
+
+ r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
+ if (r)
+@@ -957,7 +952,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
+ * table reload).
+ */
+ if (!free_blocks) {
+- DMWARN("%s: no free space available.",
++ DMWARN("%s: no free data space available.",
+ dm_device_name(pool->pool_md));
+ spin_lock_irqsave(&pool->lock, flags);
+ pool->no_free_space = 1;
+@@ -967,8 +962,16 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
+ }
+
+ r = dm_pool_alloc_data_block(pool->pmd, result);
+- if (r)
++ if (r) {
++ if (r == -ENOSPC &&
++ !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
++ !free_blocks) {
++ DMWARN("%s: no free metadata space available.",
++ dm_device_name(pool->pool_md));
++ set_pool_mode(pool, PM_READ_ONLY);
++ }
+ return r;
++ }
+
+ return 0;
+ }
+@@ -1349,7 +1352,7 @@ static void process_deferred_bios(struct pool *pool)
+ if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
+ return;
+
+- if (commit_or_fallback(pool)) {
++ if (commit(pool)) {
+ while ((bio = bio_list_pop(&bios)))
+ bio_io_error(bio);
+ return;
+@@ -1397,6 +1400,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
+ case PM_FAIL:
+ DMERR("%s: switching pool to failure mode",
+ dm_device_name(pool->pool_md));
++ dm_pool_metadata_read_only(pool->pmd);
+ pool->process_bio = process_bio_fail;
+ pool->process_discard = process_bio_fail;
+ pool->process_prepared_mapping = process_prepared_mapping_fail;
+@@ -1421,6 +1425,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
+ break;
+
+ case PM_WRITE:
++ dm_pool_metadata_read_write(pool->pmd);
+ pool->process_bio = process_bio;
+ pool->process_discard = process_discard;
+ pool->process_prepared_mapping = process_prepared_mapping;
+@@ -1637,12 +1642,19 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
+ struct pool_c *pt = ti->private;
+
+ /*
+- * We want to make sure that degraded pools are never upgraded.
++ * We want to make sure that a pool in PM_FAIL mode is never upgraded.
+ */
+ enum pool_mode old_mode = pool->pf.mode;
+ enum pool_mode new_mode = pt->adjusted_pf.mode;
+
+- if (old_mode > new_mode)
++ /*
++ * If we were in PM_FAIL mode, rollback of metadata failed. We're
++ * not going to recover without a thin_repair. So we never let the
++ * pool move out of the old mode. On the other hand a PM_READ_ONLY
++ * may have been due to a lack of metadata or data space, and may
++ * now work (ie. if the underlying devices have been resized).
++ */
++ if (old_mode == PM_FAIL)
+ new_mode = old_mode;
+
+ pool->ti = ti;
+@@ -2266,7 +2278,7 @@ static int pool_preresume(struct dm_target *ti)
+ return r;
+
+ if (need_commit1 || need_commit2)
+- (void) commit_or_fallback(pool);
++ (void) commit(pool);
+
+ return 0;
+ }
+@@ -2293,7 +2305,7 @@ static void pool_postsuspend(struct dm_target *ti)
+
+ cancel_delayed_work(&pool->waker);
+ flush_workqueue(pool->wq);
+- (void) commit_or_fallback(pool);
++ (void) commit(pool);
+ }
+
+ static int check_arg_count(unsigned argc, unsigned args_required)
+@@ -2427,7 +2439,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
+ if (r)
+ return r;
+
+- (void) commit_or_fallback(pool);
++ (void) commit(pool);
+
+ r = dm_pool_reserve_metadata_snap(pool->pmd);
+ if (r)
+@@ -2489,7 +2501,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
+ DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
+
+ if (!r)
+- (void) commit_or_fallback(pool);
++ (void) commit(pool);
+
+ return r;
+ }
+@@ -2544,7 +2556,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
+
+ /* Commit to ensure statistics aren't out-of-date */
+ if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
+- (void) commit_or_fallback(pool);
++ (void) commit(pool);
+
+ r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
+ if (r) {
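The dm-thin hunks fold commit_or_fallback() into commit() itself: a commit now refuses to run once the pool has left PM_WRITE mode, and a failed commit demotes the pool to read-only rather than letting callers silently ignore the error. The consolidated behaviour, restated as a sketch outside the diff noise:

static int commit_sketch(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) != PM_WRITE)
		return -EINVAL;		/* read-only or failed pool: refuse */

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		set_pool_mode(pool, PM_READ_ONLY);	/* demote, don't hide */

	return r;	/* callers such as alloc_data_block() stop early on error */
}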
+diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
+index af96e24ec328..1d75b1dc1e2e 100644
+--- a/drivers/md/persistent-data/dm-array.c
++++ b/drivers/md/persistent-data/dm-array.c
+@@ -317,8 +317,16 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
+ * The shadow op will often be a noop. Only insert if it really
+ * copied data.
+ */
+- if (dm_block_location(*block) != b)
++ if (dm_block_location(*block) != b) {
++ /*
++ * dm_tm_shadow_block will have already decremented the old
++ * block, but it is still referenced by the btree. We
++ * increment to stop the insert decrementing it below zero
++ * when overwriting the old value.
++ */
++ dm_tm_inc(info->btree_info.tm, b);
+ r = insert_ablock(info, index, *block, root);
++ }
+
+ return r;
+ }
+diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
+index a7e8bf296388..064a3c271baa 100644
+--- a/drivers/md/persistent-data/dm-block-manager.c
++++ b/drivers/md/persistent-data/dm-block-manager.c
+@@ -626,6 +626,12 @@ void dm_bm_set_read_only(struct dm_block_manager *bm)
+ }
+ EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
+
++void dm_bm_set_read_write(struct dm_block_manager *bm)
++{
++ bm->read_only = false;
++}
++EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
++
+ u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
+ {
+ return crc32c(~(u32) 0, data, len) ^ init_xor;
+diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
+index 9a82083a66b6..13cd58e1fe69 100644
+--- a/drivers/md/persistent-data/dm-block-manager.h
++++ b/drivers/md/persistent-data/dm-block-manager.h
+@@ -108,9 +108,9 @@ int dm_bm_unlock(struct dm_block *b);
+ int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
+ struct dm_block *superblock);
+
+- /*
+- * Request data be prefetched into the cache.
+- */
++/*
++ * Request data is prefetched into the cache.
++ */
+ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
+
+ /*
+@@ -125,6 +125,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
+ * be returned if you do.
+ */
+ void dm_bm_set_read_only(struct dm_block_manager *bm);
++void dm_bm_set_read_write(struct dm_block_manager *bm);
+
+ u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor);
+
+diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
+index 6058569fe86c..466a60bbd716 100644
+--- a/drivers/md/persistent-data/dm-space-map-common.c
++++ b/drivers/md/persistent-data/dm-space-map-common.c
+@@ -381,7 +381,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
+ }
+
+ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
+- uint32_t (*mutator)(void *context, uint32_t old),
++ int (*mutator)(void *context, uint32_t old, uint32_t *new),
+ void *context, enum allocation_event *ev)
+ {
+ int r;
+@@ -410,11 +410,17 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
+
+ if (old > 2) {
+ r = sm_ll_lookup_big_ref_count(ll, b, &old);
+- if (r < 0)
++ if (r < 0) {
++ dm_tm_unlock(ll->tm, nb);
+ return r;
++ }
+ }
+
+- ref_count = mutator(context, old);
++ r = mutator(context, old, &ref_count);
++ if (r) {
++ dm_tm_unlock(ll->tm, nb);
++ return r;
++ }
+
+ if (ref_count <= 2) {
+ sm_set_bitmap(bm_le, bit, ref_count);
+@@ -465,9 +471,10 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
+ return ll->save_ie(ll, index, &ie_disk);
+ }
+
+-static uint32_t set_ref_count(void *context, uint32_t old)
++static int set_ref_count(void *context, uint32_t old, uint32_t *new)
+ {
+- return *((uint32_t *) context);
++ *new = *((uint32_t *) context);
++ return 0;
+ }
+
+ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
+@@ -476,9 +483,10 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
+ return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
+ }
+
+-static uint32_t inc_ref_count(void *context, uint32_t old)
++static int inc_ref_count(void *context, uint32_t old, uint32_t *new)
+ {
+- return old + 1;
++ *new = old + 1;
++ return 0;
+ }
+
+ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+@@ -486,9 +494,15 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+ return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
+ }
+
+-static uint32_t dec_ref_count(void *context, uint32_t old)
++static int dec_ref_count(void *context, uint32_t old, uint32_t *new)
+ {
+- return old - 1;
++ if (!old) {
++ DMERR_LIMIT("unable to decrement a reference count below 0");
++ return -EINVAL;
++ }
++
++ *new = old - 1;
++ return 0;
+ }
+
+ int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
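The mutator callback in sm_ll_mutate() grows an error return so a reference count can no longer wrap below zero: dec_ref_count() reports -EINVAL and the caller unwinds (unlocking the block) instead of persisting a corrupt count. The same refactor in miniature, runnable:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int dec_ref(void *ctx, uint32_t old, uint32_t *new_val)
{
	(void)ctx;
	if (!old)
		return -EINVAL;	/* would underflow: reject, don't wrap */
	*new_val = old - 1;
	return 0;
}

static int mutate(uint32_t *count,
		  int (*mutator)(void *, uint32_t, uint32_t *), void *ctx)
{
	uint32_t nv;
	int r = mutator(ctx, *count, &nv);

	if (r)
		return r;	/* unwind; *count left untouched */
	*count = nv;
	return 0;
}

int main(void)
{
	uint32_t c = 1;

	printf("%d %u\n", mutate(&c, dec_ref, NULL), c);	/* 0 0 */
	printf("%d %u\n", mutate(&c, dec_ref, NULL), c);	/* -22 0: rejected */
	return 0;
}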
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 1c959684caef..58fc1eef7499 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -384,12 +384,16 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ int r = sm_metadata_new_block_(sm, b);
+- if (r)
++ if (r) {
+ DMERR("unable to allocate new metadata block");
++ return r;
++ }
+
+ r = sm_metadata_get_nr_free(sm, &count);
+- if (r)
++ if (r) {
+ DMERR("couldn't get free block count");
++ return r;
++ }
+
+ check_threshold(&smm->threshold, count);
+
+diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
+index 30ee59052157..65728c25ea05 100644
+--- a/drivers/media/dvb-frontends/af9033.c
++++ b/drivers/media/dvb-frontends/af9033.c
+@@ -170,18 +170,18 @@ static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val,
+ static int af9033_wr_reg_val_tab(struct af9033_state *state,
+ const struct reg_val *tab, int tab_len)
+ {
++#define MAX_TAB_LEN 212
+ int ret, i, j;
+- u8 buf[MAX_XFER_SIZE];
++ u8 buf[1 + MAX_TAB_LEN];
++
++ dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
+
+ if (tab_len > sizeof(buf)) {
+- dev_warn(&state->i2c->dev,
+- "%s: i2c wr len=%d is too big!\n",
+- KBUILD_MODNAME, tab_len);
++ dev_warn(&state->i2c->dev, "%s: tab len %d is too big\n",
++ KBUILD_MODNAME, tab_len);
+ return -EINVAL;
+ }
+
+- dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
+-
+ for (i = 0, j = 0; i < tab_len; i++) {
+ buf[j] = tab[i].val;
+
+diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
+index 51447a04d8f2..03930d5e9fea 100644
+--- a/drivers/media/dvb-frontends/cxd2820r_core.c
++++ b/drivers/media/dvb-frontends/cxd2820r_core.c
+@@ -34,7 +34,7 @@ static int cxd2820r_wr_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
+ {
+ .addr = i2c,
+ .flags = 0,
+- .len = sizeof(buf),
++ .len = len + 1,
+ .buf = buf,
+ }
+ };
+@@ -75,7 +75,7 @@ static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
+ }, {
+ .addr = i2c,
+ .flags = I2C_M_RD,
+- .len = sizeof(buf),
++ .len = len,
+ .buf = buf,
+ }
+ };
+diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
+index 3f584a7d0781..bee7946faa7c 100644
+--- a/drivers/media/i2c/wm8775.c
++++ b/drivers/media/i2c/wm8775.c
+@@ -130,12 +130,10 @@ static int wm8775_s_routing(struct v4l2_subdev *sd,
+ return -EINVAL;
+ }
+ state->input = input;
+- if (!v4l2_ctrl_g_ctrl(state->mute))
++ if (v4l2_ctrl_g_ctrl(state->mute))
+ return 0;
+ if (!v4l2_ctrl_g_ctrl(state->vol))
+ return 0;
+- if (!v4l2_ctrl_g_ctrl(state->bal))
+- return 0;
+ wm8775_set_audio(sd, 1);
+ return 0;
+ }
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index c6532de0eac7..4f0aaa51ae0d 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -4182,7 +4182,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
+ }
+ btv->std = V4L2_STD_PAL;
+ init_irqreg(btv);
+- v4l2_ctrl_handler_setup(hdl);
++ if (!bttv_tvcards[btv->c.type].no_video)
++ v4l2_ctrl_handler_setup(hdl);
+ if (hdl->error) {
+ result = hdl->error;
+ goto fail2;
+diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
+index d37ee37aaefe..896bd8b974b5 100644
+--- a/drivers/media/pci/saa7164/saa7164-core.c
++++ b/drivers/media/pci/saa7164/saa7164-core.c
+@@ -1354,9 +1354,11 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
+ if (fw_debug) {
+ dev->kthread = kthread_run(saa7164_thread_function, dev,
+ "saa7164 debug");
+- if (!dev->kthread)
++ if (IS_ERR(dev->kthread)) {
++ dev->kthread = NULL;
+ printk(KERN_ERR "%s() Failed to create "
+ "debug kernel thread\n", __func__);
++ }
+ }
+
+ } /* != BOARD_UNKNOWN */
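+kthread_run() never returns NULL on failure; it returns an ERR_PTR()-encoded
+errno, so the old `!dev->kthread' test could never fire. A minimal sketch of
+the checked pattern, assuming a kernel context with <linux/kthread.h> and
+<linux/err.h> (thread function and device fields hypothetical):
+
+	struct task_struct *task;
+
+	task = kthread_run(my_thread_fn, dev, "mydrv-debug");
+	if (IS_ERR(task)) {
+		dev->kthread = NULL;	/* never cache an ERR_PTR */
+		pr_err("failed to start debug thread: %ld\n", PTR_ERR(task));
+	} else {
+		dev->kthread = task;
+	}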
+diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
+index 036e2f54f4db..3ed1f5669f79 100644
+--- a/drivers/media/radio/radio-tea5764.c
++++ b/drivers/media/radio/radio-tea5764.c
+@@ -356,7 +356,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
+ So we keep it as-is. */
+ return -EINVAL;
+ }
+- clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL);
++ freq = clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL);
+ tea5764_power_up(radio);
+ tea5764_tune(radio, (freq * 125) / 2);
+ return 0;
+diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
+index 06ac69245ca1..f4bb456b9a23 100644
+--- a/drivers/media/radio/tef6862.c
++++ b/drivers/media/radio/tef6862.c
+@@ -112,7 +112,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen
+ if (f->tuner != 0)
+ return -EINVAL;
+
+- clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ);
++ freq = clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ);
+ pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL;
+ i2cmsg[0] = (MODE_PRESET << MODE_SHIFT) | WM_SUB_PLLM;
+ i2cmsg[1] = (pll >> 8) & 0xff;
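+Both radio hunks above fix the same misuse: clamp() is an expression that
+yields the clamped value, it does not modify its argument in place, so the
+result must be assigned back. A simplified, non-type-checking sketch of the
+macro and the two call styles:
+
+	#define clamp(val, lo, hi) \
+		((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))
+
+	unsigned int freq = 999999;
+
+	clamp(freq, 76000u, 108000u);		/* value discarded, freq unchanged */
+	freq = clamp(freq, 76000u, 108000u);	/* correct: freq is now 108000 */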
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index c8fcd78425bd..8f9b2cea88f0 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -131,7 +131,7 @@ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
+ {
+ u8 wbuf[MAX_XFER_SIZE];
+ u8 mbox = (reg >> 16) & 0xff;
+- struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL };
++ struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL };
+
+ if (6 + len > sizeof(wbuf)) {
+ dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n",
+@@ -238,14 +238,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ } else {
+ /* I2C */
+ u8 buf[MAX_XFER_SIZE];
+- struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
++ struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len,
+ buf, msg[1].len, msg[1].buf };
+
+ if (5 + msg[0].len > sizeof(buf)) {
+ dev_warn(&d->udev->dev,
+ "%s: i2c xfer: len=%d is too big!\n",
+ KBUILD_MODNAME, msg[0].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+ req.mbox |= ((msg[0].addr & 0x80) >> 3);
+ buf[0] = msg[1].len;
+@@ -274,14 +275,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ } else {
+ /* I2C */
+ u8 buf[MAX_XFER_SIZE];
+- struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf,
+- 0, NULL };
++ struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len,
++ buf, 0, NULL };
+
+ if (5 + msg[0].len > sizeof(buf)) {
+ dev_warn(&d->udev->dev,
+ "%s: i2c xfer: len=%d is too big!\n",
+ KBUILD_MODNAME, msg[0].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+ req.mbox |= ((msg[0].addr & 0x80) >> 3);
+ buf[0] = msg[0].len;
+@@ -319,6 +321,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ ret = -EOPNOTSUPP;
+ }
+
++unlock:
+ mutex_unlock(&d->i2c_mutex);
+
+ if (ret < 0)
+@@ -1534,6 +1537,8 @@ static const struct usb_device_id af9035_id_table[] = {
+ /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */
+ { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099,
+ &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
++ { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
++ &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(usb, af9035_id_table);
+diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
+index c28d4e29af1a..e76a733e8a28 100644
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -1241,10 +1241,6 @@ static struct of_device_id pxa3xx_nand_dt_ids[] = {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_PXA,
+ },
+- {
+- .compatible = "marvell,armada370-nand",
+- .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
+- },
+ {}
+ };
+ MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
+index 50b853a79d77..46dfb1378c17 100644
+--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
+@@ -717,8 +717,7 @@ static int emac_open(struct net_device *dev)
+ if (netif_msg_ifup(db))
+ dev_dbg(db->dev, "enabling %s\n", dev->name);
+
+- if (devm_request_irq(db->dev, dev->irq, &emac_interrupt,
+- 0, dev->name, dev))
++ if (request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev))
+ return -EAGAIN;
+
+ /* Initialize EMAC board */
+@@ -774,6 +773,8 @@ static int emac_stop(struct net_device *ndev)
+
+ emac_shutdown(ndev);
+
++ free_irq(ndev->irq, ndev);
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
+index e7266759a10b..556da81ab092 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
+@@ -1730,7 +1730,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+- udelay(usec_interval);
++ if (usec_interval >= 1000)
++ mdelay(usec_interval/1000);
++ else
++ udelay(usec_interval);
+ }
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
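+udelay() is only meant for short busy-waits (many architectures cap it at a
+few milliseconds at build time), so a poll interval that may reach thousands
+of microseconds is routed through mdelay() instead. A sketch of the guard the
+hunk adds (helper name hypothetical):
+
+	static void phy_poll_delay(unsigned int usec_interval)
+	{
+		if (usec_interval >= 1000)
+			mdelay(usec_interval / 1000);	/* whole milliseconds */
+		else
+			udelay(usec_interval);		/* sub-millisecond wait */
+	}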
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+index f4864807e15b..e493150d50c3 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+@@ -3966,18 +3966,20 @@ static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
+ int quick_drop;
+ s32 t[3], f[3] = {5180, 5500, 5785};
+
+- if (!(pBase->miscConfiguration & BIT(1)))
++ if (!(pBase->miscConfiguration & BIT(4)))
+ return;
+
+- if (freq < 4000)
+- quick_drop = eep->modalHeader2G.quick_drop;
+- else {
+- t[0] = eep->base_ext1.quick_drop_low;
+- t[1] = eep->modalHeader5G.quick_drop;
+- t[2] = eep->base_ext1.quick_drop_high;
+- quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
++ if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9340(ah)) {
++ if (freq < 4000) {
++ quick_drop = eep->modalHeader2G.quick_drop;
++ } else {
++ t[0] = eep->base_ext1.quick_drop_low;
++ t[1] = eep->modalHeader5G.quick_drop;
++ t[2] = eep->base_ext1.quick_drop_high;
++ quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
++ }
++ REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
+ }
+- REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
+ }
+
+ static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz)
+@@ -4017,7 +4019,7 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ u8 bias;
+
+- if (!(eep->baseEepHeader.featureEnable & 0x40))
++ if (!(eep->baseEepHeader.miscConfiguration & 0x40))
+ return;
+
+ if (!AR_SREV_9300(ah))
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index dd30452df966..7fe6b5923a9c 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -1275,6 +1275,10 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
+ if (!rts_thresh || (len > rts_thresh))
+ rts = true;
+ }
++
++ if (!aggr)
++ len = fi->framelen;
++
+ ath_buf_set_rate(sc, bf, &info, len, rts);
+ }
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
+index 76e14c046d94..200f0d98471a 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
+@@ -125,6 +125,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7260_NVM_VERSION,
+ .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
++ .host_interrupt_operation_mode = true,
+ };
+
+ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
+@@ -135,6 +136,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
+ .nvm_ver = IWL7260_NVM_VERSION,
+ .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+ .high_temp = true,
++ .host_interrupt_operation_mode = true,
+ };
+
+ const struct iwl_cfg iwl7260_2n_cfg = {
+@@ -144,6 +146,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7260_NVM_VERSION,
+ .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
++ .host_interrupt_operation_mode = true,
+ };
+
+ const struct iwl_cfg iwl7260_n_cfg = {
+@@ -153,6 +156,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7260_NVM_VERSION,
+ .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
++ .host_interrupt_operation_mode = true,
+ };
+
+ const struct iwl_cfg iwl3160_2ac_cfg = {
+@@ -162,6 +166,7 @@ const struct iwl_cfg iwl3160_2ac_cfg = {
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL3160_NVM_VERSION,
+ .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
++ .host_interrupt_operation_mode = true,
+ };
+
+ const struct iwl_cfg iwl3160_2n_cfg = {
+@@ -171,6 +176,7 @@ const struct iwl_cfg iwl3160_2n_cfg = {
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL3160_NVM_VERSION,
+ .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
++ .host_interrupt_operation_mode = true,
+ };
+
+ const struct iwl_cfg iwl3160_n_cfg = {
+@@ -180,6 +186,7 @@ const struct iwl_cfg iwl3160_n_cfg = {
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL3160_NVM_VERSION,
+ .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
++ .host_interrupt_operation_mode = true,
+ };
+
+ MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
+index b03c25e14903..028ae91edf55 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/iwlwifi/iwl-config.h
+@@ -207,6 +207,8 @@ struct iwl_eeprom_params {
+ * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
+ * @internal_wimax_coex: internal wifi/wimax combo device
+ * @high_temp: Is this NIC designated to operate at high temperature.
++ * @host_interrupt_operation_mode: device needs host interrupt operation
++ * mode set
+ *
+ * We enable the driver to be backward compatible wrt. hardware features.
+ * API differences in uCode shouldn't be handled here but through TLVs
+@@ -235,6 +237,7 @@ struct iwl_cfg {
+ enum iwl_led_mode led_mode;
+ const bool rx_with_siso_diversity;
+ const bool internal_wimax_coex;
++ const bool host_interrupt_operation_mode;
+ bool high_temp;
+ };
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
+index a276af476e2d..641420528771 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
++++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
+@@ -463,14 +463,11 @@
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+- * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+ #define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
+ #define IWL_HOST_INT_TIMEOUT_DEF (0x40)
+ #define IWL_HOST_INT_TIMEOUT_MIN (0x0)
+-#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+-#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+-#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
++#define IWL_HOST_INT_OPER_MODE BIT(31)
+
+ /*****************************************************************************
+ * 7000/3000 series SHR DTS addresses *
+diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+index aac81b8984b0..c196425a6723 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
++++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+@@ -119,6 +119,10 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,
+
+ if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
+ return -EINVAL;
++ if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
++ return -EINVAL;
++ if (drain < 0 || drain > 1)
++ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
+index 3f237b42eb36..83d28bcf0d48 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
+@@ -489,6 +489,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
++
++ /* W/A for interrupt coalescing bug in 7260 and 3160 */
++ if (trans->cfg->host_interrupt_operation_mode)
++ iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
+ }
+
+ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index c3f904d422b0..6bc31003a32c 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -276,9 +276,6 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ iwl_pcie_apm_init(trans);
+
+- /* Set interrupt coalescing calibration timer to default (512 usecs) */
+- iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+-
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ iwl_pcie_set_pwr(trans, false);
+diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
+index f084412eee0b..bf63e13d14c9 100644
+--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
+@@ -319,8 +319,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
+ if (bss_desc && bss_desc->ssid.ssid_len &&
+ (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor.
+ ssid, &bss_desc->ssid))) {
+- kfree(bss_desc);
+- return 0;
++ ret = 0;
++ goto done;
+ }
+
+ /* Exit Adhoc mode first */
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 98f7b9b89507..53dc57127ca3 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -19,6 +19,7 @@
+ #include <linux/cpu.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/suspend.h>
++#include <linux/kexec.h>
+ #include "pci.h"
+
+ struct pci_dynid {
+@@ -388,12 +389,17 @@ static void pci_device_shutdown(struct device *dev)
+ pci_msi_shutdown(pci_dev);
+ pci_msix_shutdown(pci_dev);
+
++#ifdef CONFIG_KEXEC
+ /*
+- * Turn off Bus Master bit on the device to tell it to not
+- * continue to do DMA. Don't touch devices in D3cold or unknown states.
++	 * If this is a kexec reboot, turn off the Bus Master bit on the
++	 * device to tell it not to continue doing DMA. Don't touch
++	 * devices in D3cold or unknown states.
++	 * If it is not a kexec reboot, firmware will hit the PCI
++	 * devices with a big hammer and stop their DMA anyway.
+ */
+- if (pci_dev->current_state <= PCI_D3hot)
++ if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
+ pci_clear_master(pci_dev);
++#endif
+ }
+
+ #ifdef CONFIG_PM
+diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
+index 032df3799efb..8b5e4c712a01 100644
+--- a/drivers/regulator/pfuze100-regulator.c
++++ b/drivers/regulator/pfuze100-regulator.c
+@@ -38,7 +38,7 @@
+
+ #define PFUZE100_DEVICEID 0x0
+ #define PFUZE100_REVID 0x3
+-#define PFUZE100_FABID 0x3
++#define PFUZE100_FABID 0x4
+
+ #define PFUZE100_SW1ABVOL 0x20
+ #define PFUZE100_SW1CVOL 0x2e
+diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
+index 741892632ae0..b86eec3ffba8 100644
+--- a/drivers/rtc/rtc-at91rm9200.c
++++ b/drivers/rtc/rtc-at91rm9200.c
+@@ -220,6 +220,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+
+ at91_alarm_year = tm.tm_year;
+
++ tm.tm_mon = alrm->time.tm_mon;
++ tm.tm_mday = alrm->time.tm_mday;
+ tm.tm_hour = alrm->time.tm_hour;
+ tm.tm_min = alrm->time.tm_min;
+ tm.tm_sec = alrm->time.tm_sec;
+diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
+index e7108045f553..d13c532b68cd 100644
+--- a/drivers/staging/comedi/drivers/amplc_pc263.c
++++ b/drivers/staging/comedi/drivers/amplc_pc263.c
+@@ -68,6 +68,9 @@ static int pc263_do_insn_bits(struct comedi_device *dev,
+ outb(s->state & 0xFF, dev->iobase);
+ outb(s->state >> 8, dev->iobase + 1);
+ }
++
++ data[1] = s->state;
++
+ return insn->n;
+ }
+
+diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
+index 145bb48f618e..a9c77af3b76d 100644
+--- a/drivers/staging/comedi/drivers/amplc_pci263.c
++++ b/drivers/staging/comedi/drivers/amplc_pci263.c
+@@ -55,6 +55,9 @@ static int pci263_do_insn_bits(struct comedi_device *dev,
+ outb(s->state & 0xFF, dev->iobase);
+ outb(s->state >> 8, dev->iobase + 1);
+ }
++
++ data[1] = s->state;
++
+ return insn->n;
+ }
+
+diff --git a/drivers/staging/comedi/drivers/ssv_dnp.c b/drivers/staging/comedi/drivers/ssv_dnp.c
+index 11758a515c1b..0e687de7848f 100644
+--- a/drivers/staging/comedi/drivers/ssv_dnp.c
++++ b/drivers/staging/comedi/drivers/ssv_dnp.c
+@@ -83,11 +83,11 @@ static int dnp_dio_insn_bits(struct comedi_device *dev,
+
+ /* on return, data[1] contains the value of the digital input lines. */
+ outb(PADR, CSCIR);
+- data[0] = inb(CSCDR);
++ data[1] = inb(CSCDR);
+ outb(PBDR, CSCIR);
+- data[0] += inb(CSCDR) << 8;
++ data[1] += inb(CSCDR) << 8;
+ outb(PCDR, CSCIR);
+- data[0] += ((inb(CSCDR) & 0xF0) << 12);
++ data[1] += ((inb(CSCDR) & 0xF0) << 12);
+
+ return insn->n;
+
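+All three comedi hunks above enforce the same convention: for an INSN_BITS
+instruction, data[0] carries the caller's write mask and data[1] must carry
+the channel state back to the caller; writing the read-back into data[0]
+clobbers the mask and returns nothing. A minimal sketch of the convention
+(register access and width hypothetical):
+
+	static int my_dio_insn_bits(struct comedi_device *dev,
+				    struct comedi_subdevice *s,
+				    struct comedi_insn *insn,
+				    unsigned int *data)
+	{
+		unsigned int mask = data[0];
+		unsigned int bits = data[1];
+
+		if (mask) {
+			s->state = (s->state & ~mask) | (bits & mask);
+			outw(s->state, dev->iobase);	/* update outputs */
+		}
+
+		data[1] = s->state;	/* always report state in data[1] */
+		return insn->n;
+	}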
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 243c6729c320..c5c366790e6a 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4836,8 +4836,9 @@ static void hub_events(void)
+ hub->ports[i - 1]->child;
+
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+- if (!udev || !(portstatus &
+- USB_PORT_STAT_CONNECTION)) {
++ if (!udev ||
++ !(portstatus & USB_PORT_STAT_CONNECTION) ||
++ udev->state == USB_STATE_NOTATTACHED) {
+ status = hub_port_reset(hub, i,
+ NULL, HUB_BH_RESET_TIME,
+ true);
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 7fa93f4bc507..056da977ebdf 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -459,6 +459,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+ dep = dwc3_wIndex_to_dep(dwc, wIndex);
+ if (!dep)
+ return -EINVAL;
++ if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
++ break;
+ ret = __dwc3_gadget_ep_set_halt(dep, set);
+ if (ret)
+ return -EINVAL;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 5452c0fce360..02e44fcaf205 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1200,9 +1200,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
+ else
+ dep->flags |= DWC3_EP_STALL;
+ } else {
+- if (dep->flags & DWC3_EP_WEDGE)
+- return 0;
+-
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_CLEARSTALL, &params);
+ if (ret)
+@@ -1210,7 +1207,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
+ value ? "set" : "clear",
+ dep->name);
+ else
+- dep->flags &= ~DWC3_EP_STALL;
++ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ }
+
+ return ret;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index d4f0f3305759..7c0adb9812aa 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -593,6 +593,7 @@ static void reset_config(struct usb_composite_dev *cdev)
+ bitmap_zero(f->endpoints, 32);
+ }
+ cdev->config = NULL;
++ cdev->delayed_status = 0;
+ }
+
+ static int set_config(struct usb_composite_dev *cdev,
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6bfbd80ec2b9..55fc0c39b7e1 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2929,8 +2929,58 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+ }
+
+ while (1) {
+- if (room_on_ring(xhci, ep_ring, num_trbs))
+- break;
++ if (room_on_ring(xhci, ep_ring, num_trbs)) {
++ union xhci_trb *trb = ep_ring->enqueue;
++ unsigned int usable = ep_ring->enq_seg->trbs +
++ TRBS_PER_SEGMENT - 1 - trb;
++ u32 nop_cmd;
++
++ /*
++ * Section 4.11.7.1 TD Fragments states that a link
++ * TRB must only occur at the boundary between
++			 * data bursts (e.g. 512 bytes for 480M).
++			 * While it is possible to split a large fragment,
++			 * we don't know the size yet.
++			 * The simplest solution is to fill the TRBs before
++			 * the LINK with NOP commands.
++ */
++ if (num_trbs == 1 || num_trbs <= usable || usable == 0)
++ break;
++
++ if (ep_ring->type != TYPE_BULK)
++ /*
++				 * While isoc transfers might have a buffer that
++				 * crosses a 64k boundary, it is unlikely.
++				 * Since we can't add NOPs without generating
++				 * gaps in the traffic, just hope it never
++ * happens at the end of the ring.
++ * This could be fixed by writing a LINK TRB
++ * instead of the first NOP - however the
++ * TRB_TYPE_LINK_LE32() calls would all need
++ * changing to check the ring length.
++ */
++ break;
++
++ if (num_trbs >= TRBS_PER_SEGMENT) {
++ xhci_err(xhci, "Too many fragments %d, max %d\n",
++ num_trbs, TRBS_PER_SEGMENT - 1);
++ return -ENOMEM;
++ }
++
++ nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
++ ep_ring->cycle_state);
++ ep_ring->num_trbs_free -= usable;
++ do {
++ trb->generic.field[0] = 0;
++ trb->generic.field[1] = 0;
++ trb->generic.field[2] = 0;
++ trb->generic.field[3] = nop_cmd;
++ trb++;
++ } while (--usable);
++ ep_ring->enqueue = trb;
++ if (room_on_ring(xhci, ep_ring, num_trbs))
++ break;
++ }
+
+ if (ep_ring == xhci->cmd_ring) {
+ xhci_err(xhci, "Do not support expand command ring\n");
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index ae959746f77f..0c593afc3185 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -38,6 +38,7 @@ struct cppi41_dma_channel {
+ u32 prog_len;
+ u32 transferred;
+ u32 packet_sz;
++ struct list_head tx_check;
+ };
+
+ #define MUSB_DMA_NUM_CHANNELS 15
+@@ -47,6 +48,8 @@ struct cppi41_dma_controller {
+ struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
+ struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
+ struct musb *musb;
++ struct hrtimer early_tx;
++ struct list_head early_tx_list;
+ u32 rx_mode;
+ u32 tx_mode;
+ u32 auto_req;
+@@ -96,31 +99,27 @@ static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
+ cppi41_channel->usb_toggle = toggle;
+ }
+
+-static void cppi41_dma_callback(void *private_data)
++static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
+ {
+- struct dma_channel *channel = private_data;
+- struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+- struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+- struct musb *musb = hw_ep->musb;
+- unsigned long flags;
+- struct dma_tx_state txstate;
+- u32 transferred;
++ u8 epnum = hw_ep->epnum;
++ struct musb *musb = hw_ep->musb;
++ void __iomem *epio = musb->endpoints[epnum].regs;
++ u16 csr;
+
+- spin_lock_irqsave(&musb->lock, flags);
++ csr = musb_readw(epio, MUSB_TXCSR);
++ if (csr & MUSB_TXCSR_TXPKTRDY)
++ return false;
++ return true;
++}
+
+- dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
+- &txstate);
+- transferred = cppi41_channel->prog_len - txstate.residue;
+- cppi41_channel->transferred += transferred;
++static void cppi41_dma_callback(void *private_data);
+
+- dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
+- hw_ep->epnum, cppi41_channel->transferred,
+- cppi41_channel->total_len);
++static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
++{
++ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
++ struct musb *musb = hw_ep->musb;
+
+- update_rx_toggle(cppi41_channel);
+-
+- if (cppi41_channel->transferred == cppi41_channel->total_len ||
+- transferred < cppi41_channel->packet_sz) {
++ if (!cppi41_channel->prog_len) {
+
+ /* done, complete */
+ cppi41_channel->channel.actual_len =
+@@ -150,13 +149,11 @@ static void cppi41_dma_callback(void *private_data)
+ remain_bytes,
+ direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+- if (WARN_ON(!dma_desc)) {
+- spin_unlock_irqrestore(&musb->lock, flags);
++ if (WARN_ON(!dma_desc))
+ return;
+- }
+
+ dma_desc->callback = cppi41_dma_callback;
+- dma_desc->callback_param = channel;
++ dma_desc->callback_param = &cppi41_channel->channel;
+ cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
+ dma_async_issue_pending(dc);
+
+@@ -166,6 +163,117 @@ static void cppi41_dma_callback(void *private_data)
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+ }
++}
++
++static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
++{
++ struct cppi41_dma_controller *controller;
++ struct cppi41_dma_channel *cppi41_channel, *n;
++ struct musb *musb;
++ unsigned long flags;
++ enum hrtimer_restart ret = HRTIMER_NORESTART;
++
++ controller = container_of(timer, struct cppi41_dma_controller,
++ early_tx);
++ musb = controller->musb;
++
++ spin_lock_irqsave(&musb->lock, flags);
++ list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
++ tx_check) {
++ bool empty;
++ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
++
++ empty = musb_is_tx_fifo_empty(hw_ep);
++ if (empty) {
++ list_del_init(&cppi41_channel->tx_check);
++ cppi41_trans_done(cppi41_channel);
++ }
++ }
++
++ if (!list_empty(&controller->early_tx_list)) {
++ ret = HRTIMER_RESTART;
++ hrtimer_forward_now(&controller->early_tx,
++ ktime_set(0, 150 * NSEC_PER_USEC));
++ }
++
++ spin_unlock_irqrestore(&musb->lock, flags);
++ return ret;
++}
++
++static void cppi41_dma_callback(void *private_data)
++{
++ struct dma_channel *channel = private_data;
++ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
++ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
++ struct musb *musb = hw_ep->musb;
++ unsigned long flags;
++ struct dma_tx_state txstate;
++ u32 transferred;
++ bool empty;
++
++ spin_lock_irqsave(&musb->lock, flags);
++
++ dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
++ &txstate);
++ transferred = cppi41_channel->prog_len - txstate.residue;
++ cppi41_channel->transferred += transferred;
++
++ dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
++ hw_ep->epnum, cppi41_channel->transferred,
++ cppi41_channel->total_len);
++
++ update_rx_toggle(cppi41_channel);
++
++ if (cppi41_channel->transferred == cppi41_channel->total_len ||
++ transferred < cppi41_channel->packet_sz)
++ cppi41_channel->prog_len = 0;
++
++ empty = musb_is_tx_fifo_empty(hw_ep);
++ if (empty) {
++ cppi41_trans_done(cppi41_channel);
++ } else {
++ struct cppi41_dma_controller *controller;
++ /*
++		 * On AM335x it has been observed that the TX interrupt fires
++		 * too early, meaning the TXFIFO is not yet empty even though
++		 * the DMA engine says it is done with the transfer. We don't
++		 * receive a FIFO-empty interrupt, so the only thing we can do
++		 * is poll for the bit. On HS it usually takes 2us, on FS
++		 * around 110us - 150us depending on the transfer size.
++		 * We spin on HS (for no longer than 25us) and set up a timer
++		 * on FS to check for the bit and complete the transfer.
++ */
++ controller = cppi41_channel->controller;
++
++ if (musb->g.speed == USB_SPEED_HIGH) {
++ unsigned wait = 25;
++
++ do {
++ empty = musb_is_tx_fifo_empty(hw_ep);
++ if (empty)
++ break;
++ wait--;
++ if (!wait)
++ break;
++ udelay(1);
++ } while (1);
++
++ empty = musb_is_tx_fifo_empty(hw_ep);
++ if (empty) {
++ cppi41_trans_done(cppi41_channel);
++ goto out;
++ }
++ }
++ list_add_tail(&cppi41_channel->tx_check,
++ &controller->early_tx_list);
++ if (!hrtimer_active(&controller->early_tx)) {
++ hrtimer_start_range_ns(&controller->early_tx,
++ ktime_set(0, 140 * NSEC_PER_USEC),
++ 40 * NSEC_PER_USEC,
++ HRTIMER_MODE_REL);
++ }
++ }
++out:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ }
+
+@@ -364,6 +472,8 @@ static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
+ WARN_ON(1);
+ return 1;
+ }
++ if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
++ return 0;
+ if (cppi41_channel->is_tx)
+ return 1;
+ /* AM335x Advisory 1.0.13. No workaround for device RX mode */
+@@ -388,6 +498,7 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
+ if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
+ return 0;
+
++ list_del_init(&cppi41_channel->tx_check);
+ if (is_tx) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~MUSB_TXCSR_DMAENAB;
+@@ -494,6 +605,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
+ cppi41_channel->controller = controller;
+ cppi41_channel->port_num = port;
+ cppi41_channel->is_tx = is_tx;
++ INIT_LIST_HEAD(&cppi41_channel->tx_check);
+
+ musb_dma = &cppi41_channel->channel;
+ musb_dma->private_data = cppi41_channel;
+@@ -518,6 +630,7 @@ void dma_controller_destroy(struct dma_controller *c)
+ struct cppi41_dma_controller *controller = container_of(c,
+ struct cppi41_dma_controller, controller);
+
++ hrtimer_cancel(&controller->early_tx);
+ cppi41_dma_controller_stop(controller);
+ kfree(controller);
+ }
+@@ -537,6 +650,9 @@ struct dma_controller *dma_controller_create(struct musb *musb,
+ if (!controller)
+ goto kzalloc_fail;
+
++ hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ controller->early_tx.function = cppi41_recheck_tx_req;
++ INIT_LIST_HEAD(&controller->early_tx_list);
+ controller->musb = musb;
+
+ controller->controller.channel_alloc = cppi41_dma_channel_allocate;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c3d94853b4ab..496b7e39d5be 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -85,6 +85,7 @@ static void option_instat_callback(struct urb *urb);
+ #define HUAWEI_PRODUCT_K4505 0x1464
+ #define HUAWEI_PRODUCT_K3765 0x1465
+ #define HUAWEI_PRODUCT_K4605 0x14C6
++#define HUAWEI_PRODUCT_E173S6 0x1C07
+
+ #define QUANTA_VENDOR_ID 0x0408
+ #define QUANTA_PRODUCT_Q101 0xEA02
+@@ -572,6 +573,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
+@@ -634,6 +637,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
+@@ -688,6 +695,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
+@@ -742,6 +753,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
+@@ -796,6 +811,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
+@@ -850,6 +869,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
+@@ -904,6 +927,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
+diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
+index 3fb83b0c28c2..ab6d3f56cbca 100644
+--- a/drivers/watchdog/sc1200wdt.c
++++ b/drivers/watchdog/sc1200wdt.c
+@@ -409,8 +409,9 @@ static int __init sc1200wdt_init(void)
+ #if defined CONFIG_PNP
+ /* now that the user has specified an IO port and we haven't detected
+ * any devices, disable pnp support */
++ if (isapnp)
++ pnp_unregister_driver(&scl200wdt_pnp_driver);
+ isapnp = 0;
+- pnp_unregister_driver(&scl200wdt_pnp_driver);
+ #endif
+
+ if (!request_region(io, io_len, SC1200_MODULE_NAME)) {
+diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
+index e15d2b0d8d3b..0890c83643e9 100644
+--- a/fs/btrfs/acl.c
++++ b/fs/btrfs/acl.c
+@@ -229,7 +229,7 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
+ if (ret > 0) {
+ /* we need an acl */
+ ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS);
+- } else {
++ } else if (ret < 0) {
+ cache_no_acl(inode);
+ }
+ } else {
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 0552a599b28f..5eb50b5df777 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -185,6 +185,9 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
+ {
+ struct __prelim_ref *ref;
+
++ if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
++ return 0;
++
+ ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
+ if (!ref)
+ return -ENOMEM;
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 61b5bcd57b7e..b544a44d696e 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -2758,7 +2758,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
+ int level;
+ int lowest_unlock = 1;
+ u8 lowest_level = 0;
+- int prev_cmp;
++ int prev_cmp = -1;
+
+ lowest_level = p->lowest_level;
+ WARN_ON(p->nodes[0] != NULL);
+@@ -2769,7 +2769,6 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
+ }
+
+ again:
+- prev_cmp = -1;
+ b = get_old_root(root, time_seq);
+ level = btrfs_header_level(b);
+ p->locks[level] = BTRFS_READ_LOCK;
+@@ -2787,6 +2786,11 @@ again:
+ */
+ btrfs_unlock_up_safe(p, level + 1);
+
++ /*
++ * Since we can unwind eb's we want to do a real search every
++ * time.
++ */
++ prev_cmp = -1;
+ ret = key_search(b, key, level, &prev_cmp, &slot);
+
+ if (level != 0) {
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 51e3afa78354..8b8eff051493 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2367,10 +2367,23 @@ out_unlock:
+ return ret;
+ }
+
++static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
++{
++ struct old_sa_defrag_extent *old, *tmp;
++
++ if (!new)
++ return;
++
++ list_for_each_entry_safe(old, tmp, &new->head, list) {
++ list_del(&old->list);
++ kfree(old);
++ }
++ kfree(new);
++}
++
+ static void relink_file_extents(struct new_sa_defrag_extent *new)
+ {
+ struct btrfs_path *path;
+- struct old_sa_defrag_extent *old, *tmp;
+ struct sa_defrag_extent_backref *backref;
+ struct sa_defrag_extent_backref *prev = NULL;
+ struct inode *inode;
+@@ -2413,16 +2426,11 @@ static void relink_file_extents(struct new_sa_defrag_extent *new)
+ kfree(prev);
+
+ btrfs_free_path(path);
+-
+- list_for_each_entry_safe(old, tmp, &new->head, list) {
+- list_del(&old->list);
+- kfree(old);
+- }
+ out:
++ free_sa_defrag_extent(new);
++
+ atomic_dec(&root->fs_info->defrag_running);
+ wake_up(&root->fs_info->transaction_wait);
+-
+- kfree(new);
+ }
+
+ static struct new_sa_defrag_extent *
+@@ -2432,7 +2440,7 @@ record_old_file_extents(struct inode *inode,
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+- struct old_sa_defrag_extent *old, *tmp;
++ struct old_sa_defrag_extent *old;
+ struct new_sa_defrag_extent *new;
+ int ret;
+
+@@ -2480,7 +2488,7 @@ record_old_file_extents(struct inode *inode,
+ if (slot >= btrfs_header_nritems(l)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+- goto out_free_list;
++ goto out_free_path;
+ else if (ret > 0)
+ break;
+ continue;
+@@ -2509,7 +2517,7 @@ record_old_file_extents(struct inode *inode,
+
+ old = kmalloc(sizeof(*old), GFP_NOFS);
+ if (!old)
+- goto out_free_list;
++ goto out_free_path;
+
+ offset = max(new->file_pos, key.offset);
+ end = min(new->file_pos + new->len, key.offset + num_bytes);
+@@ -2531,15 +2539,10 @@ next:
+
+ return new;
+
+-out_free_list:
+- list_for_each_entry_safe(old, tmp, &new->head, list) {
+- list_del(&old->list);
+- kfree(old);
+- }
+ out_free_path:
+ btrfs_free_path(path);
+ out_kfree:
+- kfree(new);
++ free_sa_defrag_extent(new);
+ return NULL;
+ }
+
+@@ -2710,8 +2713,14 @@ out:
+ btrfs_remove_ordered_extent(inode, ordered_extent);
+
+ /* for snapshot-aware defrag */
+- if (new)
+- relink_file_extents(new);
++ if (new) {
++ if (ret) {
++ free_sa_defrag_extent(new);
++ atomic_dec(&root->fs_info->defrag_running);
++ } else {
++ relink_file_extents(new);
++ }
++ }
+
+ /* once for us */
+ btrfs_put_ordered_extent(ordered_extent);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 9d46f60cb943..8747feb77ec9 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2130,7 +2130,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+
+ err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
+ if (err == -EINTR)
+- goto out;
++ goto out_drop_write;
+ dentry = lookup_one_len(vol_args->name, parent, namelen);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+@@ -2293,6 +2293,7 @@ out_dput:
+ dput(dentry);
+ out_unlock_dir:
+ mutex_unlock(&dir->i_mutex);
++out_drop_write:
+ mnt_drop_write_file(file);
+ out:
+ kfree(vol_args);
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index c702cb62f78a..bda1cd84ee5f 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -537,7 +537,9 @@ void btrfs_remove_ordered_extent(struct inode *inode,
+ */
+ if (RB_EMPTY_ROOT(&tree->tree) &&
+ !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
++ spin_lock(&root->fs_info->ordered_root_lock);
+ list_del_init(&BTRFS_I(inode)->ordered_operations);
++ spin_unlock(&root->fs_info->ordered_root_lock);
+ }
+
+ if (!root->nr_ordered_extents) {
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index e46e0ed74925..741c839fa46a 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -121,7 +121,6 @@ struct send_ctx {
+ struct list_head name_cache_list;
+ int name_cache_size;
+
+- struct file *cur_inode_filp;
+ char *read_buf;
+ };
+
+@@ -2120,77 +2119,6 @@ out:
+ }
+
+ /*
+- * Called for regular files when sending extents data. Opens a struct file
+- * to read from the file.
+- */
+-static int open_cur_inode_file(struct send_ctx *sctx)
+-{
+- int ret = 0;
+- struct btrfs_key key;
+- struct path path;
+- struct inode *inode;
+- struct dentry *dentry;
+- struct file *filp;
+- int new = 0;
+-
+- if (sctx->cur_inode_filp)
+- goto out;
+-
+- key.objectid = sctx->cur_ino;
+- key.type = BTRFS_INODE_ITEM_KEY;
+- key.offset = 0;
+-
+- inode = btrfs_iget(sctx->send_root->fs_info->sb, &key, sctx->send_root,
+- &new);
+- if (IS_ERR(inode)) {
+- ret = PTR_ERR(inode);
+- goto out;
+- }
+-
+- dentry = d_obtain_alias(inode);
+- inode = NULL;
+- if (IS_ERR(dentry)) {
+- ret = PTR_ERR(dentry);
+- goto out;
+- }
+-
+- path.mnt = sctx->mnt;
+- path.dentry = dentry;
+- filp = dentry_open(&path, O_RDONLY | O_LARGEFILE, current_cred());
+- dput(dentry);
+- dentry = NULL;
+- if (IS_ERR(filp)) {
+- ret = PTR_ERR(filp);
+- goto out;
+- }
+- sctx->cur_inode_filp = filp;
+-
+-out:
+- /*
+- * no xxxput required here as every vfs op
+- * does it by itself on failure
+- */
+- return ret;
+-}
+-
+-/*
+- * Closes the struct file that was created in open_cur_inode_file
+- */
+-static int close_cur_inode_file(struct send_ctx *sctx)
+-{
+- int ret = 0;
+-
+- if (!sctx->cur_inode_filp)
+- goto out;
+-
+- ret = filp_close(sctx->cur_inode_filp, NULL);
+- sctx->cur_inode_filp = NULL;
+-
+-out:
+- return ret;
+-}
+-
+-/*
+ * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
+ */
+ static int send_subvol_begin(struct send_ctx *sctx)
+@@ -3622,6 +3550,72 @@ out:
+ return ret;
+ }
+
++static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
++{
++ struct btrfs_root *root = sctx->send_root;
++ struct btrfs_fs_info *fs_info = root->fs_info;
++ struct inode *inode;
++ struct page *page;
++ char *addr;
++ struct btrfs_key key;
++ pgoff_t index = offset >> PAGE_CACHE_SHIFT;
++ pgoff_t last_index;
++ unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
++ ssize_t ret = 0;
++
++ key.objectid = sctx->cur_ino;
++ key.type = BTRFS_INODE_ITEM_KEY;
++ key.offset = 0;
++
++ inode = btrfs_iget(fs_info->sb, &key, root, NULL);
++ if (IS_ERR(inode))
++ return PTR_ERR(inode);
++
++ if (offset + len > i_size_read(inode)) {
++ if (offset > i_size_read(inode))
++ len = 0;
++ else
++			len = i_size_read(inode) - offset;
++ }
++ if (len == 0)
++ goto out;
++
++ last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
++ while (index <= last_index) {
++ unsigned cur_len = min_t(unsigned, len,
++ PAGE_CACHE_SIZE - pg_offset);
++ page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
++ if (!page) {
++ ret = -ENOMEM;
++ break;
++ }
++
++ if (!PageUptodate(page)) {
++ btrfs_readpage(NULL, page);
++ lock_page(page);
++ if (!PageUptodate(page)) {
++ unlock_page(page);
++ page_cache_release(page);
++ ret = -EIO;
++ break;
++ }
++ }
++
++ addr = kmap(page);
++ memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
++ kunmap(page);
++ unlock_page(page);
++ page_cache_release(page);
++ index++;
++ pg_offset = 0;
++ len -= cur_len;
++ ret += cur_len;
++ }
++out:
++ iput(inode);
++ return ret;
++}
++
+ /*
+ * Read some bytes from the current inode/file and send a write command to
+ * user space.
+@@ -3630,35 +3624,20 @@ static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
+ {
+ int ret = 0;
+ struct fs_path *p;
+- loff_t pos = offset;
+- int num_read = 0;
+- mm_segment_t old_fs;
++ ssize_t num_read = 0;
+
+ p = fs_path_alloc();
+ if (!p)
+ return -ENOMEM;
+
+- /*
+- * vfs normally only accepts user space buffers for security reasons.
+- * we only read from the file and also only provide the read_buf buffer
+- * to vfs. As this buffer does not come from a user space call, it's
+- * ok to temporary allow kernel space buffers.
+- */
+- old_fs = get_fs();
+- set_fs(KERNEL_DS);
+-
+ verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
+
+- ret = open_cur_inode_file(sctx);
+- if (ret < 0)
+- goto out;
+-
+- ret = vfs_read(sctx->cur_inode_filp, sctx->read_buf, len, &pos);
+- if (ret < 0)
+- goto out;
+- num_read = ret;
+- if (!num_read)
++ num_read = fill_read_buf(sctx, offset, len);
++ if (num_read <= 0) {
++ if (num_read < 0)
++ ret = num_read;
+ goto out;
++ }
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
+ if (ret < 0)
+@@ -3677,7 +3656,6 @@ verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
+ tlv_put_failure:
+ out:
+ fs_path_free(p);
+- set_fs(old_fs);
+ if (ret < 0)
+ return ret;
+ return num_read;
+@@ -4222,10 +4200,6 @@ static int changed_inode(struct send_ctx *sctx,
+ u64 left_gen = 0;
+ u64 right_gen = 0;
+
+- ret = close_cur_inode_file(sctx);
+- if (ret < 0)
+- goto out;
+-
+ sctx->cur_ino = key->objectid;
+ sctx->cur_inode_new_gen = 0;
+
+@@ -4686,11 +4660,6 @@ static int send_subvol(struct send_ctx *sctx)
+ }
+
+ out:
+- if (!ret)
+- ret = close_cur_inode_file(sctx);
+- else
+- close_cur_inode_file(sctx);
+-
+ free_recorded_refs(sctx);
+ return ret;
+ }
+@@ -4756,8 +4725,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
+ }
+
+ if (!access_ok(VERIFY_READ, arg->clone_sources,
+- sizeof(*arg->clone_sources *
+- arg->clone_sources_count))) {
++ sizeof(*arg->clone_sources) *
++ arg->clone_sources_count)) {
+ ret = -EFAULT;
+ goto out;
+ }
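+The access_ok() fix above is a parenthesisation bug: sizeof(*p * n) is the
+size of the product expression's type (one element), not n elements. A small
+illustration with hypothetical names standing in for the send args:
+
+	u64 __user *srcs;	/* stands in for arg->clone_sources */
+	u64 count = 8;		/* stands in for arg->clone_sources_count */
+
+	/* sizeof(*srcs * count) == sizeof(u64) == 8: one element only   */
+	/* sizeof(*srcs) * count == 8 * 8     == 64: the intended length */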
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 8c81bdc1ef9b..b791cfb9a050 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1453,7 +1453,7 @@ static void do_async_commit(struct work_struct *work)
+ * We've got freeze protection passed with the transaction.
+ * Tell lockdep about it.
+ */
+- if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
++ if (ac->newtrans->type & __TRANS_FREEZABLE)
+ rwsem_acquire_read(
+ &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 0, 1, _THIS_IP_);
+@@ -1494,7 +1494,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
+ * Tell lockdep we've released the freeze rwsem, since the
+ * async commit thread will be the one to unlock it.
+ */
+- if (trans->type < TRANS_JOIN_NOLOCK)
++ if (ac->newtrans->type & __TRANS_FREEZABLE)
+ rwsem_release(
+ &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 1, _THIS_IP_);
+@@ -1552,6 +1552,8 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
+ root->fs_info->running_transaction = NULL;
+ spin_unlock(&root->fs_info->trans_lock);
+
++ if (trans->type & __TRANS_FREEZABLE)
++ sb_end_intwrite(root->fs_info->sb);
+ put_transaction(cur_trans);
+ put_transaction(cur_trans);
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 79f057c0619a..e14e1f7748e5 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3375,7 +3375,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_token_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG,
+ &token);
+- if (em->block_start == 0)
++ if (em->block_start == EXTENT_MAP_HOLE)
+ skip_csum = true;
+ }
+
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 043b215769c2..b691f375d837 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -4488,6 +4488,7 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
+ btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
+ "%Lu-%Lu\n", logical, logical+len, em->start,
+ em->start + em->len);
++ free_extent_map(em);
+ return 1;
+ }
+
+@@ -4668,6 +4669,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+ btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
+ "found %Lu-%Lu\n", logical, em->start,
+ em->start + em->len);
++ free_extent_map(em);
+ return -EINVAL;
+ }
+
+diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
+index a235f0016889..c43fe9b39ff2 100644
+--- a/fs/exportfs/expfs.c
++++ b/fs/exportfs/expfs.c
+@@ -215,7 +215,7 @@ struct getdents_callback {
+ struct dir_context ctx;
+ char *name; /* name that was found. It already points to a
+ buffer NAME_MAX+1 is size */
+- unsigned long ino; /* the inum we are looking for */
++ u64 ino; /* the inum we are looking for */
+ int found; /* inode matched? */
+ int sequence; /* sequence counter */
+ };
+@@ -255,10 +255,14 @@ static int get_name(const struct path *path, char *name, struct dentry *child)
+ struct inode *dir = path->dentry->d_inode;
+ int error;
+ struct file *file;
++ struct kstat stat;
++ struct path child_path = {
++ .mnt = path->mnt,
++ .dentry = child,
++ };
+ struct getdents_callback buffer = {
+ .ctx.actor = filldir_one,
+ .name = name,
+- .ino = child->d_inode->i_ino
+ };
+
+ error = -ENOTDIR;
+@@ -268,6 +272,16 @@ static int get_name(const struct path *path, char *name, struct dentry *child)
+ if (!dir->i_fop)
+ goto out;
+ /*
++ * inode->i_ino is unsigned long, kstat->ino is u64, so the
++ * former would be insufficient on 32-bit hosts when the
++ * filesystem supports 64-bit inode numbers. So we need to
++ * actually call ->getattr, not just read i_ino:
++ */
++ error = vfs_getattr_nosec(&child_path, &stat);
++ if (error)
++ return error;
++ buffer.ino = stat.ino;
++ /*
+ * Open the directory ...
+ */
+ file = dentry_open(path, O_RDONLY, cred);
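+The point of the hunk above: inode->i_ino is an unsigned long, which is 32
+bits on 32-bit hosts, so matching directory entries against it can alias
+distinct files once a filesystem hands out 64-bit inode numbers; fetching the
+number via ->getattr into a u64 avoids the truncation. A two-line
+illustration of the failure mode:
+
+	u64 real_ino = 0x100000001ULL;		/* needs 33 bits */
+	unsigned long narrow = real_ino;	/* 32-bit host: 0x1, wrong file */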
+diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
+index 9c3e117c3ed1..4d0161442565 100644
+--- a/fs/nfs/blocklayout/extents.c
++++ b/fs/nfs/blocklayout/extents.c
+@@ -44,7 +44,7 @@
+ static inline sector_t normalize(sector_t s, int base)
+ {
+ sector_t tmp = s; /* Since do_div modifies its argument */
+- return s - do_div(tmp, base);
++ return s - sector_div(tmp, base);
+ }
+
+ static inline sector_t normalize_up(sector_t s, int base)
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 9be8021c70d8..dacb2979e8ac 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -4752,8 +4752,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
+ dprintk("%s ERROR %d, Reset session\n", __func__,
+ task->tk_status);
+ nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
+- task->tk_status = 0;
+- return -EAGAIN;
++ goto wait_on_recovery;
+ #endif /* CONFIG_NFS_V4_1 */
+ case -NFS4ERR_DELAY:
+ nfs_inc_server_stats(server, NFSIOS_DELAY);
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index 9186c7ce0b14..b6af150c96b8 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -132,6 +132,13 @@ nfsd_reply_cache_alloc(void)
+ }
+
+ static void
++nfsd_reply_cache_unhash(struct svc_cacherep *rp)
++{
++ hlist_del_init(&rp->c_hash);
++ list_del_init(&rp->c_lru);
++}
++
++static void
+ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
+ {
+ if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
+@@ -417,7 +424,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
+ rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
+ if (nfsd_cache_entry_expired(rp) ||
+ num_drc_entries >= max_drc_entries) {
+- lru_put_end(rp);
++ nfsd_reply_cache_unhash(rp);
+ prune_cache_entries();
+ goto search_cache;
+ }
+diff --git a/fs/stat.c b/fs/stat.c
+index d0ea7ef75e26..ae0c3cef9927 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -37,14 +37,21 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
+
+ EXPORT_SYMBOL(generic_fillattr);
+
+-int vfs_getattr(struct path *path, struct kstat *stat)
++/**
++ * vfs_getattr_nosec - getattr without security checks
++ * @path: file to get attributes from
++ * @stat: structure to return attributes in
++ *
++ * Get attributes without calling security_inode_getattr.
++ *
++ * Currently the only caller other than vfs_getattr is internal to the
++ * filehandle lookup code, which uses only the inode number and returns
++ * no attributes to any user. Any other code probably wants
++ * vfs_getattr.
++ */
++int vfs_getattr_nosec(struct path *path, struct kstat *stat)
+ {
+ struct inode *inode = path->dentry->d_inode;
+- int retval;
+-
+- retval = security_inode_getattr(path->mnt, path->dentry);
+- if (retval)
+- return retval;
+
+ if (inode->i_op->getattr)
+ return inode->i_op->getattr(path->mnt, path->dentry, stat);
+@@ -53,6 +60,18 @@ int vfs_getattr(struct path *path, struct kstat *stat)
+ return 0;
+ }
+
++EXPORT_SYMBOL(vfs_getattr_nosec);
++
++int vfs_getattr(struct path *path, struct kstat *stat)
++{
++ int retval;
++
++ retval = security_inode_getattr(path->mnt, path->dentry);
++ if (retval)
++ return retval;
++ return vfs_getattr_nosec(path, stat);
++}
++
+ EXPORT_SYMBOL(vfs_getattr);
+
+ int vfs_fstat(unsigned int fd, struct kstat *stat)
+diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
+index e64ee5288b86..c888040a1e93 100644
+--- a/fs/xfs/xfs_fsops.c
++++ b/fs/xfs/xfs_fsops.c
+@@ -217,6 +217,8 @@ xfs_growfs_data_private(
+ */
+ nfree = 0;
+ for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
++ __be32 *agfl_bno;
++
+ /*
+ * AG freespace header block
+ */
+@@ -276,8 +278,10 @@ xfs_growfs_data_private(
+ agfl->agfl_seqno = cpu_to_be32(agno);
+ uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
+ }
++
++ agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
+ for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
+- agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
++ agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
+
+ error = xfs_bwrite(bp);
+ xfs_buf_relse(bp);
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 2e1e6c33841d..8c8ef246c6b4 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -443,7 +443,8 @@ xfs_attrlist_by_handle(
+ return -XFS_ERROR(EPERM);
+ if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
+ return -XFS_ERROR(EFAULT);
+- if (al_hreq.buflen > XATTR_LIST_MAX)
++ if (al_hreq.buflen < sizeof(struct attrlist) ||
++ al_hreq.buflen > XATTR_LIST_MAX)
+ return -XFS_ERROR(EINVAL);
+
+ /*
+diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
+index f671f7e472ac..53365c6db2c2 100644
+--- a/fs/xfs/xfs_ioctl32.c
++++ b/fs/xfs/xfs_ioctl32.c
+@@ -357,7 +357,8 @@ xfs_compat_attrlist_by_handle(
+ if (copy_from_user(&al_hreq, arg,
+ sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
+ return -XFS_ERROR(EFAULT);
+- if (al_hreq.buflen > XATTR_LIST_MAX)
++ if (al_hreq.buflen < sizeof(struct attrlist) ||
++ al_hreq.buflen > XATTR_LIST_MAX)
+ return -XFS_ERROR(EINVAL);
+
+ /*
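
Both XFS hunks above turn a one-sided length check into a two-sided one: a user-supplied buflen smaller than the attrlist header would previously pass validation and could corrupt memory when the header is written back. A hedged userspace sketch of the check; the struct layout and limit below are illustrative:

#include <errno.h>
#include <stdint.h>

#define XATTR_LIST_MAX 65536	/* illustrative limit */

struct attrlist { int32_t al_count; int32_t al_more; int32_t al_offset[1]; };

int validate_buflen(uint32_t buflen)
{
	/* too small to hold even the header, or larger than allowed */
	if (buflen < sizeof(struct attrlist) || buflen > XATTR_LIST_MAX)
		return -EINVAL;
	return 0;
}
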
+diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
+index 973ce10c40b6..dc1bd3dcf11f 100644
+--- a/include/linux/compiler-intel.h
++++ b/include/linux/compiler-intel.h
+@@ -28,8 +28,6 @@
+
+ #endif
+
+-#define uninitialized_var(x) x
+-
+ #ifndef __HAVE_BUILTIN_BSWAP16__
+ /* icc has this, but it's called _bswap16 */
+ #define __HAVE_BUILTIN_BSWAP16__
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index fefa7b00ba42..164d2a91667f 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2504,6 +2504,7 @@ extern int page_symlink(struct inode *inode, const char *symname, int len);
+ extern const struct inode_operations page_symlink_inode_operations;
+ extern int generic_readlink(struct dentry *, char __user *, int);
+ extern void generic_fillattr(struct inode *, struct kstat *);
++int vfs_getattr_nosec(struct path *path, struct kstat *stat);
+ extern int vfs_getattr(struct path *, struct kstat *);
+ void __inode_add_bytes(struct inode *inode, loff_t bytes);
+ void inode_add_bytes(struct inode *inode, loff_t bytes);
+diff --git a/include/linux/kexec.h b/include/linux/kexec.h
+index d78d28a733b1..5fd33dc1fe3a 100644
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+ extern size_t vmcoreinfo_size;
+ extern size_t vmcoreinfo_max_size;
+
++/* flag to track if kexec reboot is in progress */
++extern bool kexec_in_progress;
++
+ int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+ int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 39cfa0aca91f..6b02370256e4 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -1262,6 +1262,8 @@ typedef void (*usb_complete_t)(struct urb *);
+ * @sg: scatter gather buffer list, the buffer size of each element in
+ * the list (except the last) must be divisible by the endpoint's
+ * max packet size if no_sg_constraint isn't set in 'struct usb_bus'
++ * (FIXME: scatter-gather under xHCI is broken for periodic transfers.
++ * Do not use urb->sg for interrupt endpoints for now, only bulk.)
+ * @num_mapped_sgs: (internal) number of mapped sg entries
+ * @num_sgs: number of entries in the sg list
+ * @transfer_buffer_length: How big is transfer_buffer. The transfer may
+diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
+index cf15b8213df7..54aff2d73150 100644
+--- a/include/sound/memalloc.h
++++ b/include/sound/memalloc.h
+@@ -103,7 +103,7 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
+ {
+ struct snd_sg_buf *sgbuf = dmab->private_data;
+ dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
+- addr &= PAGE_MASK;
++ addr &= ~((dma_addr_t)PAGE_SIZE - 1);
+ return addr + offset % PAGE_SIZE;
+ }
+
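
The memalloc.h change above matters when dma_addr_t is wider than unsigned long (32-bit kernels with 64-bit DMA addresses): PAGE_MASK is derived from a long, so "addr &= PAGE_MASK" silently clears the upper bits of the address. A standalone demonstration with illustrative constants:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK_32 (~(PAGE_SIZE - 1))	/* 32-bit mask: 0xfffff000 */

int main(void)
{
	uint64_t addr = 0x123456789000ULL;	/* DMA address above 4 GiB */
	uint64_t wrong = addr & PAGE_MASK_32;	/* upper 32 bits lost */
	uint64_t right = addr & ~((uint64_t)PAGE_SIZE - 1);

	printf("wrong: %#llx\n", (unsigned long long)wrong);	/* 0x56789000 */
	printf("right: %#llx\n", (unsigned long long)right);	/* 0x123456789000 */
	return 0;
}
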
+diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
+index d630163b9a2e..5759810e1c1b 100644
+--- a/include/uapi/sound/compress_offload.h
++++ b/include/uapi/sound/compress_offload.h
+@@ -30,7 +30,7 @@
+ #include <sound/compress_params.h>
+
+
+-#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 1)
++#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
+ /**
+ * struct snd_compressed_buffer: compressed buffer
+ * @fragment_size: size of buffer fragment in bytes
+@@ -67,8 +67,8 @@ struct snd_compr_params {
+ struct snd_compr_tstamp {
+ __u32 byte_offset;
+ __u32 copied_total;
+- snd_pcm_uframes_t pcm_frames;
+- snd_pcm_uframes_t pcm_io_frames;
++ __u32 pcm_frames;
++ __u32 pcm_io_frames;
+ __u32 sampling_rate;
+ };
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index c3a1a55a5214..221a58fc62f7 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -288,7 +288,7 @@ again:
+ put_page(page);
+ /* serialize against __split_huge_page_splitting() */
+ local_irq_disable();
+- if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
++ if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
+ page_head = compound_head(page);
+ /*
+ * page_head is valid pointer but we must pin
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 2a74f307c5ec..ecd783dda9ae 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+ size_t vmcoreinfo_size;
+ size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
+
++/* Flag to indicate we are going to kexec a new kernel */
++bool kexec_in_progress = false;
++
+ /* Location of the reserved area for the crash kernel */
+ struct resource crashk_res = {
+ .name = "Crash kernel",
+@@ -1675,6 +1678,7 @@ int kernel_kexec(void)
+ } else
+ #endif
+ {
++ kexec_in_progress = true;
+ kernel_restart_prepare(NULL);
+ printk(KERN_EMERG "Starting new kernel\n");
+ machine_shutdown();
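
The kexec hunk above raises a global flag just before the shutdown path so that other subsystems can skip work that is pointless, or unsafe, when a new kernel is about to take over. The pattern in miniature, with printf standing in for a driver's shutdown hook:

#include <stdbool.h>
#include <stdio.h>

static bool kexec_in_progress;	/* false until a kexec reboot starts */

static void driver_shutdown(void)
{
	if (kexec_in_progress) {
		puts("kexec pending: skipping slow teardown");
		return;
	}
	puts("full teardown");
}

int main(void)
{
	kexec_in_progress = true;	/* set before machine_shutdown() */
	driver_shutdown();
	return 0;
}
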
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 196559994f7c..fd9ca1de7559 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -225,6 +225,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ atomic_read(&cfs_rq->tg->runnable_avg));
+ #endif
+ #endif
++#ifdef CONFIG_CFS_BANDWIDTH
++ SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
++ cfs_rq->tg->cfs_bandwidth.timer_active);
++ SEQ_printf(m, " .%-30s: %d\n", "throttled",
++ cfs_rq->throttled);
++ SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
++ cfs_rq->throttle_count);
++#endif
+
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ print_cfs_group_stats(m, cpu, cfs_rq->tg);
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 7c70201fbc61..513fc2fd5109 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2335,6 +2335,8 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
+ cfs_rq->throttled_clock = rq_clock(rq);
+ raw_spin_lock(&cfs_b->lock);
+ list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
++ if (!cfs_b->timer_active)
++ __start_cfs_bandwidth(cfs_b);
+ raw_spin_unlock(&cfs_b->lock);
+ }
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 13b9d0f221b8..36cc2d0570ab 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2675,7 +2675,10 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
+ goto bypass;
+
+ if (unlikely(task_in_memcg_oom(current)))
+- goto bypass;
++ goto nomem;
++
++ if (gfp_mask & __GFP_NOFAIL)
++ oom = false;
+
+ /*
+ * We always charge the cgroup the mm_struct belongs to.
+@@ -6338,6 +6341,42 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
+ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
+ {
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
++ /*
++ * XXX: css_offline() would be where we should reparent all
++ * memory to prepare the cgroup for destruction. However,
++ * memcg does not do css_tryget() and res_counter charging
++ * under the same RCU lock region, which means that charging
++ * could race with offlining. Offlining only happens to
++ * cgroups with no tasks in them but charges can show up
++ * without any tasks from the swapin path when the target
++ * memcg is looked up from the swapout record and not from the
++ * current task as it usually is. A race like this can leak
++ * charges and put pages with stale cgroup pointers into
++ * circulation:
++ *
++ * #0 #1
++ * lookup_swap_cgroup_id()
++ * rcu_read_lock()
++ * mem_cgroup_lookup()
++ * css_tryget()
++ * rcu_read_unlock()
++ * disable css_tryget()
++ * call_rcu()
++ * offline_css()
++ * reparent_charges()
++ * res_counter_charge()
++ * css_put()
++ * css_free()
++ * pc->mem_cgroup = dead memcg
++ * add page to lru
++ *
++ * The bulk of the charges are still moved in offline_css() to
++ * avoid pinning a lot of pages in case a long-term reference
++ * like a swapout record is deferring the css_free() to long
++ * after offlining. But this makes sure we catch any charges
++ * made after offlining:
++ */
++ mem_cgroup_reparent_charges(memcg);
+
+ memcg_destroy_kmem(memcg);
+ __mem_cgroup_free(memcg);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index f60b1eec3f87..5e2c2f1a075d 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1075,9 +1075,6 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
+- if (flags & MSG_SENDPAGE_NOTLAST)
+- flags |= MSG_MORE;
+-
+ if (!up->pending) {
+ struct msghdr msg = { .msg_flags = flags|MSG_MORE };
+
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 629dee7ec9bf..9903ee585561 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -2386,8 +2386,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+
+- if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+- sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
++ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 21d5d44444d0..e765f77bb97a 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -1047,6 +1047,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
+
+ cancel_work_sync(&local->restart_work);
+ cancel_work_sync(&local->reconfig_filter);
++ flush_work(&local->sched_scan_stopped_work);
+
+ ieee80211_clear_tx_pending(local);
+ rate_control_deinitialize(local);
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 674eac1f996c..1e5bd0d75732 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -911,7 +911,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
+ u16 sc;
+ u8 tid, ack_policy;
+
+- if (!ieee80211_is_data_qos(hdr->frame_control))
++ if (!ieee80211_is_data_qos(hdr->frame_control) ||
++ is_multicast_ether_addr(hdr->addr1))
+ goto dont_reorder;
+
+ /*
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index d2d17a449224..8f2f003afbb8 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -1089,6 +1089,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
+
+ trace_api_sched_scan_stopped(local);
+
+- ieee80211_queue_work(&local->hw, &local->sched_scan_stopped_work);
++ schedule_work(&local->sched_scan_stopped_work);
+ }
+ EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index aff959e5a1b3..00a65ba3aeaa 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -451,6 +451,9 @@ int wiphy_register(struct wiphy *wiphy)
+ int i;
+ u16 ifmodes = wiphy->interface_modes;
+
++ /* support for 5/10 MHz is broken due to nl80211 API mess - disable */
++ wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ;
++
+ #ifdef CONFIG_PM
+ if (WARN_ON(wiphy->wowlan &&
+ (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 5b5231068516..d9a78fd8a2e1 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -53,6 +53,7 @@
+ #include <net/ip.h> /* for local_port_range[] */
+ #include <net/sock.h>
+ #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
++#include <net/inet_connection_sock.h>
+ #include <net/net_namespace.h>
+ #include <net/netlabel.h>
+ #include <linux/uaccess.h>
+@@ -3805,6 +3806,30 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
+ return 0;
+ }
+
++/**
++ * selinux_conn_sid - Determine the child socket label for a connection
++ * @sk_sid: the parent socket's SID
++ * @skb_sid: the packet's SID
++ * @conn_sid: the resulting connection SID
++ *
++ * If @skb_sid is valid then the user:role:type information from @sk_sid is
++ * combined with the MLS information from @skb_sid in order to create
++ * @conn_sid. If @skb_sid is not valid then @conn_sid is simply a copy
++ * of @sk_sid. Returns zero on success, negative values on failure.
++ *
++ */
++static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
++{
++ int err = 0;
++
++ if (skb_sid != SECSID_NULL)
++ err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
++ else
++ *conn_sid = sk_sid;
++
++ return err;
++}
++
+ /* socket security operations */
+
+ static int socket_sockcreate_sid(const struct task_security_struct *tsec,
+@@ -4411,7 +4436,7 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+ struct sk_security_struct *sksec = sk->sk_security;
+ int err;
+ u16 family = sk->sk_family;
+- u32 newsid;
++ u32 connsid;
+ u32 peersid;
+
+ /* handle mapped IPv4 packets arriving via IPv6 sockets */
+@@ -4421,16 +4446,11 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+ err = selinux_skb_peerlbl_sid(skb, family, &peersid);
+ if (err)
+ return err;
+- if (peersid == SECSID_NULL) {
+- req->secid = sksec->sid;
+- req->peer_secid = SECSID_NULL;
+- } else {
+- err = security_sid_mls_copy(sksec->sid, peersid, &newsid);
+- if (err)
+- return err;
+- req->secid = newsid;
+- req->peer_secid = peersid;
+- }
++ err = selinux_conn_sid(sksec->sid, peersid, &connsid);
++ if (err)
++ return err;
++ req->secid = connsid;
++ req->peer_secid = peersid;
+
+ return selinux_netlbl_inet_conn_request(req, family);
+ }
+@@ -4690,6 +4710,7 @@ static unsigned int selinux_ipv6_forward(unsigned int hooknum,
+ static unsigned int selinux_ip_output(struct sk_buff *skb,
+ u16 family)
+ {
++ struct sock *sk;
+ u32 sid;
+
+ if (!netlbl_enabled())
+@@ -4698,8 +4719,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
+ /* we do this in the LOCAL_OUT path and not the POST_ROUTING path
+ * because we want to make sure we apply the necessary labeling
+ * before IPsec is applied so we can leverage AH protection */
+- if (skb->sk) {
+- struct sk_security_struct *sksec = skb->sk->sk_security;
++ sk = skb->sk;
++ if (sk) {
++ struct sk_security_struct *sksec;
++
++ if (sk->sk_state == TCP_LISTEN)
++ /* if the socket is the listening state then this
++ * packet is a SYN-ACK packet which means it needs to
++ * be labeled based on the connection/request_sock and
++ * not the parent socket. unfortunately, we can't
++ * lookup the request_sock yet as it isn't queued on
++ * the parent socket until after the SYN-ACK is sent.
++ * the "solution" is to simply pass the packet as-is
++ * as any IP option based labeling should be copied
++ * from the initial connection request (in the IP
++ * layer). it is far from ideal, but until we get a
++ * security label in the packet itself this is the
++ * best we can do. */
++ return NF_ACCEPT;
++
++ /* standard practice, label using the parent socket */
++ sksec = sk->sk_security;
+ sid = sksec->sid;
+ } else
+ sid = SECINITSID_KERNEL;
+@@ -4784,12 +4824,12 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+ if (!secmark_active && !peerlbl_active)
+ return NF_ACCEPT;
+
+- /* if the packet is being forwarded then get the peer label from the
+- * packet itself; otherwise check to see if it is from a local
+- * application or the kernel, if from an application get the peer label
+- * from the sending socket, otherwise use the kernel's sid */
+ sk = skb->sk;
+ if (sk == NULL) {
++ /* Without an associated socket the packet is either coming
++ * from the kernel or it is being forwarded; check the packet
++ * to determine which and if the packet is being forwarded
++ * query the packet directly to determine the security label. */
+ if (skb->skb_iif) {
+ secmark_perm = PACKET__FORWARD_OUT;
+ if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
+@@ -4798,7 +4838,26 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+ secmark_perm = PACKET__SEND;
+ peer_sid = SECINITSID_KERNEL;
+ }
++ } else if (sk->sk_state == TCP_LISTEN) {
++ /* Locally generated packet but the associated socket is in the
++ * listening state which means this is a SYN-ACK packet. In
++ * this particular case the correct security label is assigned
++ * to the connection/request_sock but unfortunately we can't
++ * query the request_sock as it isn't queued on the parent
++ * socket until after the SYN-ACK packet is sent; the only
++ * viable choice is to regenerate the label like we do in
++ * selinux_inet_conn_request(). See also selinux_ip_output()
++ * for similar problems. */
++ u32 skb_sid;
++ struct sk_security_struct *sksec = sk->sk_security;
++ if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
++ return NF_DROP;
++ if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
++ return NF_DROP;
++ secmark_perm = PACKET__SEND;
+ } else {
++ /* Locally generated packet, fetch the security label from the
++ * associated socket. */
+ struct sk_security_struct *sksec = sk->sk_security;
+ peer_sid = sksec->sid;
+ secmark_perm = PACKET__SEND;
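
selinux_conn_sid() above centralizes the one decision the SYN-ACK paths now share: if the packet carries a valid SID, combine its MLS component with the socket's user:role:type; otherwise inherit the socket SID unchanged. A userspace sketch of the control flow only; the bit masking below merely stands in for security_sid_mls_copy(), which lives in the SELinux security server and is not reproduced here:

#include <stdint.h>

#define SECSID_NULL 0u

/* illustrative substitute for security_sid_mls_copy() */
static int sid_mls_copy(uint32_t sk_sid, uint32_t skb_sid, uint32_t *out)
{
	*out = (sk_sid & 0xffff0000u) | (skb_sid & 0x0000ffffu);
	return 0;
}

int compute_conn_sid(uint32_t sk_sid, uint32_t skb_sid, uint32_t *out)
{
	if (skb_sid != SECSID_NULL)
		return sid_mls_copy(sk_sid, skb_sid, out);
	*out = sk_sid;	/* no packet label: inherit the socket SID */
	return 0;
}
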
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index c4671d00babd..c7f6d1cab606 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -474,6 +474,20 @@ static void invalidate_nid_path(struct hda_codec *codec, int idx)
+ memset(path, 0, sizeof(*path));
+ }
+
++/* return a DAC if paired to the given pin by codec driver */
++static hda_nid_t get_preferred_dac(struct hda_codec *codec, hda_nid_t pin)
++{
++ struct hda_gen_spec *spec = codec->spec;
++ const hda_nid_t *list = spec->preferred_dacs;
++
++ if (!list)
++ return 0;
++ for (; *list; list += 2)
++ if (*list == pin)
++ return list[1];
++ return 0;
++}
++
+ /* look for an empty DAC slot */
+ static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin,
+ bool is_digital)
+@@ -1192,7 +1206,14 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
+ continue;
+ }
+
+- dacs[i] = look_for_dac(codec, pin, false);
++ dacs[i] = get_preferred_dac(codec, pin);
++ if (dacs[i]) {
++ if (is_dac_already_used(codec, dacs[i]))
++ badness += bad->shared_primary;
++ }
++
++ if (!dacs[i])
++ dacs[i] = look_for_dac(codec, pin, false);
+ if (!dacs[i] && !i) {
+ /* try to steal the DAC of surrounds for the front */
+ for (j = 1; j < num_outs; j++) {
+@@ -4297,6 +4318,26 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
+ return AC_PWRST_D3;
+ }
+
++/* mute all aamix inputs initially; parse up to the first leaves */
++static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
++{
++ int i, nums;
++ const hda_nid_t *conn;
++ bool has_amp;
++
++ nums = snd_hda_get_conn_list(codec, mix, &conn);
++ has_amp = nid_has_mute(codec, mix, HDA_INPUT);
++ for (i = 0; i < nums; i++) {
++ if (has_amp)
++ snd_hda_codec_amp_stereo(codec, mix,
++ HDA_INPUT, i,
++ 0xff, HDA_AMP_MUTE);
++ else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
++ snd_hda_codec_amp_stereo(codec, conn[i],
++ HDA_OUTPUT, 0,
++ 0xff, HDA_AMP_MUTE);
++ }
++}
+
+ /*
+ * Parse the given BIOS configuration and set up the hda_gen_spec
+@@ -4435,6 +4476,10 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
+ }
+ }
+
++	/* mute all aamix inputs initially */
++ if (spec->mixer_nid)
++ mute_all_mixer_nid(codec, spec->mixer_nid);
++
+ dig_only:
+ parse_digital(codec);
+
+diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
+index 7e45cb44d151..0929a06df812 100644
+--- a/sound/pci/hda/hda_generic.h
++++ b/sound/pci/hda/hda_generic.h
+@@ -249,6 +249,9 @@ struct hda_gen_spec {
+ const struct badness_table *main_out_badness;
+ const struct badness_table *extra_out_badness;
+
++ /* preferred pin/DAC pairs; an array of paired NIDs */
++ const hda_nid_t *preferred_dacs;
++
+ /* loopback mixing mode */
+ bool aamix_mode;
+
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index f684a4f8c797..38aa080681a3 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -324,6 +324,14 @@ static int patch_ad1986a(struct hda_codec *codec)
+ {
+ int err;
+ struct ad198x_spec *spec;
++ static hda_nid_t preferred_pairs[] = {
++ 0x1a, 0x03,
++ 0x1b, 0x03,
++ 0x1c, 0x04,
++ 0x1d, 0x05,
++ 0x1e, 0x03,
++ 0
++ };
+
+ err = alloc_ad_spec(codec);
+ if (err < 0)
+@@ -344,6 +352,8 @@ static int patch_ad1986a(struct hda_codec *codec)
+ * So, let's disable the shared stream.
+ */
+ spec->gen.multiout.no_share_stream = 1;
++ /* give fixed DAC/pin pairs */
++ spec->gen.preferred_dacs = preferred_pairs;
+
+ /* AD1986A can't manage the dynamic pin on/off smoothly */
+ spec->gen.auto_mute_via_amp = 1;
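
The preferred_dacs mechanism added above is a flat, zero-terminated array of (pin, DAC) pairs that the generic parser consults before its usual DAC search. The lookup is a stride-2 walk; a self-contained sketch, with the NID values copied from the AD1986A table above:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t nid_t;

static nid_t lookup_pair(const nid_t *pairs, nid_t pin)
{
	if (!pairs)
		return 0;
	for (; *pairs; pairs += 2)	/* table ends at a 0 pin entry */
		if (pairs[0] == pin)
			return pairs[1];
	return 0;			/* no preferred DAC for this pin */
}

int main(void)
{
	static const nid_t preferred_pairs[] = {
		0x1a, 0x03, 0x1b, 0x03, 0x1c, 0x04, 0x1d, 0x05, 0x1e, 0x03, 0
	};
	printf("pin 0x1c -> DAC %#x\n", lookup_pair(preferred_pairs, 0x1c));
	return 0;
}
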
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 8d2d01b0cf86..f26c42c92db7 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2085,8 +2085,9 @@ static int simple_playback_build_controls(struct hda_codec *codec)
+ int err;
+
+ per_cvt = get_cvt(spec, 0);
+- err = snd_hda_create_spdif_out_ctls(codec, per_cvt->cvt_nid,
+- per_cvt->cvt_nid);
++ err = snd_hda_create_dig_out_ctls(codec, per_cvt->cvt_nid,
++ per_cvt->cvt_nid,
++ HDA_PCM_TYPE_HDMI);
+ if (err < 0)
+ return err;
+ return simple_hdmi_build_jack(codec, 0);
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 1cf9ccb01013..aac732d17c17 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1893,6 +1893,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
+ int r;
+ struct kvm_vcpu *vcpu, *v;
+
++ if (id >= KVM_MAX_VCPUS)
++ return -EINVAL;
++
+ vcpu = kvm_arch_vcpu_create(kvm, id);
+ if (IS_ERR(vcpu))
+ return PTR_ERR(vcpu);
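
The kvm_main.c hunk above rejects out-of-range vcpu ids before anything is allocated, the usual validate-then-allocate ordering. A compact sketch; the limit and structures are illustrative:

#include <errno.h>
#include <stddef.h>

#define MAX_VCPUS 8	/* stand-in for KVM_MAX_VCPUS */

struct vcpu { unsigned int id; };

static struct vcpu *vcpus[MAX_VCPUS];

int create_vcpu(unsigned int id, struct vcpu *v)
{
	if (id >= MAX_VCPUS)	/* validate before touching the array */
		return -EINVAL;
	if (vcpus[id])
		return -EEXIST;
	v->id = id;
	vcpus[id] = v;
	return 0;
}
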
diff --git a/1006_linux-3.12.7.patch b/1006_linux-3.12.7.patch
new file mode 100644
index 00000000..75566aad
--- /dev/null
+++ b/1006_linux-3.12.7.patch
@@ -0,0 +1,5182 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index fcbb736d55fe..4f7c57cb6022 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1515,6 +1515,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+
+ * atapi_dmadir: Enable ATAPI DMADIR bridge support
+
++ * disable: Disable this device.
++
+ If there are multiple matching configurations changing
+ the same attribute, the last one is used.
+
+diff --git a/Makefile b/Makefile
+index 2b23383311ff..c2f0b7985b41 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
+index 413b4c29e782..8a01060f421a 100644
+--- a/arch/arm/boot/dts/r8a7790.dtsi
++++ b/arch/arm/boot/dts/r8a7790.dtsi
+@@ -152,7 +152,7 @@
+
+ sdhi0: sdhi@ee100000 {
+ compatible = "renesas,sdhi-r8a7790";
+- reg = <0 0xee100000 0 0x100>;
++ reg = <0 0xee100000 0 0x200>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 165 4>;
+ cap-sd-highspeed;
+@@ -161,7 +161,7 @@
+
+ sdhi1: sdhi@ee120000 {
+ compatible = "renesas,sdhi-r8a7790";
+- reg = <0 0xee120000 0 0x100>;
++ reg = <0 0xee120000 0 0x200>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 166 4>;
+ cap-sd-highspeed;
+diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
+index 80559cbdbc87..aad69ba7503f 100644
+--- a/arch/arm/boot/dts/sun7i-a20.dtsi
++++ b/arch/arm/boot/dts/sun7i-a20.dtsi
+@@ -170,7 +170,7 @@
+ emac: ethernet@01c0b000 {
+ compatible = "allwinner,sun4i-emac";
+ reg = <0x01c0b000 0x1000>;
+- interrupts = <0 55 1>;
++ interrupts = <0 55 4>;
+ clocks = <&ahb_gates 17>;
+ status = "disabled";
+ };
+@@ -186,7 +186,7 @@
+ pio: pinctrl@01c20800 {
+ compatible = "allwinner,sun7i-a20-pinctrl";
+ reg = <0x01c20800 0x400>;
+- interrupts = <0 28 1>;
++ interrupts = <0 28 4>;
+ clocks = <&apb0_gates 5>;
+ gpio-controller;
+ interrupt-controller;
+@@ -230,12 +230,12 @@
+ timer@01c20c00 {
+ compatible = "allwinner,sun4i-timer";
+ reg = <0x01c20c00 0x90>;
+- interrupts = <0 22 1>,
+- <0 23 1>,
+- <0 24 1>,
+- <0 25 1>,
+- <0 67 1>,
+- <0 68 1>;
++ interrupts = <0 22 4>,
++ <0 23 4>,
++ <0 24 4>,
++ <0 25 4>,
++ <0 67 4>,
++ <0 68 4>;
+ clocks = <&osc24M>;
+ };
+
+@@ -247,7 +247,7 @@
+ uart0: serial@01c28000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28000 0x400>;
+- interrupts = <0 1 1>;
++ interrupts = <0 1 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb1_gates 16>;
+@@ -257,7 +257,7 @@
+ uart1: serial@01c28400 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28400 0x400>;
+- interrupts = <0 2 1>;
++ interrupts = <0 2 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb1_gates 17>;
+@@ -267,7 +267,7 @@
+ uart2: serial@01c28800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28800 0x400>;
+- interrupts = <0 3 1>;
++ interrupts = <0 3 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb1_gates 18>;
+@@ -277,7 +277,7 @@
+ uart3: serial@01c28c00 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28c00 0x400>;
+- interrupts = <0 4 1>;
++ interrupts = <0 4 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb1_gates 19>;
+@@ -287,7 +287,7 @@
+ uart4: serial@01c29000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c29000 0x400>;
+- interrupts = <0 17 1>;
++ interrupts = <0 17 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb1_gates 20>;
+@@ -297,7 +297,7 @@
+ uart5: serial@01c29400 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c29400 0x400>;
+- interrupts = <0 18 1>;
++ interrupts = <0 18 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb1_gates 21>;
+@@ -307,7 +307,7 @@
+ uart6: serial@01c29800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c29800 0x400>;
+- interrupts = <0 19 1>;
++ interrupts = <0 19 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb1_gates 22>;
+@@ -317,7 +317,7 @@
+ uart7: serial@01c29c00 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c29c00 0x400>;
+- interrupts = <0 20 1>;
++ interrupts = <0 20 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb1_gates 23>;
+diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
+index dd8da2c5399f..ba1cba94c31d 100644
+--- a/arch/arm/mach-omap2/board-ldp.c
++++ b/arch/arm/mach-omap2/board-ldp.c
+@@ -243,12 +243,18 @@ static void __init ldp_display_init(void)
+
+ static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
+ {
++ int res;
++
+ /* LCD enable GPIO */
+ ldp_lcd_pdata.enable_gpio = gpio + 7;
+
+ /* Backlight enable GPIO */
+ ldp_lcd_pdata.backlight_gpio = gpio + 15;
+
++ res = platform_device_register(&ldp_lcd_device);
++ if (res)
++ pr_err("Unable to register LCD: %d\n", res);
++
+ return 0;
+ }
+
+@@ -347,7 +353,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
+
+ static struct platform_device *ldp_devices[] __initdata = {
+ &ldp_gpio_keys_device,
+- &ldp_lcd_device,
+ };
+
+ #ifdef CONFIG_OMAP_MUX
+diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+index 56cebb05509e..d23c77fadb31 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
+
+ /* gpmc */
+ static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
+- { .irq = 20 },
++ { .irq = 20 + OMAP_INTC_START, },
+ { .irq = -1 }
+ };
+
+@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
+ };
+
+ static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
+- { .irq = 52 },
++ { .irq = 52 + OMAP_INTC_START, },
+ { .irq = -1 }
+ };
+
+diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+index f234cbec0cb9..60f23440082e 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+@@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
+ };
+
+ static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
+- { .irq = 20 },
++ { .irq = 20 + OMAP_INTC_START, },
+ { .irq = -1 }
+ };
+
+@@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
+
+ static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
+ static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
+- { .irq = 24 },
++ { .irq = 24 + OMAP_INTC_START, },
+ { .irq = -1 }
+ };
+
+@@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
+
+ static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
+ static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
+- { .irq = 28 },
++ { .irq = 28 + OMAP_INTC_START, },
+ { .irq = -1 }
+ };
+
+diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+index db32d5380b11..18f333c440db 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
+ .class = &dra7xx_uart_hwmod_class,
+ .clkdm_name = "l4per_clkdm",
+ .main_clk = "uart1_gfclk_mux",
+- .flags = HWMOD_SWSUP_SIDLE_ACT,
++ .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index fecdbf7de82e..c484d5625ffb 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
+ {
+ int err, len, type, disabled = !ctrl.enabled;
+
+- if (disabled) {
+- len = 0;
+- type = HW_BREAKPOINT_EMPTY;
+- } else {
+- err = arch_bp_generic_fields(ctrl, &len, &type);
+- if (err)
+- return err;
+-
+- switch (note_type) {
+- case NT_ARM_HW_BREAK:
+- if ((type & HW_BREAKPOINT_X) != type)
+- return -EINVAL;
+- break;
+- case NT_ARM_HW_WATCH:
+- if ((type & HW_BREAKPOINT_RW) != type)
+- return -EINVAL;
+- break;
+- default:
++ attr->disabled = disabled;
++ if (disabled)
++ return 0;
++
++ err = arch_bp_generic_fields(ctrl, &len, &type);
++ if (err)
++ return err;
++
++ switch (note_type) {
++ case NT_ARM_HW_BREAK:
++ if ((type & HW_BREAKPOINT_X) != type)
+ return -EINVAL;
+- }
++ break;
++ case NT_ARM_HW_WATCH:
++ if ((type & HW_BREAKPOINT_RW) != type)
++ return -EINVAL;
++ break;
++ default:
++ return -EINVAL;
+ }
+
+ attr->bp_len = len;
+ attr->bp_type = type;
+- attr->disabled = disabled;
+
+ return 0;
+ }
+diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
+index cca12f084842..9cf9635e8f44 100644
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -265,7 +265,7 @@ do_kvm_##n: \
+ subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
+ beq- 1f; \
+ ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
+-1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
++1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
+ blt+ cr1,3f; /* abort if it is */ \
+ li r1,(n); /* will be reloaded later */ \
+ sth r1,PACA_TRAP_SAVE(r13); \
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index 3d11d8038dee..9141e894c89b 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -467,6 +467,7 @@ _STATIC(__after_prom_start)
+ mtctr r8
+ bctr
+
++.balign 8
+ p_end: .llong _end - _stext
+
+ 4: /* Now copy the rest of the kernel up to _end */
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+index 043eec8461e7..46ff25149282 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+@@ -473,11 +473,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ slb_v = vcpu->kvm->arch.vrma_slb_v;
+ }
+
++ preempt_disable();
+ /* Find the HPTE in the hash table */
+ index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
+ HPTE_V_VALID | HPTE_V_ABSENT);
+- if (index < 0)
++ if (index < 0) {
++ preempt_enable();
+ return -ENOENT;
++ }
+ hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
+ v = hptep[0] & ~HPTE_V_HVLOCK;
+ gr = kvm->arch.revmap[index].guest_rpte;
+@@ -485,6 +488,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ /* Unlock the HPTE */
+ asm volatile("lwsync" : : : "memory");
+ hptep[0] = v;
++ preempt_enable();
+
+ gpte->eaddr = eaddr;
+ gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
+diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+index 9c515440ad1a..ea17b3067233 100644
+--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+@@ -749,6 +749,10 @@ static int slb_base_page_shift[4] = {
+ 20, /* 1M, unsupported */
+ };
+
++/* When called from virtmode, this function should be protected by
++ * preempt_disable(); otherwise, holding HPTE_V_HVLOCK
++ * can trigger a deadlock.
++ */
+ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
+ unsigned long valid)
+ {
+diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
+index 2a0a596ebf67..d77f2f6c7ff0 100644
+--- a/arch/sh/kernel/sh_ksyms_32.c
++++ b/arch/sh/kernel/sh_ksyms_32.c
+@@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
+ EXPORT_SYMBOL(copy_page);
+ EXPORT_SYMBOL(__clear_user);
+ EXPORT_SYMBOL(empty_zero_page);
++#ifdef CONFIG_FLATMEM
++/* need in pfn_valid macro */
++EXPORT_SYMBOL(min_low_pfn);
++EXPORT_SYMBOL(max_low_pfn);
++#endif
+
+ #define DECLARE_EXPORT(name) \
+ extern void name(void);EXPORT_SYMBOL(name)
+diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
+index 7b95f29e3174..3baff31e58cf 100644
+--- a/arch/sh/lib/Makefile
++++ b/arch/sh/lib/Makefile
+@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
+ checksum.o strlen.o div64.o div64-generic.o
+
+ # Extracted from libgcc
+-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
++obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+ ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
+ udiv_qrnnd.o
+
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 36760317814f..90f289f0ec8e 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -616,7 +616,7 @@ static inline unsigned long pte_present(pte_t pte)
+ }
+
+ #define pte_accessible pte_accessible
+-static inline unsigned long pte_accessible(pte_t a)
++static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
+ {
+ return pte_val(a) & _PAGE_VALID;
+ }
+@@ -806,7 +806,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+ * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
+ * and SUN4V pte layout, so this inline test is fine.
+ */
+- if (likely(mm != &init_mm) && pte_accessible(orig))
++ if (likely(mm != &init_mm) && pte_accessible(mm, orig))
+ tlb_batch_add(mm, addr, ptep, orig, fullmm);
+ }
+
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 3d1999458709..bbc8b12fa443 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
+ }
+
+ #define pte_accessible pte_accessible
+-static inline int pte_accessible(pte_t a)
++static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
+ {
+- return pte_flags(a) & _PAGE_PRESENT;
++ if (pte_flags(a) & _PAGE_PRESENT)
++ return true;
++
++ if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
++ mm_tlb_flush_pending(mm))
++ return true;
++
++ return false;
+ }
+
+ static inline int pte_hidden(pte_t pte)
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index ec7299566f79..a51efc90b534 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c)
+ set_cpu_cap(c, X86_FEATURE_PEBS);
+ }
+
+- if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
++ if (c->x86 == 6 && cpu_has_clflush &&
++ (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
+ set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
+
+ #ifdef CONFIG_X86_64
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index dec48bfaddb8..1673940cf9c3 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1350,6 +1350,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
+ return;
+ }
+
++ if (!kvm_vcpu_is_bsp(apic->vcpu))
++ value &= ~MSR_IA32_APICBASE_BSP;
++ vcpu->arch.apic_base = value;
++
+ /* update jump label if enable bit changes */
+ if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
+ if (value & MSR_IA32_APICBASE_ENABLE)
+@@ -1359,10 +1363,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
+ recalculate_apic_map(vcpu->kvm);
+ }
+
+- if (!kvm_vcpu_is_bsp(apic->vcpu))
+- value &= ~MSR_IA32_APICBASE_BSP;
+-
+- vcpu->arch.apic_base = value;
+ if ((old_value ^ value) & X2APIC_ENABLE) {
+ if (value & X2APIC_ENABLE) {
+ u32 id = kvm_apic_id(apic);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 2b2fce1b2009..6128914ee873 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8218,8 +8218,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+ kvm_set_cr4(vcpu, vmcs12->host_cr4);
+
+- if (nested_cpu_has_ept(vmcs12))
+- nested_ept_uninit_mmu_context(vcpu);
++ nested_ept_uninit_mmu_context(vcpu);
+
+ kvm_set_cr3(vcpu, vmcs12->host_cr3);
+ kvm_mmu_reset_context(vcpu);
+diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
+index dd74e46828c0..0596e8e0cc19 100644
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+ pte_t pte = gup_get_pte(ptep);
+ struct page *page;
+
++ /* Similar to the PMD case, NUMA hinting must take slow path */
++ if (pte_numa(pte)) {
++ pte_unmap(ptep);
++ return 0;
++ }
++
+ if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
+ pte_unmap(ptep);
+ return 0;
+@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+ if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+ return 0;
+ if (unlikely(pmd_large(pmd))) {
++ /*
++ * NUMA hinting faults need to be handled in the GUP
++ * slowpath for accounting purposes and so that they
++ * can be serialised against THP migration.
++ */
++ if (pmd_numa(pmd))
++ return 0;
+ if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
+ return 0;
+ } else {
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index fb78bb9ad8f6..ab19263baf39 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -156,6 +156,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
+ { "80860F14", (unsigned long)&byt_sdio_dev_desc },
+ { "80860F41", (unsigned long)&byt_i2c_dev_desc },
+ { "INT33B2", },
++ { "INT33FC", },
+
+ { }
+ };
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index b587ec8257b2..661a5b7f5104 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
+ }
+ EXPORT_SYMBOL(acpi_bus_get_private_data);
+
++void acpi_bus_no_hotplug(acpi_handle handle)
++{
++ struct acpi_device *adev = NULL;
++
++ acpi_bus_get_device(handle, &adev);
++ if (adev)
++ adev->flags.no_hotplug = true;
++}
++EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
++
+ static void acpi_print_osc_error(acpi_handle handle,
+ struct acpi_osc_context *context, char *error)
+ {
+diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
+index 58debb0acc3a..566cca4f9dc2 100644
+--- a/drivers/ata/ahci_imx.c
++++ b/drivers/ata/ahci_imx.c
+@@ -60,7 +60,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
+ /*
+ * set PHY Paremeters, two steps to configure the GPR13,
+ * one write for rest of parameters, mask of first write
+- * is 0x07fffffd, and the other one write for setting
++ * is 0x07ffffff, and the other one write for setting
+ * the mpll_clk_en.
+ */
+ regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
+@@ -71,6 +71,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
+ | IMX6Q_GPR13_SATA_TX_ATTEN_MASK
+ | IMX6Q_GPR13_SATA_TX_BOOST_MASK
+ | IMX6Q_GPR13_SATA_TX_LVL_MASK
++ | IMX6Q_GPR13_SATA_MPLL_CLK_EN
+ | IMX6Q_GPR13_SATA_TX_EDGE_RATE
+ , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
+ | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 81a94a3919db..2c2780a19609 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
+ "failed to get NCQ Send/Recv Log Emask 0x%x\n",
+ err_mask);
+ } else {
++ u8 *cmds = dev->ncq_send_recv_cmds;
++
+ dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
+- memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
+- ATA_LOG_NCQ_SEND_RECV_SIZE);
++ memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
++
++ if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
++ ata_dev_dbg(dev, "disabling queued TRIM support\n");
++ cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
++ ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
++ }
+ }
+ }
+
+@@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
++	/* Seagate Momentus SpinPoint M8 seems to have FPDMA_AA issues */
++ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
++
+ /* Blacklist entries taken from Silicon Image 3124/3132
+ Windows driver .inf file - also several Linux problem reports */
+ { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
+@@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+
++ /* devices that don't properly handle queued TRIM commands */
++ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
++ { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
++
+ /* End Marker */
+ { }
+ };
+@@ -6520,6 +6534,7 @@ static int __init ata_parse_force_one(char **cur,
+ { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
+ { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
+ { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
++ { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
+ };
+ char *start = *cur, *p = *cur;
+ char *id, *val, *endp;
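
The new blacklist entries above use shell-style wildcards ("Micron_M500*", "Crucial_CT???M500SSD1") matched against the reported model string, and the matching entry's horkage flags are ORed into the device. A userspace approximation built on fnmatch(3); the flag value is illustrative:

#include <fnmatch.h>
#include <stddef.h>

struct blacklist_entry { const char *model; unsigned int horkage; };

#define ATA_HORKAGE_NO_NCQ_TRIM (1u << 0)	/* illustrative bit value */

static const struct blacklist_entry blacklist[] = {
	{ "Micron_M500*",          ATA_HORKAGE_NO_NCQ_TRIM },
	{ "Crucial_CT???M500SSD1", ATA_HORKAGE_NO_NCQ_TRIM },
	{ NULL, 0 }
};

unsigned int lookup_horkage(const char *model)
{
	const struct blacklist_entry *e;

	for (e = blacklist; e->model; e++)
		if (fnmatch(e->model, model, 0) == 0)
			return e->horkage;
	return 0;
}
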
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index ab58556d347c..377eb889f555 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
+ return;
+ }
+
++ /*
++ * XXX - UGLY HACK
++ *
++ * The block layer suspend/resume path is fundamentally broken due
++ * to freezable kthreads and workqueue and may deadlock if a block
++ * device gets removed while resume is in progress. I don't know
++ * what the solution is short of removing freezable kthreads and
++ * workqueues altogether.
++ *
++ * The following is an ugly hack to avoid kicking off device
++ * removal while freezer is active. This is a joke but does avoid
++ * this particular deadlock scenario.
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=62801
++ * http://marc.info/?l=linux-kernel&m=138695698516487
++ */
++#ifdef CONFIG_FREEZER
++ while (pm_freezing)
++ msleep(10);
++#endif
++
+ DPRINTK("ENTER\n");
+ mutex_lock(&ap->scsi_scan_mutex);
+
+diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
+index 4cbae4f762b1..b386be6018d7 100644
+--- a/drivers/clocksource/dw_apb_timer_of.c
++++ b/drivers/clocksource/dw_apb_timer_of.c
+@@ -108,12 +108,11 @@ static void add_clocksource(struct device_node *source_timer)
+
+ static u32 read_sched_clock(void)
+ {
+- return __raw_readl(sched_io_base);
++ return ~__raw_readl(sched_io_base);
+ }
+
+ static const struct of_device_id sptimer_ids[] __initconst = {
+ { .compatible = "picochip,pc3x2-rtc" },
+- { .compatible = "snps,dw-apb-timer-sp" },
+ { /* Sentinel */ },
+ };
+
+@@ -153,4 +152,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
+ num_called++;
+ }
+ CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
+-CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init);
++CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
++CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
++CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
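
The one-character fix above ("~__raw_readl") accounts for the DW APB timer counting down: sched_clock() needs a monotonically increasing value, and the bitwise complement of a down-counter provides exactly that. In miniature:

#include <stdint.h>

/* pretend MMIO register that decrements toward 0 */
static volatile uint32_t hw_counter = 0xfffffff0u;

uint32_t read_sched_clock(void)
{
	return ~hw_counter;	/* inverted down-counter counts up */
}
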
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index eb3fdc755000..99d8ab548a34 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -550,6 +550,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
+ cpu = all_cpu_data[cpunum];
+
+ intel_pstate_get_cpu_pstates(cpu);
++ if (!cpu->pstate.current_pstate) {
++ all_cpu_data[cpunum] = NULL;
++ kfree(cpu);
++ return -ENODATA;
++ }
+
+ cpu->cpu = cpunum;
+ cpu->pstate_policy =
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index f238cfd33847..b61c5fc64dc3 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -339,6 +339,7 @@ config NET_DMA
+ bool "Network: TCP receive copy offload"
+ depends on DMA_ENGINE && NET
+ default (INTEL_IOATDMA || FSL_DMA)
++ depends on BROKEN
+ help
+ This enables the use of DMA engines in the network stack to
+ offload receive copy-to-user operations, freeing CPU cycles.
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index b0bb056458a3..281029daf98c 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = {
+ .cmd_per_lun = 1,
+ .can_queue = 1,
+ .sdev_attrs = sbp2_scsi_sysfs_attrs,
+- .no_write_same = 1,
+ };
+
+ MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
+index 7b37300973db..2baf0ddf7e02 100644
+--- a/drivers/gpio/gpio-msm-v2.c
++++ b/drivers/gpio/gpio-msm-v2.c
+@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
+- clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
++ clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
+ __clear_bit(gpio, msm_gpio.enabled_irqs);
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+ }
+@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ __set_bit(gpio, msm_gpio.enabled_irqs);
+- set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
++ set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
+ writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+ }
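
The gpio-msm-v2 fix above is the classic bit-index-versus-bit-mask confusion: INTR_RAW_STATUS_EN and INTR_ENABLE are bit positions, so ORing them directly yields a bogus mask until each goes through BIT(). Demonstrated standalone with illustrative positions:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

#define INTR_RAW_STATUS_EN 3	/* bit positions, not masks */
#define INTR_ENABLE        4

int main(void)
{
	uint32_t wrong = INTR_RAW_STATUS_EN | INTR_ENABLE;		/* 0x7, bits 0-2 */
	uint32_t right = BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE);	/* 0x18 */

	printf("wrong mask: %#x, right mask: %#x\n", wrong, right);
	return 0;
}
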
+diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
+index db2de1a2dbcf..77d805a3321a 100644
+--- a/drivers/gpio/gpio-twl4030.c
++++ b/drivers/gpio/gpio-twl4030.c
+@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
+ if (offset < TWL4030_GPIO_MAX)
+ ret = twl4030_set_gpio_direction(offset, 1);
+ else
+- ret = -EINVAL;
++ ret = -EINVAL; /* LED outputs can't be set as input */
+
+ if (!ret)
+ priv->direction &= ~BIT(offset);
+@@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
+ static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
+ {
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+- int ret = -EINVAL;
++ int ret = 0;
+
+ mutex_lock(&priv->mutex);
+- if (offset < TWL4030_GPIO_MAX)
++ if (offset < TWL4030_GPIO_MAX) {
+ ret = twl4030_set_gpio_direction(offset, 0);
++ if (ret) {
++ mutex_unlock(&priv->mutex);
++ return ret;
++ }
++ }
++
++ /*
++	 * LED gpios, i.e. offset >= TWL4030_GPIO_MAX, are always output
++ */
+
+ priv->direction |= BIT(offset);
+ mutex_unlock(&priv->mutex);
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 830f7501cb4d..d0d3eae05a1a 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -68,6 +68,8 @@
+ #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+ /* Force reduced-blanking timings for detailed modes */
+ #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
++/* Force 8bpc */
++#define EDID_QUIRK_FORCE_8BPC (1 << 8)
+
+ struct detailed_mode_closure {
+ struct drm_connector *connector;
+@@ -128,6 +130,9 @@ static struct edid_quirk {
+
+ /* Medion MD 30217 PG */
+ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
++
++ /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
++ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+ };
+
+ /*
+@@ -3236,6 +3241,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+
+ drm_add_display_info(edid, &connector->display_info);
+
++ if (quirks & EDID_QUIRK_FORCE_8BPC)
++ connector->display_info.bpc = 8;
++
+ return num_modes;
+ }
+ EXPORT_SYMBOL(drm_add_edid_modes);
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index d5c784d48671..5a25f2476c3b 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+
++ /*
++ * The dri breadcrumb update races against the drm master disappearing.
++ * Instead of trying to fix this (this is by far not the only ums issue)
++ * just don't do the update in kms mode.
++ */
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ return;
++
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+@@ -1848,8 +1856,10 @@ void i915_driver_lastclose(struct drm_device * dev)
+
+ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+ {
++ mutex_lock(&dev->struct_mutex);
+ i915_gem_context_close(dev, file_priv);
+ i915_gem_release(dev, file_priv);
++ mutex_unlock(&dev->struct_mutex);
+ }
+
+ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index cdfb9da0e4ce..b00b32c992b0 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2278,15 +2278,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
+ kfree(request);
+ }
+
+-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+- struct intel_ring_buffer *ring)
++static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
++ struct intel_ring_buffer *ring)
+ {
+- u32 completed_seqno;
+- u32 acthd;
++ u32 completed_seqno = ring->get_seqno(ring, false);
++ u32 acthd = intel_ring_get_active_head(ring);
++ struct drm_i915_gem_request *request;
++
++ list_for_each_entry(request, &ring->request_list, list) {
++ if (i915_seqno_passed(completed_seqno, request->seqno))
++ continue;
+
+- acthd = intel_ring_get_active_head(ring);
+- completed_seqno = ring->get_seqno(ring, false);
++ i915_set_reset_status(ring, request, acthd);
++ }
++}
+
++static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
++ struct intel_ring_buffer *ring)
++{
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+@@ -2294,9 +2303,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+ struct drm_i915_gem_request,
+ list);
+
+- if (request->seqno > completed_seqno)
+- i915_set_reset_status(ring, request, acthd);
+-
+ i915_gem_free_request(request);
+ }
+
+@@ -2338,8 +2344,16 @@ void i915_gem_reset(struct drm_device *dev)
+ struct intel_ring_buffer *ring;
+ int i;
+
++ /*
++ * Before we free the objects from the requests, we need to inspect
++ * them for finding the guilty party. As the requests only borrow
++ * their reference to the objects, the inspection must be done first.
++ */
++ for_each_ring(ring, dev_priv, i)
++ i915_gem_reset_ring_status(dev_priv, ring);
++
+ for_each_ring(ring, dev_priv, i)
+- i915_gem_reset_ring_lists(dev_priv, ring);
++ i915_gem_reset_ring_cleanup(dev_priv, ring);
+
+ i915_gem_restore_fences(dev);
+ }
+diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
+index 403309c2a7d6..bb6eecb3551c 100644
+--- a/drivers/gpu/drm/i915/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/i915_gem_context.c
+@@ -328,10 +328,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
+ {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+- mutex_lock(&dev->struct_mutex);
+ idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+ idr_destroy(&file_priv->context_idr);
+- mutex_unlock(&dev->struct_mutex);
+ }
+
+ static struct i915_hw_context *
+@@ -404,11 +402,21 @@ static int do_switch(struct i915_hw_context *to)
+ if (ret)
+ return ret;
+
+- /* Clear this page out of any CPU caches for coherent swap-in/out. Note
++ /*
++ * Pin can switch back to the default context if we end up calling into
++ * evict_everything - as a last ditch gtt defrag effort that also
++ * switches to the default context. Hence we need to reload from here.
++ */
++ from = ring->last_context;
++
++ /*
++ * Clear this page out of any CPU caches for coherent swap-in/out. Note
+ * that thanks to write = false in this call and us not setting any gpu
+ * write domains when putting a context object onto the active list
+ * (when switching away from it), this won't block.
+- * XXX: We need a real interface to do this instead of trickery. */
++ *
++ * XXX: We need a real interface to do this instead of trickery.
++ */
+ ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+ if (ret) {
+ i915_gem_object_unpin(to->obj);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index f535670b42d1..aad6f7bfc589 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -6009,7 +6009,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+ uint32_t val;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+- WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
++ WARN(crtc->active, "CRTC for pipe %c enabled\n",
+ pipe_name(crtc->pipe));
+
+ WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+@@ -10592,7 +10592,9 @@ void intel_modeset_gem_init(struct drm_device *dev)
+
+ intel_setup_overlay(dev);
+
++ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, false);
++ drm_modeset_unlock_all(dev);
+ }
+
+ void intel_modeset_cleanup(struct drm_device *dev)
+@@ -10666,14 +10668,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
+ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
++ unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ u16 gmch_ctrl;
+
+- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
++ pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
+ if (state)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+- pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
++ pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+index dd7d2e182719..8160fbddbcfe 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
+ bool dsm_detected;
+ bool optimus_detected;
+ acpi_handle dhandle;
++ acpi_handle other_handle;
+ acpi_handle rom_handle;
+ } nouveau_dsm_priv;
+
+@@ -253,19 +254,17 @@ static struct vga_switcheroo_handler nouveau_dsm_handler = {
+
+ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
+ {
+- acpi_handle dhandle, nvidia_handle;
+- acpi_status status;
++ acpi_handle dhandle;
+ int retval = 0;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+- status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
+- if (ACPI_FAILURE(status)) {
++ if (!acpi_has_method(dhandle, "_DSM")) {
++ nouveau_dsm_priv.other_handle = dhandle;
+ return false;
+ }
+-
+ if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
+ retval |= NOUVEAU_DSM_HAS_MUX;
+
+@@ -331,6 +330,16 @@ static bool nouveau_dsm_detect(void)
+ printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+ acpi_method_name);
+ nouveau_dsm_priv.dsm_detected = true;
++ /*
++ * On some systems hotplug events are generated for the device
++ * being switched off when _DSM is executed. They cause ACPI
++ * hotplug to trigger and attempt to remove the device from
++ * the system, which causes it to break down. Prevent that from
++ * happening by setting the no_hotplug flag for the involved
++ * ACPI device objects.
++ */
++ acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
++ acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
+ ret = true;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index e893c5362402..32c6b0a60fb3 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -879,6 +879,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
+ if (nouveau_runtime_pm == 0)
+ return -EINVAL;
+
++ /* are we optimus enabled? */
++ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
++ DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
++ return -EINVAL;
++ }
++
+ drm_kms_helper_poll_disable(drm_dev);
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+ nouveau_switcheroo_optimus_dsm();
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 86d9ee08b13f..368e1b84f429 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1180,23 +1180,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+ fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+
+ if (rdev->family >= CHIP_BONAIRE) {
+- u32 num_pipe_configs = rdev->config.cik.max_tile_pipes;
+- u32 num_rb = rdev->config.cik.max_backends_per_se;
+- if (num_pipe_configs > 8)
+- num_pipe_configs = 8;
+- if (num_pipe_configs == 8)
+- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16);
+- else if (num_pipe_configs == 4) {
+- if (num_rb == 4)
+- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
+- else if (num_rb < 4)
+- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
+- } else if (num_pipe_configs == 2)
+- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
++ /* Read the pipe config from the 2D TILED SCANOUT mode.
++ * It should be the same for the other modes too, but not all
++ * modes set the pipe config field. */
++ u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
++
++ fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
+ } else if ((rdev->family == CHIP_TAHITI) ||
+ (rdev->family == CHIP_PITCAIRN))
+ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+- else if (rdev->family == CHIP_VERDE)
++ else if ((rdev->family == CHIP_VERDE) ||
++ (rdev->family == CHIP_OLAND) ||
++ (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
+ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
+ switch (radeon_crtc->crtc_id) {
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 9cd2bc989ac7..31f5f0e88328 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -2608,7 +2608,7 @@ static u32 cik_create_bitmask(u32 bit_width)
+ * Returns the disabled RB bitmask.
+ */
+ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
+- u32 max_rb_num, u32 se_num,
++ u32 max_rb_num_per_se,
+ u32 sh_per_se)
+ {
+ u32 data, mask;
+@@ -2622,7 +2622,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
+
+ data >>= BACKEND_DISABLE_SHIFT;
+
+- mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
++ mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
+
+ return data & mask;
+ }
+@@ -2639,7 +2639,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
+ */
+ static void cik_setup_rb(struct radeon_device *rdev,
+ u32 se_num, u32 sh_per_se,
+- u32 max_rb_num)
++ u32 max_rb_num_per_se)
+ {
+ int i, j;
+ u32 data, mask;
+@@ -2649,19 +2649,21 @@ static void cik_setup_rb(struct radeon_device *rdev,
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ cik_select_se_sh(rdev, i, j);
+- data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
++ data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
+ disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
+ }
+ }
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+ mask = 1;
+- for (i = 0; i < max_rb_num; i++) {
++ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
+ if (!(disabled_rbs & mask))
+ enabled_rbs |= mask;
+ mask <<= 1;
+ }
+
++ rdev->config.cik.backend_enable_mask = enabled_rbs;
++
+ for (i = 0; i < se_num; i++) {
+ cik_select_se_sh(rdev, i, 0xffffffff);
+ data = 0;
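
The cik.c hunks above make cik_setup_rb() enumerate render backends across all shader engines (max_rb_num_per_se * se_num) instead of only the per-SE count, and record the result in the new backend_enable_mask. A standalone sketch of that mask construction; the helper name and the example topology are illustrative, not from the patch:

#include <assert.h>
#include <stdint.h>

/* Build the enabled-RB mask from the disabled-RB mask, walking one bit
 * per render backend across every shader engine. The fix above is the
 * loop bound: rb_per_se * se_num instead of rb_per_se alone. */
static uint32_t enabled_rbs(uint32_t disabled, unsigned rb_per_se,
			    unsigned se_num)
{
	uint32_t enabled = 0, mask = 1;
	unsigned i;

	for (i = 0; i < rb_per_se * se_num; i++, mask <<= 1)
		if (!(disabled & mask))
			enabled |= mask;

	return enabled;
}

int main(void)
{
	/* Two SEs with two RBs each, RB #1 fused off: 0b1101 remain. */
	assert(enabled_rbs(0x2, 2, 2) == 0xd);
	return 0;
}
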
+diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
+index b6286068e111..aaf7ffce8b5b 100644
+--- a/drivers/gpu/drm/radeon/cik_sdma.c
++++ b/drivers/gpu/drm/radeon/cik_sdma.c
+@@ -468,7 +468,7 @@ int cik_copy_dma(struct radeon_device *rdev,
+ radeon_ring_write(ring, 0); /* src/dst endian swap */
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
+- radeon_ring_write(ring, dst_offset & 0xfffffffc);
++ radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
+index 9fcd338c0fcf..86ee09783925 100644
+--- a/drivers/gpu/drm/radeon/dce6_afmt.c
++++ b/drivers/gpu/drm/radeon/dce6_afmt.c
+@@ -132,7 +132,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+- if (sad_count < 0) {
++ if (sad_count <= 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+@@ -193,7 +193,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+- if (sad_count < 0) {
++ if (sad_count <= 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+index 57fcc4b16a52..b347fffa4519 100644
+--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
++++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+@@ -81,7 +81,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+- if (sad_count < 0) {
++ if (sad_count <= 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+@@ -134,7 +134,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+- if (sad_count < 0) {
++ if (sad_count <= 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index cac2866d79da..954eb9afbe71 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -900,6 +900,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ (rdev->pdev->device == 0x999C)) {
+ rdev->config.cayman.max_simds_per_se = 6;
+ rdev->config.cayman.max_backends_per_se = 2;
++ rdev->config.cayman.max_hw_contexts = 8;
++ rdev->config.cayman.sx_max_export_size = 256;
++ rdev->config.cayman.sx_max_export_pos_size = 64;
++ rdev->config.cayman.sx_max_export_smx_size = 192;
+ } else if ((rdev->pdev->device == 0x9903) ||
+ (rdev->pdev->device == 0x9904) ||
+ (rdev->pdev->device == 0x990A) ||
+@@ -910,6 +914,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ (rdev->pdev->device == 0x999D)) {
+ rdev->config.cayman.max_simds_per_se = 4;
+ rdev->config.cayman.max_backends_per_se = 2;
++ rdev->config.cayman.max_hw_contexts = 8;
++ rdev->config.cayman.sx_max_export_size = 256;
++ rdev->config.cayman.sx_max_export_pos_size = 64;
++ rdev->config.cayman.sx_max_export_smx_size = 192;
+ } else if ((rdev->pdev->device == 0x9919) ||
+ (rdev->pdev->device == 0x9990) ||
+ (rdev->pdev->device == 0x9991) ||
+@@ -920,9 +928,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ (rdev->pdev->device == 0x99A0)) {
+ rdev->config.cayman.max_simds_per_se = 3;
+ rdev->config.cayman.max_backends_per_se = 1;
++ rdev->config.cayman.max_hw_contexts = 4;
++ rdev->config.cayman.sx_max_export_size = 128;
++ rdev->config.cayman.sx_max_export_pos_size = 32;
++ rdev->config.cayman.sx_max_export_smx_size = 96;
+ } else {
+ rdev->config.cayman.max_simds_per_se = 2;
+ rdev->config.cayman.max_backends_per_se = 1;
++ rdev->config.cayman.max_hw_contexts = 4;
++ rdev->config.cayman.sx_max_export_size = 128;
++ rdev->config.cayman.sx_max_export_pos_size = 32;
++ rdev->config.cayman.sx_max_export_smx_size = 96;
+ }
+ rdev->config.cayman.max_texture_channel_caches = 2;
+ rdev->config.cayman.max_gprs = 256;
+@@ -930,10 +946,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ rdev->config.cayman.max_gs_threads = 32;
+ rdev->config.cayman.max_stack_entries = 512;
+ rdev->config.cayman.sx_num_of_sets = 8;
+- rdev->config.cayman.sx_max_export_size = 256;
+- rdev->config.cayman.sx_max_export_pos_size = 64;
+- rdev->config.cayman.sx_max_export_smx_size = 192;
+- rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sq_num_cf_insts = 2;
+
+ rdev->config.cayman.sc_prim_fifo_size = 0x40;
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 24f4960f59ee..f44ca5853ff2 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -1930,7 +1930,7 @@ struct si_asic {
+ unsigned sc_earlyz_tile_fifo_size;
+
+ unsigned num_tile_pipes;
+- unsigned num_backends_per_se;
++ unsigned backend_enable_mask;
+ unsigned backend_disable_mask_per_asic;
+ unsigned backend_map;
+ unsigned num_texture_channel_caches;
+@@ -1960,7 +1960,7 @@ struct cik_asic {
+ unsigned sc_earlyz_tile_fifo_size;
+
+ unsigned num_tile_pipes;
+- unsigned num_backends_per_se;
++ unsigned backend_enable_mask;
+ unsigned backend_disable_mask_per_asic;
+ unsigned backend_map;
+ unsigned num_texture_channel_caches;
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index d96070bf8388..d7e7c25feaaf 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -34,6 +34,7 @@ static struct radeon_atpx_priv {
+ bool atpx_detected;
+ /* handle for device - and atpx */
+ acpi_handle dhandle;
++ acpi_handle other_handle;
+ struct radeon_atpx atpx;
+ } radeon_atpx_priv;
+
+@@ -448,9 +449,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+ return false;
+
+ status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
+- if (ACPI_FAILURE(status))
++ if (ACPI_FAILURE(status)) {
++ radeon_atpx_priv.other_handle = dhandle;
+ return false;
+-
++ }
+ radeon_atpx_priv.dhandle = dhandle;
+ radeon_atpx_priv.atpx.handle = atpx_handle;
+ return true;
+@@ -527,6 +529,16 @@ static bool radeon_atpx_detect(void)
+ printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+ acpi_method_name);
+ radeon_atpx_priv.atpx_detected = true;
++ /*
++ * On some systems hotplug events are generated for the device
++ * being switched off when ATPX is executed. They cause ACPI
++ * hotplug to trigger and attempt to remove the device from
++ * the system, which causes it to break down. Prevent that from
++ * happening by setting the no_hotplug flag for the involved
++ * ACPI device objects.
++ */
++ acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
++ acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
+ return true;
+ }
+ return false;
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index 61580ddc4eb2..4a3b3c55a568 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -436,6 +436,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ case RADEON_INFO_SI_CP_DMA_COMPUTE:
+ *value = 1;
+ break;
++ case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
++ if (rdev->family >= CHIP_BONAIRE) {
++ *value = rdev->config.cik.backend_enable_mask;
++ } else if (rdev->family >= CHIP_TAHITI) {
++ *value = rdev->config.si.backend_enable_mask;
++ } else {
++ DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
++ }
++ break;
+ default:
+ DRM_DEBUG_KMS("Invalid request %d\n", info->request);
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index ab0a17248d55..1d029ccf428b 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -472,7 +472,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
+ return -EINVAL;
+ }
+
+- if ((start >> 28) != (end >> 28)) {
++ if ((start >> 28) != ((end - 1) >> 28)) {
+ DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
+ start, end);
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
+index 1447d794c22a..3c38f0af78fb 100644
+--- a/drivers/gpu/drm/radeon/rs690.c
++++ b/drivers/gpu/drm/radeon/rs690.c
+@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
+ base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+ base = G_000100_MC_FB_START(base) << 16;
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
++ /* Some boards seem to be configured for 128MB of sideport memory,
++ * but really only have 64MB. Just skip the sideport and use
++ * UMA memory.
++ */
++ if (rdev->mc.igp_sideport_enabled &&
++ (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
++ base += 128 * 1024 * 1024;
++ rdev->mc.real_vram_size -= 128 * 1024 * 1024;
++ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
++ }
+
+ /* Use K8 direct mapping for fast fb access. */
+ rdev->fastfb_working = false;
+diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
+index 913b025ae9b3..374499db20c7 100644
+--- a/drivers/gpu/drm/radeon/rv770_dpm.c
++++ b/drivers/gpu/drm/radeon/rv770_dpm.c
+@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
+ pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, 0);
+
++ /* disable ss, causes hangs on some cayman boards */
++ if (rdev->family == CHIP_CAYMAN) {
++ pi->sclk_ss = false;
++ pi->mclk_ss = false;
++ }
++
+ if (pi->sclk_ss || pi->mclk_ss)
+ pi->dynamic_ss = true;
+ else
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index fe0ec2cb2084..37acf938b779 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -2816,7 +2816,7 @@ static void si_setup_spi(struct radeon_device *rdev,
+ }
+
+ static u32 si_get_rb_disabled(struct radeon_device *rdev,
+- u32 max_rb_num, u32 se_num,
++ u32 max_rb_num_per_se,
+ u32 sh_per_se)
+ {
+ u32 data, mask;
+@@ -2830,14 +2830,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
+
+ data >>= BACKEND_DISABLE_SHIFT;
+
+- mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
++ mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
+
+ return data & mask;
+ }
+
+ static void si_setup_rb(struct radeon_device *rdev,
+ u32 se_num, u32 sh_per_se,
+- u32 max_rb_num)
++ u32 max_rb_num_per_se)
+ {
+ int i, j;
+ u32 data, mask;
+@@ -2847,19 +2847,21 @@ static void si_setup_rb(struct radeon_device *rdev,
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ si_select_se_sh(rdev, i, j);
+- data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
++ data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
+ disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+ }
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+ mask = 1;
+- for (i = 0; i < max_rb_num; i++) {
++ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
+ if (!(disabled_rbs & mask))
+ enabled_rbs |= mask;
+ mask <<= 1;
+ }
+
++ rdev->config.si.backend_enable_mask = enabled_rbs;
++
+ for (i = 0; i < se_num; i++) {
+ si_select_se_sh(rdev, i, 0xffffffff);
+ data = 0;
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+index 1006c15445e9..97f07aab1c36 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+@@ -116,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ }
+
+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
+- drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+- page_last = vma_pages(vma) +
+- drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
++ vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
++ page_last = vma_pages(vma) + vma->vm_pgoff -
++ drm_vma_node_start(&bo->vma_node);
+
+ if (unlikely(page_offset >= bo->num_pages)) {
+ retval = VM_FAULT_SIGBUS;
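
The ttm fault fix above swaps the sign on the two offsets: the object's pages sit at drm_vma_node_start() in the mmap offset space and the mapping begins vm_pgoff pages into that space, so the page index inside the object is the fault offset plus vm_pgoff minus the node start. A hedged sketch with made-up offsets:

#include <assert.h>

/* Index of the faulting page within the buffer object.
 * fault_pgoff: page offset of the fault within the VMA
 * vm_pgoff:    where the mapping starts in the mmap offset space
 * node_start:  where the object's pages start in that same space */
static unsigned long bo_page_index(unsigned long fault_pgoff,
				   unsigned long vm_pgoff,
				   unsigned long node_start)
{
	return fault_pgoff + vm_pgoff - node_start;
}

int main(void)
{
	/* Object at offset 1000, mapped starting at its 5th page, fault
	 * on the 2nd page of the mapping -> object page index 5. */
	assert(bo_page_index(1, 1004, 1000) == 5);
	return 0;
}
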
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index f116d664b473..d47bb0f267f7 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -361,6 +361,9 @@ static int intel_idle(struct cpuidle_device *dev,
+
+ if (!current_set_polling_and_test()) {
+
++ if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
++ clflush((void *)&current_thread_info()->flags);
++
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
+index 9dd077b78759..df393b4f7bf2 100644
+--- a/drivers/iio/adc/ad7887.c
++++ b/drivers/iio/adc/ad7887.c
+@@ -211,7 +211,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = 1,
+ .scan_index = 1,
+- .scan_type = IIO_ST('u', 12, 16, 0),
++ .scan_type = {
++ .sign = 'u',
++ .realbits = 12,
++ .storagebits = 16,
++ .shift = 0,
++ .endianness = IIO_BE,
++ },
+ },
+ .channel[1] = {
+ .type = IIO_VOLTAGE,
+@@ -221,7 +227,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = 0,
+ .scan_index = 0,
+- .scan_type = IIO_ST('u', 12, 16, 0),
++ .scan_type = {
++ .sign = 'u',
++ .realbits = 12,
++ .storagebits = 16,
++ .shift = 0,
++ .endianness = IIO_BE,
++ },
+ },
+ .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
+ .int_vref_mv = 2500,
+diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
+index 3fb7757a1028..368660dfe135 100644
+--- a/drivers/iio/imu/adis16400_core.c
++++ b/drivers/iio/imu/adis16400_core.c
+@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = ADIS16448_BARO_OUT,
+ .scan_index = ADIS16400_SCAN_BARO,
+- .scan_type = IIO_ST('s', 16, 16, 0),
++ .scan_type = {
++ .sign = 's',
++ .realbits = 16,
++ .storagebits = 16,
++ .endianness = IIO_BE,
++ },
+ },
+ ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(11)
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index db5d0a316d0b..ea7051ee1493 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -206,7 +206,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
+ isert_conn->conn_rx_descs = NULL;
+ }
+
++static void isert_cq_tx_work(struct work_struct *);
+ static void isert_cq_tx_callback(struct ib_cq *, void *);
++static void isert_cq_rx_work(struct work_struct *);
+ static void isert_cq_rx_callback(struct ib_cq *, void *);
+
+ static int
+@@ -258,26 +260,36 @@ isert_create_device_ib_res(struct isert_device *device)
+ cq_desc[i].device = device;
+ cq_desc[i].cq_index = i;
+
++ INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
+ device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
+ isert_cq_rx_callback,
+ isert_cq_event_callback,
+ (void *)&cq_desc[i],
+ ISER_MAX_RX_CQ_LEN, i);
+- if (IS_ERR(device->dev_rx_cq[i]))
++ if (IS_ERR(device->dev_rx_cq[i])) {
++ ret = PTR_ERR(device->dev_rx_cq[i]);
++ device->dev_rx_cq[i] = NULL;
+ goto out_cq;
++ }
+
++ INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
+ device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
+ isert_cq_tx_callback,
+ isert_cq_event_callback,
+ (void *)&cq_desc[i],
+ ISER_MAX_TX_CQ_LEN, i);
+- if (IS_ERR(device->dev_tx_cq[i]))
++ if (IS_ERR(device->dev_tx_cq[i])) {
++ ret = PTR_ERR(device->dev_tx_cq[i]);
++ device->dev_tx_cq[i] = NULL;
+ goto out_cq;
++ }
+
+- if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
++ ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
++ if (ret)
+ goto out_cq;
+
+- if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
++ ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
++ if (ret)
+ goto out_cq;
+ }
+
+@@ -1686,7 +1698,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
+ {
+ struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+
+- INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
+ queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
+ }
+
+@@ -1730,7 +1741,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
+ {
+ struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+
+- INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
+ queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
+ }
+
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index e75d015024a1..74f47980117b 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
+ break;
+
+ case EV_ABS:
++ input_alloc_absinfo(dev);
++ if (!dev->absinfo)
++ return;
++
+ __set_bit(code, dev->absbit);
+ break;
+
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+index 8ee9d1556e6e..263dd921edc4 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+@@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
+ /* set LED in default state (end of init phase) */
+ pcan_usb_pro_set_led(dev, 0, 1);
+
++ kfree(bi);
++ kfree(fi);
++
+ return 0;
+
+ err_out:
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index cd76d2a6e014..a82229fe1c7f 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -7608,7 +7608,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
+ {
+ u32 base = (u32) mapping & 0xffffffff;
+
+- return (base > 0xffffdcc0) && (base + len + 8 < base);
++ return base + len + 8 < base;
+ }
+
+ /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
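
Dropping the base > 0xffffdcc0 precondition above means the wraparound itself is the whole test: in 32-bit unsigned arithmetic, base + len + 8 comes out smaller than base exactly when the buffer (plus 8 bytes of slack) crosses the 4GB boundary, whatever the base. A minimal sketch; the values are illustrative:

#include <assert.h>
#include <stdint.h>

/* Would a DMA buffer of len bytes starting at the low 32 bits of
 * mapping wrap past the 4GB boundary? Unsigned overflow is defined,
 * so the sum is smaller than base exactly when it wrapped. */
static int tg3_4g_overflow(uint64_t mapping, int len)
{
	uint32_t base = (uint32_t)mapping;

	return (uint32_t)(base + len + 8) < base;
}

int main(void)
{
	assert(!tg3_4g_overflow(0xffff0000, 0x100));	/* fits below 4GB  */
	assert(tg3_4g_overflow(0xffffff00, 0x200));	/* wraps past 4GB */
	return 0;
}
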
+diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
+index c6867f926cff..c0bfc818c701 100644
+--- a/drivers/net/usb/dm9601.c
++++ b/drivers/net/usb/dm9601.c
+@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
+ dev->net->ethtool_ops = &dm9601_ethtool_ops;
+ dev->net->hard_header_len += DM_TX_OVERHEAD;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+- dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
++
++ /* dm9620/21a require room for 4 byte padding, even in dm9601
++ * mode, so we need +1 to be able to receive full size
++ * ethernet frames.
++ */
++ dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
+
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = dm9601_mdio_read;
+@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+ {
+- int len;
++ int len, pad;
+
+ /* format:
+ b1: packet length low
+@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ b3..n: packet data
+ */
+
+- len = skb->len;
++ len = skb->len + DM_TX_OVERHEAD;
+
+- if (skb_headroom(skb) < DM_TX_OVERHEAD) {
++ /* workaround for dm962x errata with tx fifo getting out of
++ * sync if a USB bulk transfer retry happens right after a
++ * packet with odd / maxpacket length by adding up to 3 bytes
++ * padding.
++ */
++ while ((len & 1) || !(len % dev->maxpacket))
++ len++;
++
++ len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
++ pad = len - skb->len;
++
++ if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
+ struct sk_buff *skb2;
+
+- skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
++ skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+
+ __skb_push(skb, DM_TX_OVERHEAD);
+
+- /* usbnet adds padding if length is a multiple of packet size
+- if so, adjust length value in header */
+- if ((skb->len % dev->maxpacket) == 0)
+- len++;
++ if (pad) {
++ memset(skb->data + skb->len, 0, pad);
++ __skb_put(skb, pad);
++ }
+
+ skb->data[0] = len;
+ skb->data[1] = len >> 8;
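
The tx_fixup rework above pads the frame until its total length, header included, is both even and not an exact multiple of the endpoint's max packet size, which is what the dm962x errata requires. The padding computation in isolation; the 64-byte maxpacket and payload sizes here are illustrative:

#include <assert.h>

#define DM_TX_OVERHEAD 2	/* 2-byte length header, as in the driver */

/* How many padding bytes must be appended so the USB transfer is
 * neither odd-sized nor an exact multiple of maxpacket? */
static int dm9601_tx_pad(int skb_len, int maxpacket)
{
	int len = skb_len + DM_TX_OVERHEAD;

	while ((len & 1) || !(len % maxpacket))
		len++;

	return len - DM_TX_OVERHEAD - skb_len;
}

int main(void)
{
	/* 62 payload bytes -> 64-byte transfer == maxpacket: pad past it. */
	assert(dm9601_tx_pad(62, 64) == 2);
	/* An odd total length just needs one byte to become even. */
	assert(dm9601_tx_pad(63, 64) == 1);
	return 0;
}
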
+diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+index 8d78253c26ce..a366d6b4626f 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
++++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+ mask2 |= ATH9K_INT_CST;
+ if (isr2 & AR_ISR_S2_TSFOOR)
+ mask2 |= ATH9K_INT_TSFOOR;
++
++ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
++ REG_WRITE(ah, AR_ISR_S2, isr2);
++ isr &= ~AR_ISR_BCNMISC;
++ }
+ }
+
+- isr = REG_READ(ah, AR_ISR_RAC);
++ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
++ isr = REG_READ(ah, AR_ISR_RAC);
++
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+
+ *masked |= ATH9K_INT_TX;
+
+- s0_s = REG_READ(ah, AR_ISR_S0_S);
++ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
++ s0_s = REG_READ(ah, AR_ISR_S0_S);
++ s1_s = REG_READ(ah, AR_ISR_S1_S);
++ } else {
++ s0_s = REG_READ(ah, AR_ISR_S0);
++ REG_WRITE(ah, AR_ISR_S0, s0_s);
++ s1_s = REG_READ(ah, AR_ISR_S1);
++ REG_WRITE(ah, AR_ISR_S1, s1_s);
++
++ isr &= ~(AR_ISR_TXOK |
++ AR_ISR_TXDESC |
++ AR_ISR_TXERR |
++ AR_ISR_TXEOL);
++ }
++
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
+-
+- s1_s = REG_READ(ah, AR_ISR_S1_S);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
+ }
+@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+ *masked |= mask2;
+ }
+
+- if (AR_SREV_9100(ah))
+- return true;
+-
+- if (isr & AR_ISR_GENTMR) {
++ if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
+ u32 s5_s;
+
+- s5_s = REG_READ(ah, AR_ISR_S5_S);
++ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
++ s5_s = REG_READ(ah, AR_ISR_S5_S);
++ } else {
++ s5_s = REG_READ(ah, AR_ISR_S5);
++ }
++
+ ah->intr_gen_timer_trigger =
+ MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
+
+@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+ if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
+ !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ *masked |= ATH9K_INT_TIM_TIMER;
++
++ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
++ REG_WRITE(ah, AR_ISR_S5, s5_s);
++ isr &= ~AR_ISR_GENTMR;
++ }
+ }
+
++ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
++ REG_WRITE(ah, AR_ISR, isr);
++ REG_READ(ah, AR_ISR);
++ }
++
++ if (AR_SREV_9100(ah))
++ return true;
++
+ if (sync_cause) {
+ ath9k_debug_sync_cause(common, sync_cause);
+ fatal_int =
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+index d44258172c0f..79d67c35299b 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+@@ -147,21 +147,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+ struct ath9k_vif_iter_data *iter_data = data;
+ int i;
+
+- for (i = 0; i < ETH_ALEN; i++)
+- iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
++ if (iter_data->hw_macaddr != NULL) {
++ for (i = 0; i < ETH_ALEN; i++)
++ iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
++ } else {
++ iter_data->hw_macaddr = mac;
++ }
+ }
+
+-static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
++static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+ {
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_vif_iter_data iter_data;
+
+ /*
+- * Use the hardware MAC address as reference, the hardware uses it
+- * together with the BSSID mask when matching addresses.
++ * Pick the MAC address of the first interface as the new hardware
++ * MAC address. The hardware will use it together with the BSSID mask
++ * when matching addresses.
+ */
+- iter_data.hw_macaddr = common->macaddr;
++ iter_data.hw_macaddr = NULL;
+ memset(&iter_data.mask, 0xff, ETH_ALEN);
+
+ if (vif)
+@@ -173,6 +178,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
+ ath9k_htc_bssid_iter, &iter_data);
+
+ memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
++
++ if (iter_data.hw_macaddr)
++ memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
++
+ ath_hw_setbssidmask(common);
+ }
+
+@@ -1083,7 +1092,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
+ goto out;
+ }
+
+- ath9k_htc_set_bssid_mask(priv, vif);
++ ath9k_htc_set_mac_bssid_mask(priv, vif);
+
+ priv->vif_slot |= (1 << avp->index);
+ priv->nvifs++;
+@@ -1148,7 +1157,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
+
+ ath9k_htc_set_opmode(priv);
+
+- ath9k_htc_set_bssid_mask(priv, vif);
++ ath9k_htc_set_mac_bssid_mask(priv, vif);
+
+ /*
+ * Stop ANI only if there are no associated station interfaces.
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 709301f88dcd..5ba0da9d1959 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -885,8 +885,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /*
+- * Use the hardware MAC address as reference, the hardware uses it
+- * together with the BSSID mask when matching addresses.
++ * Pick the MAC address of the first interface as the new hardware
++ * MAC address. The hardware will use it together with the BSSID mask
++ * when matching addresses.
+ */
+ memset(iter_data, 0, sizeof(*iter_data));
+ memset(&iter_data->mask, 0xff, ETH_ALEN);
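
Both ath9k hunks above change which address seeds the mask, but the mask math itself is unchanged: the hardware accepts an address when it matches the reference MAC on every bit position the mask leaves set, so clearing each bit where two interface MACs differ lets one filter cover all of them. A self-contained sketch; the addresses are made up:

#include <assert.h>
#include <string.h>

#define ETH_ALEN 6

/* Clear every mask bit where the reference MAC and another interface's
 * MAC disagree, mirroring the per-byte step in the iterator above. */
static void bssid_mask_iter(unsigned char mask[ETH_ALEN],
			    const unsigned char *ref,
			    const unsigned char *mac)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mask[i] &= ~(ref[i] ^ mac[i]);
}

int main(void)
{
	unsigned char ref[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x01 };
	unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x02 };
	unsigned char mask[ETH_ALEN];

	memset(mask, 0xff, ETH_ALEN);
	bssid_mask_iter(mask, ref, mac);
	/* The two MACs differ only in bits 0 and 1 of the last octet. */
	assert(mask[5] == 0xfc);
	assert(mask[0] == 0xff);
	return 0;
}
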
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index 703f839af6ca..bb3b72ebf667 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -741,6 +741,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ };
+ int index = rtlpci->rx_ring[rx_queue_idx].idx;
+
++ if (rtlpci->driver_is_goingto_unload)
++ return;
+ /*RX NORMAL PKT */
+ while (count--) {
+ /*rx descriptor */
+@@ -1637,6 +1639,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
+ */
+ set_hal_stop(rtlhal);
+
++ rtlpci->driver_is_goingto_unload = true;
+ rtlpriv->cfg->ops->disable_interrupt(hw);
+ cancel_work_sync(&rtlpriv->works.lps_change_work);
+
+@@ -1654,7 +1657,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
+ ppsc->rfchange_inprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
+
+- rtlpci->driver_is_goingto_unload = true;
+ rtlpriv->cfg->ops->hw_disable(hw);
+ /* some things are not needed if firmware not available */
+ if (!rtlpriv->max_fw_size)
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index b55c21890760..3c4b2af51611 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
+ (unsigned long long)cp, (unsigned long long)s,
+ (unsigned long long)da);
+
+- /*
+- * If the number of address cells is larger than 2 we assume the
+- * mapping doesn't specify a physical address. Rather, the address
+- * specifies an identifier that must match exactly.
+- */
+- if (na > 2 && memcmp(range, addr, na * 4) != 0)
+- return OF_BAD_ADDR;
+-
+ if (da < cp || da >= (cp + s))
+ return OF_BAD_ADDR;
+ return da - cp;
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index 1ea75236a15f..be5cba52a09c 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
+
+ status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
+ if (ACPI_FAILURE(status)) {
+- acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status);
++ if (status != AE_NOT_FOUND)
++ acpi_handle_warn(handle,
++ "can't evaluate _ADR (%#x)\n", status);
+ return AE_OK;
+ }
+
+@@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot)
+ slot->flags &= (~SLOT_ENABLED);
+ }
+
++static bool acpiphp_no_hotplug(acpi_handle handle)
++{
++ struct acpi_device *adev = NULL;
++
++ acpi_bus_get_device(handle, &adev);
++ return adev && adev->flags.no_hotplug;
++}
++
++static bool slot_no_hotplug(struct acpiphp_slot *slot)
++{
++ struct acpiphp_func *func;
++
++ list_for_each_entry(func, &slot->funcs, sibling)
++ if (acpiphp_no_hotplug(func_to_handle(func)))
++ return true;
++
++ return false;
++}
+
+ /**
+ * get_slot_status - get ACPI slot status
+@@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev)
+ unsigned long long sta;
+
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+- alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL;
++ alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
++ || acpiphp_no_hotplug(handle);
+ }
+ if (!alive) {
+ u32 v;
+@@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
+ struct pci_dev *dev, *tmp;
+
+ mutex_lock(&slot->crit_sect);
+- /* wake up all functions */
+- if (get_slot_status(slot) == ACPI_STA_ALL) {
++ if (slot_no_hotplug(slot)) {
++ ; /* do nothing */
++ } else if (get_slot_status(slot) == ACPI_STA_ALL) {
+ /* remove stale devices if any */
+ list_for_each_entry_safe(dev, tmp, &bus->devices,
+ bus_list)
+diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
+index 2832576d8b12..114f5ef4b73a 100644
+--- a/drivers/pinctrl/pinctrl-baytrail.c
++++ b/drivers/pinctrl/pinctrl-baytrail.c
+@@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
+
+ static const struct acpi_device_id byt_gpio_acpi_match[] = {
+ { "INT33B2", 0 },
++ { "INT33FC", 0 },
+ { }
+ };
+ MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
+diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
+index 11bd0d970a52..e2142956a8e5 100644
+--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
++++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
+@@ -254,7 +254,7 @@ struct sh_pfc_soc_info {
+ #define PINMUX_GPIO(_pin) \
+ [GPIO_##_pin] = { \
+ .pin = (u16)-1, \
+- .name = __stringify(name), \
++ .name = __stringify(GPIO_##_pin), \
+ .enum_id = _pin##_DATA, \
+ }
+
+diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
+index 00e667296360..557af943b2f5 100644
+--- a/drivers/power/power_supply_core.c
++++ b/drivers/power/power_supply_core.c
+@@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
+ dev_set_drvdata(dev, psy);
+ psy->dev = dev;
+
++ rc = dev_set_name(dev, "%s", psy->name);
++ if (rc)
++ goto dev_set_name_failed;
++
+ INIT_WORK(&psy->changed_work, power_supply_changed_work);
+
+ rc = power_supply_check_supplies(psy);
+@@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
+ if (rc)
+ goto wakeup_init_failed;
+
+- rc = kobject_set_name(&dev->kobj, "%s", psy->name);
+- if (rc)
+- goto kobject_set_name_failed;
+-
+ rc = device_add(dev);
+ if (rc)
+ goto device_add_failed;
+@@ -553,11 +553,11 @@ create_triggers_failed:
+ register_cooler_failed:
+ psy_unregister_thermal(psy);
+ register_thermal_failed:
+-wakeup_init_failed:
+ device_del(dev);
+-kobject_set_name_failed:
+ device_add_failed:
++wakeup_init_failed:
+ check_supplies_failed:
++dev_set_name_failed:
+ put_device(dev);
+ success:
+ return rc;
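
The label shuffle above follows the kernel's goto-unwind idiom: cleanup labels must run in the exact reverse order of the setup steps, so moving dev_set_name() to the front of the sequence also moves its failure label to the very end of the chain, where only put_device() remains. A generic sketch of the idiom; the step names are placeholders, not the driver's functions:

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("doing %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undoing %s\n", name);
}

/* Cleanup labels mirror the setup steps in reverse: a failure at step N
 * jumps past its own undo and unwinds only steps 1..N-1. */
static int setup(int fail_at)
{
	int rc;

	rc = step("A", fail_at == 1);	/* cf. dev_set_name() */
	if (rc)
		goto a_failed;
	rc = step("B", fail_at == 2);	/* cf. wakeup init */
	if (rc)
		goto b_failed;
	rc = step("C", fail_at == 3);	/* cf. device_add() */
	if (rc)
		goto c_failed;
	return 0;

c_failed:
	undo("B");
b_failed:
	undo("A");
a_failed:
	return rc;
}

int main(void)
{
	setup(2);	/* fails at B: only A gets undone */
	return 0;
}
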
+diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
+index 3f4ca4e09a4c..34629ea913d4 100644
+--- a/drivers/s390/char/tty3270.c
++++ b/drivers/s390/char/tty3270.c
+@@ -942,7 +942,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
+ return rc;
+ }
+
+- tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows);
++ tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
+ if (IS_ERR(tp->screen)) {
+ rc = PTR_ERR(tp->screen);
+ raw3270_put_view(&tp->view);
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 596480022b0a..3bb0a1d1622a 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
+ schedule_delayed_work(&tgt->sess_del_work, 0);
+ else
+ schedule_delayed_work(&tgt->sess_del_work,
+- jiffies - sess->expires);
++ sess->expires - jiffies);
+ }
+
+ /* ha->hardware_lock supposed to be held on entry */
+@@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+- unsigned long flags;
++ unsigned long flags, elapsed;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (!list_empty(&tgt->del_sess_list)) {
+ sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
+ del_list_entry);
+- if (time_after_eq(jiffies, sess->expires)) {
++ elapsed = jiffies;
++ if (time_after_eq(elapsed, sess->expires)) {
+ qlt_undelete_sess(sess);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
+@@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
+ ha->tgt.tgt_ops->put_sess(sess);
+ } else {
+ schedule_delayed_work(&tgt->sess_del_work,
+- jiffies - sess->expires);
++ sess->expires - elapsed);
+ break;
+ }
+ }
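
Both qla_target hunks above fix the same inversion: schedule_delayed_work() takes a relative delay, and jiffies - sess->expires is the time since expiry, which for a deadline still in the future wraps to an enormous unsigned value. A sketch of the corrected pattern, with the kernel's time_after_eq() open-coded so it builds in userspace; the tick values are illustrative:

#include <assert.h>

typedef unsigned long jiffies_t;

/* Kernel-style wraparound-safe comparison: true if a is at or after b. */
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

/* Relative delay to pass to schedule_delayed_work(): zero if the
 * deadline already passed, otherwise the jiffies still remaining. */
static jiffies_t delay_until(jiffies_t now, jiffies_t expires)
{
	return time_after_eq(now, expires) ? 0 : expires - now;
}

int main(void)
{
	assert(delay_until(100, 150) == 50);	/* 50 ticks still to wait  */
	assert(delay_until(200, 150) == 0);	/* already expired: run now */
	return 0;
}
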
+diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
+index 317a821b7906..316c44401372 100644
+--- a/drivers/staging/comedi/drivers.c
++++ b/drivers/staging/comedi/drivers.c
+@@ -417,7 +417,7 @@ int comedi_load_firmware(struct comedi_device *dev,
+ release_firmware(fw);
+ }
+
+- return ret;
++ return ret < 0 ? ret : 0;
+ }
+ EXPORT_SYMBOL_GPL(comedi_load_firmware);
+
+diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
+index 432e3f9c3301..c55f234b29e6 100644
+--- a/drivers/staging/comedi/drivers/8255_pci.c
++++ b/drivers/staging/comedi/drivers/8255_pci.c
+@@ -63,7 +63,8 @@ enum pci_8255_boardid {
+ BOARD_ADLINK_PCI7296,
+ BOARD_CB_PCIDIO24,
+ BOARD_CB_PCIDIO24H,
+- BOARD_CB_PCIDIO48H,
++ BOARD_CB_PCIDIO48H_OLD,
++ BOARD_CB_PCIDIO48H_NEW,
+ BOARD_CB_PCIDIO96H,
+ BOARD_NI_PCIDIO96,
+ BOARD_NI_PCIDIO96B,
+@@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
+ .dio_badr = 2,
+ .n_8255 = 1,
+ },
+- [BOARD_CB_PCIDIO48H] = {
++ [BOARD_CB_PCIDIO48H_OLD] = {
+ .name = "cb_pci-dio48h",
+ .dio_badr = 1,
+ .n_8255 = 2,
+ },
++ [BOARD_CB_PCIDIO48H_NEW] = {
++ .name = "cb_pci-dio48h",
++ .dio_badr = 2,
++ .n_8255 = 2,
++ },
+ [BOARD_CB_PCIDIO96H] = {
+ .name = "cb_pci-dio96h",
+ .dio_badr = 2,
+@@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
+ { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
+ { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
+ { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
+- { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H },
++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
++ .driver_data = BOARD_CB_PCIDIO48H_OLD },
++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
++ .driver_data = BOARD_CB_PCIDIO48H_NEW },
+ { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
+ { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
+ { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 38e44b9abf0f..d5c724b317aa 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np)
+ */
+ send_sig(SIGINT, np->np_thread, 1);
+ kthread_stop(np->np_thread);
++ np->np_thread = NULL;
+ }
+
+ np->np_transport->iscsit_free_np(np);
+@@ -830,24 +831,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
+ (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
+ /*
+- * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
+- * that adds support for RESERVE/RELEASE. There is a bug
+- * add with this new functionality that sets R/W bits when
+- * neither CDB carries any READ or WRITE datapayloads.
++ * From RFC-3720 Section 10.3.1:
++ *
++ * "Either or both of R and W MAY be 1 when either the
++ * Expected Data Transfer Length and/or Bidirectional Read
++ * Expected Data Transfer Length are 0"
++ *
++ * For this case, go ahead and clear the unnecessary bits
++ * to avoid any confusion with ->data_direction.
+ */
+- if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
+- hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+- hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
+- goto done;
+- }
++ hdr->flags &= ~ISCSI_FLAG_CMD_READ;
++ hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
+
+- pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
++ pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+ " set when Expected Data Transfer Length is 0 for"
+- " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
+- return iscsit_add_reject_cmd(cmd,
+- ISCSI_REASON_BOOKMARK_INVALID, buf);
++ " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
+ }
+-done:
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
+ !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 1794c753954a..f442a9c93403 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1404,11 +1404,6 @@ old_sess_out:
+
+ out:
+ stop = kthread_should_stop();
+- if (!stop && signal_pending(current)) {
+- spin_lock_bh(&np->np_thread_lock);
+- stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
+- spin_unlock_bh(&np->np_thread_lock);
+- }
+ /* Wait for another socket.. */
+ if (!stop)
+ return 1;
+@@ -1416,7 +1411,6 @@ exit:
+ iscsi_stop_login_thread_timer(np);
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_thread_state = ISCSI_NP_THREAD_EXIT;
+- np->np_thread = NULL;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ return 0;
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index d90dbb0f1a69..e5e39658034c 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1107,6 +1107,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
+ dev->dev_attrib.block_size = block_size;
+ pr_debug("dev[%p]: SE Device block_size changed to %u\n",
+ dev, block_size);
++
++ if (dev->dev_attrib.max_bytes_per_io)
++ dev->dev_attrib.hw_max_sectors =
++ dev->dev_attrib.max_bytes_per_io / block_size;
++
+ return 0;
+ }
+
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index b662f89dedac..55725f5f56a2 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
+ pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+ " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
+ TARGET_CORE_MOD_VERSION);
+- pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
+- " MaxSectors: %u\n",
+- hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
++ pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
++ hba->hba_id, fd_host->fd_host_id);
+
+ return 0;
+ }
+@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
+ }
+
+ dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
+- dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
++ dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
++ dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
+ dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
+
+ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
+diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
+index 37ffc5bd2399..d7772c167685 100644
+--- a/drivers/target/target_core_file.h
++++ b/drivers/target/target_core_file.h
+@@ -7,7 +7,10 @@
+ #define FD_DEVICE_QUEUE_DEPTH 32
+ #define FD_MAX_DEVICE_QUEUE_DEPTH 128
+ #define FD_BLOCKSIZE 512
+-#define FD_MAX_SECTORS 2048
++/*
++ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
++ */
++#define FD_MAX_BYTES 8388608
+
+ #define RRF_EMULATE_CDB 0x01
+ #define RRF_GOT_LBA 0x02
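
With the fixed FD_MAX_SECTORS gone, the fileio backend's limit is expressed in bytes (8MB, bounded by the 2048 iovecs a single vfs_writev/vfs_readv call can take, assuming 4KB pages) and re-derived in sectors from the configured block size, as the se_dev_set_block_size() hunk above does. A quick check of the arithmetic, using only the constants from the hunks:

#include <assert.h>

#define FD_MAX_BYTES 8388608	/* 2048 iovecs * 4096-byte pages */

static unsigned max_sectors(unsigned block_size)
{
	return FD_MAX_BYTES / block_size;
}

int main(void)
{
	assert(max_sectors(512) == 16384);	/* vs the old fixed 2048 */
	assert(max_sectors(4096) == 2048);
	return 0;
}
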
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 4d6f430087d0..d4a89db511b5 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -93,6 +93,7 @@ struct n_tty_data {
+ size_t canon_head;
+ size_t echo_head;
+ size_t echo_commit;
++ size_t echo_mark;
+ DECLARE_BITMAP(char_map, 256);
+
+ /* private to n_tty_receive_overrun (single-threaded) */
+@@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
+ {
+ ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
+ ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
++ ldata->echo_mark = 0;
+ ldata->line_start = 0;
+
+ ldata->erasing = 0;
+@@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty)
+ size_t head;
+
+ head = ldata->echo_head;
++ ldata->echo_mark = head;
+ old = ldata->echo_commit - ldata->echo_tail;
+
+ /* Process committed echoes if the accumulated # of bytes
+@@ -811,10 +814,11 @@ static void process_echoes(struct tty_struct *tty)
+ size_t echoed;
+
+ if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
+- ldata->echo_commit == ldata->echo_tail)
++ ldata->echo_mark == ldata->echo_tail)
+ return;
+
+ mutex_lock(&ldata->output_lock);
++ ldata->echo_commit = ldata->echo_mark;
+ echoed = __process_echoes(tty);
+ mutex_unlock(&ldata->output_lock);
+
+@@ -822,6 +826,7 @@ static void process_echoes(struct tty_struct *tty)
+ tty->ops->flush_chars(tty);
+ }
+
++/* NB: echo_mark and echo_head should be equivalent here */
+ static void flush_echoes(struct tty_struct *tty)
+ {
+ struct n_tty_data *ldata = tty->disc_data;
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index daf710f5c3fc..8b2accbad3d1 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -417,6 +417,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
+ static const struct acpi_device_id dw8250_acpi_match[] = {
+ { "INT33C4", 0 },
+ { "INT33C5", 0 },
++ { "INT3434", 0 },
++ { "INT3435", 0 },
+ { "80860F0A", 0 },
+ { },
+ };
+diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
+index f87f1a0c8c6e..5ba30e078236 100644
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -2050,6 +2050,9 @@ static int __init pmz_console_init(void)
+ /* Probe ports */
+ pmz_probe();
+
++ if (pmz_ports_count == 0)
++ return -ENODEV;
++
+ /* TODO: Autoprobe console based on OF */
+ /* pmz_console.index = i; */
+ register_console(&pmz_console);
+diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
+index 22fad8ad5ac2..d8a55e87877f 100644
+--- a/drivers/tty/tty_ldsem.c
++++ b/drivers/tty/tty_ldsem.c
+@@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
+ return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
+ }
+
++/*
++ * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
++ * Returns 1 if count was successfully changed; @*old will have @new value.
++ * Returns 0 if count was not changed; @*old will have most recent sem->count
++ */
+ static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
+ {
+- long tmp = *old;
+- *old = atomic_long_cmpxchg(&sem->count, *old, new);
+- return *old == tmp;
++ long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
++ if (tmp == *old) {
++ *old = new;
++ return 1;
++ } else {
++ *old = tmp;
++ return 0;
++ }
+ }
+
+ /*
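
The rewritten ldsem_cmpxchg() above now keeps the documented contract on both paths: *old becomes the new value on success and is refreshed with the observed count on failure, which is the same behavior C11's compare-exchange provides. A userspace sketch of the retry-loop shape such a primitive supports; the helper and values are illustrative, not the tty code:

#include <assert.h>
#include <stdatomic.h>

/* A CAS retry loop in the shape the ldsem code uses: keep a snapshot,
 * try to publish snapshot + delta, and refresh the snapshot whenever
 * another thread won the race. atomic_compare_exchange_weak() updates
 * 'old' with the current value on failure, matching the contract the
 * fixed ldsem_cmpxchg() documents. */
static long add_if_nonnegative(atomic_long *count, long delta)
{
	long old = atomic_load(count);

	do {
		if (old < 0)
			return old;	/* e.g. a writer holds the sem */
	} while (!atomic_compare_exchange_weak(count, &old, old + delta));

	return old + delta;
}

int main(void)
{
	atomic_long count = 4;

	assert(add_if_nonnegative(&count, 3) == 7);
	assert(atomic_load(&count) == 7);
	return 0;
}
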
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 23763dcec069..d6a50b7bb4ca 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -575,6 +575,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ : CI_ROLE_GADGET;
+ }
+
++ /* only update vbus status for peripheral */
++ if (ci->role == CI_ROLE_GADGET)
++ ci_handle_vbus_change(ci);
++
+ ret = ci_role_start(ci, ci->role);
+ if (ret) {
+ dev_err(dev, "can't start %s role\n", ci_role(ci)->name);
+diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
+index 64d7a6d9a1ad..5d874d5cf500 100644
+--- a/drivers/usb/chipidea/host.c
++++ b/drivers/usb/chipidea/host.c
+@@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci)
+ return ret;
+
+ disable_reg:
+- regulator_disable(ci->platdata->reg_vbus);
++ if (ci->platdata->reg_vbus)
++ regulator_disable(ci->platdata->reg_vbus);
+
+ put_hcd:
+ usb_put_hcd(hcd);
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 9333083dd111..d98fa254eaaf 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1818,9 +1818,6 @@ static int udc_start(struct ci_hdrc *ci)
+ pm_runtime_no_callbacks(&ci->gadget.dev);
+ pm_runtime_enable(&ci->gadget.dev);
+
+- /* Update ci->vbus_active */
+- ci_handle_vbus_change(ci);
+-
+ return retval;
+
+ remove_trans:
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index d3318a0df8ee..6463ca3bcfba 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -820,13 +820,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on)
+ {
+ /* need autopm_get/put here to ensure the usbcore sees the new value */
+ int rv = usb_autopm_get_interface(intf);
+- if (rv < 0)
+- goto err;
+
+ intf->needs_remote_wakeup = on;
+- usb_autopm_put_interface(intf);
+-err:
+- return rv;
++ if (!rv)
++ usb_autopm_put_interface(intf);
++ return 0;
+ }
+
+ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index b8dffd59eb25..73f5208714a4 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ * any other sleep) on Haswell machines with LPT and LPT-LP
+ * with the new Intel BIOS
+ */
+- xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
++ /* Limit the quirk to only known vendors, as this triggers
++ * yet another BIOS bug on some other machines
++ * https://bugzilla.kernel.org/show_bug.cgi?id=66171
++ */
++ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
++ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 0d0d11880968..f7dca0b92bfb 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -1809,9 +1809,6 @@ static void musb_free(struct musb *musb)
+ disable_irq_wake(musb->nIrq);
+ free_irq(musb->nIrq, musb);
+ }
+- cancel_work_sync(&musb->irq_work);
+- if (musb->dma_controller)
+- dma_controller_destroy(musb->dma_controller);
+
+ musb_host_free(musb);
+ }
+@@ -1893,6 +1890,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+
++ /* Init IRQ workqueue before request_irq */
++ INIT_WORK(&musb->irq_work, musb_irq_work);
++
+ /* setup musb parts of the core (especially endpoints) */
+ status = musb_core_init(plat->config->multipoint
+ ? MUSB_CONTROLLER_MHDRC
+@@ -1902,9 +1902,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+
+ setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
+
+- /* Init IRQ workqueue before request_irq */
+- INIT_WORK(&musb->irq_work, musb_irq_work);
+-
+ /* attach to the IRQ */
+ if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
+ dev_err(dev, "request_irq %d failed!\n", nIrq);
+@@ -1978,6 +1975,7 @@ fail4:
+ musb_host_cleanup(musb);
+
+ fail3:
++ cancel_work_sync(&musb->irq_work);
+ if (musb->dma_controller)
+ dma_controller_destroy(musb->dma_controller);
+ pm_runtime_put_sync(musb->controller);
+@@ -2036,6 +2034,10 @@ static int musb_remove(struct platform_device *pdev)
+ musb_exit_debugfs(musb);
+ musb_shutdown(pdev);
+
++ if (musb->dma_controller)
++ dma_controller_destroy(musb->dma_controller);
++
++ cancel_work_sync(&musb->irq_work);
+ musb_free(musb);
+ device_init_wakeup(dev, 0);
+ return 0;
+diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
+index 1f31e6b4c251..dc97744489b0 100644
+--- a/drivers/usb/serial/generic.c
++++ b/drivers/usb/serial/generic.c
+@@ -176,14 +176,7 @@ retry:
+ return result;
+ }
+
+- /* Try sending off another urb, unless in irq context (in which case
+- * there will be no free urb). */
+- if (!in_irq())
+- goto retry;
+-
+- clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
+-
+- return 0;
++ goto retry; /* try sending off another urb */
+ }
+
+ /**
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 496b7e39d5be..cc7a24154490 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
+ #define ZTE_PRODUCT_MF628 0x0015
+ #define ZTE_PRODUCT_MF626 0x0031
+ #define ZTE_PRODUCT_MC2718 0xffe8
++#define ZTE_PRODUCT_AC2726 0xfff1
+
+ #define BENQ_VENDOR_ID 0x04a5
+ #define BENQ_PRODUCT_H10 0x4068
+@@ -1453,6 +1454,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
+
+ { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
+ { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
+diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
+index fca4c752a4ed..eae2c873b39f 100644
+--- a/drivers/usb/serial/zte_ev.c
++++ b/drivers/usb/serial/zte_ev.c
+@@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x19d2, 0xfffd) },
+ { USB_DEVICE(0x19d2, 0xfffc) },
+ { USB_DEVICE(0x19d2, 0xfffb) },
+- /* AC2726, AC8710_V3 */
+- { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
++ /* AC8710_V3 */
+ { USB_DEVICE(0x19d2, 0xfff6) },
+ { USB_DEVICE(0x19d2, 0xfff7) },
+ { USB_DEVICE(0x19d2, 0xfff8) },
+diff --git a/fs/aio.c b/fs/aio.c
+index 6efb7f6cb22e..062a5f6a1448 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx)
+ int i;
+
+ for (i = 0; i < ctx->nr_pages; i++) {
++ struct page *page;
+ pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
+ page_count(ctx->ring_pages[i]));
+- put_page(ctx->ring_pages[i]);
++ page = ctx->ring_pages[i];
++ if (!page)
++ continue;
++ ctx->ring_pages[i] = NULL;
++ put_page(page);
+ }
+
+ put_aio_ring_file(ctx);
+@@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
+ unsigned long flags;
+ int rc;
+
++ rc = 0;
++
++ /* Make sure the old page hasn't already been changed */
++ spin_lock(&mapping->private_lock);
++ ctx = mapping->private_data;
++ if (ctx) {
++ pgoff_t idx;
++ spin_lock_irqsave(&ctx->completion_lock, flags);
++ idx = old->index;
++ if (idx < (pgoff_t)ctx->nr_pages) {
++ if (ctx->ring_pages[idx] != old)
++ rc = -EAGAIN;
++ } else
++ rc = -EINVAL;
++ spin_unlock_irqrestore(&ctx->completion_lock, flags);
++ } else
++ rc = -EINVAL;
++ spin_unlock(&mapping->private_lock);
++
++ if (rc != 0)
++ return rc;
++
+ /* Writeback must be complete */
+ BUG_ON(PageWriteback(old));
+- put_page(old);
++ get_page(new);
+
+- rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
++ rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
+ if (rc != MIGRATEPAGE_SUCCESS) {
+- get_page(old);
++ put_page(new);
+ return rc;
+ }
+
+- get_page(new);
+-
+ /* We can potentially race against kioctx teardown here. Use the
+ * address_space's private data lock to protect the mapping's
+ * private_data.
+@@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ migrate_page_copy(new, old);
+ idx = old->index;
+- if (idx < (pgoff_t)ctx->nr_pages)
+- ctx->ring_pages[idx] = new;
++ if (idx < (pgoff_t)ctx->nr_pages) {
++ /* And only do the move if things haven't changed */
++ if (ctx->ring_pages[idx] == old)
++ ctx->ring_pages[idx] = new;
++ else
++ rc = -EAGAIN;
++ } else
++ rc = -EINVAL;
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ } else
+ rc = -EBUSY;
+ spin_unlock(&mapping->private_lock);
+
++ if (rc == MIGRATEPAGE_SUCCESS)
++ put_page(old);
++ else
++ put_page(new);
++
+ return rc;
+ }
+ #endif
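The rewritten aio_migratepage() takes its reference on the new page up front, verifies under mapping->private_lock and ctx->completion_lock that ring_pages[idx] still points at the old page, and returns -EAGAIN so the caller retries if anything moved underneath it; only on MIGRATEPAGE_SUCCESS does it drop the old page. A hedged pthread analogue of that validate-under-lock-or-EAGAIN shape (the slot array, lock and function names here are illustrative, not kernel interfaces):

#include <errno.h>
#include <pthread.h>

#define NR_SLOTS 8
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slots[NR_SLOTS];

/* Swap old for new only if the slot still holds old; otherwise ask
 * the caller to retry, mirroring the -EAGAIN paths in the patch. */
static int replace_slot(unsigned idx, void *old, void *new)
{
    int rc = 0;

    pthread_mutex_lock(&slot_lock);
    if (idx >= NR_SLOTS)
        rc = -EINVAL;
    else if (slots[idx] != old)
        rc = -EAGAIN;              /* raced with teardown or another migration */
    else
        slots[idx] = new;
    pthread_mutex_unlock(&slot_lock);
    return rc;
}

int main(void)
{
    int a = 1, b = 2;
    slots[3] = &a;
    return replace_slot(3, &a, &b) == 0 ? 0 : 1;
}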
+@@ -326,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx)
+ struct aio_ring *ring;
+ unsigned nr_events = ctx->max_reqs;
+ struct mm_struct *mm = current->mm;
+- unsigned long size, populate;
++ unsigned long size, unused;
+ int nr_pages;
+ int i;
+ struct file *file;
+@@ -347,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx)
+ return -EAGAIN;
+ }
+
++ ctx->aio_ring_file = file;
++ nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
++ / sizeof(struct io_event);
++
++ ctx->ring_pages = ctx->internal_pages;
++ if (nr_pages > AIO_RING_PAGES) {
++ ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
++ GFP_KERNEL);
++ if (!ctx->ring_pages) {
++ put_aio_ring_file(ctx);
++ return -ENOMEM;
++ }
++ }
++
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page;
+ page = find_or_create_page(file->f_inode->i_mapping,
+@@ -358,19 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx)
+ SetPageUptodate(page);
+ SetPageDirty(page);
+ unlock_page(page);
++
++ ctx->ring_pages[i] = page;
+ }
+- ctx->aio_ring_file = file;
+- nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
+- / sizeof(struct io_event);
++ ctx->nr_pages = i;
+
+- ctx->ring_pages = ctx->internal_pages;
+- if (nr_pages > AIO_RING_PAGES) {
+- ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
+- GFP_KERNEL);
+- if (!ctx->ring_pages) {
+- put_aio_ring_file(ctx);
+- return -ENOMEM;
+- }
++ if (unlikely(i != nr_pages)) {
++ aio_free_ring(ctx);
++ return -EAGAIN;
+ }
+
+ ctx->mmap_size = nr_pages * PAGE_SIZE;
+@@ -379,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx)
+ down_write(&mm->mmap_sem);
+ ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
+ PROT_READ | PROT_WRITE,
+- MAP_SHARED | MAP_POPULATE, 0, &populate);
++ MAP_SHARED, 0, &unused);
++ up_write(&mm->mmap_sem);
+ if (IS_ERR((void *)ctx->mmap_base)) {
+- up_write(&mm->mmap_sem);
+ ctx->mmap_size = 0;
+ aio_free_ring(ctx);
+ return -EAGAIN;
+@@ -389,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx)
+
+ pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
+
+- /* We must do this while still holding mmap_sem for write, as we
+- * need to be protected against userspace attempting to mremap()
+- * or munmap() the ring buffer.
+- */
+- ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
+- 1, 0, ctx->ring_pages, NULL);
+-
+- /* Dropping the reference here is safe as the page cache will hold
+- * onto the pages for us. It is also required so that page migration
+- * can unmap the pages and get the right reference count.
+- */
+- for (i = 0; i < ctx->nr_pages; i++)
+- put_page(ctx->ring_pages[i]);
+-
+- up_write(&mm->mmap_sem);
+-
+- if (unlikely(ctx->nr_pages != nr_pages)) {
+- aio_free_ring(ctx);
+- return -EAGAIN;
+- }
+-
+ ctx->user_id = ctx->mmap_base;
+ ctx->nr_events = nr_events; /* trusted copy */
+
+@@ -652,7 +676,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
+ aio_nr += ctx->max_reqs;
+ spin_unlock(&aio_nr_lock);
+
+- percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
++ percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
++ percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */
+
+ err = ioctx_add_table(ctx, mm);
+ if (err)
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index 6df8bd481425..ec3ba43b9faa 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -210,13 +210,17 @@ static int readpage_nounlock(struct file *filp, struct page *page)
+ if (err < 0) {
+ SetPageError(page);
+ goto out;
+- } else if (err < PAGE_CACHE_SIZE) {
++ } else {
++ if (err < PAGE_CACHE_SIZE) {
+ /* zero fill remainder of page */
+- zero_user_segment(page, err, PAGE_CACHE_SIZE);
++ zero_user_segment(page, err, PAGE_CACHE_SIZE);
++ } else {
++ flush_dcache_page(page);
++ }
+ }
+ SetPageUptodate(page);
+
+- if (err == 0)
++ if (err >= 0)
+ ceph_readpage_to_fscache(inode, page);
+
+ out:
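The ceph change distinguishes a short read (zero-fill the tail of the page) from a full one (flush the dcache), and now pushes any non-negative result into fscache. The zero-fill half of that is the standard short-read pattern shown below; PAGE_SZ and finish_read() are illustrative, not ceph symbols.

#include <string.h>

#define PAGE_SZ 4096

/* After reading n bytes into a page-sized buffer, clear the tail so
 * stale data never leaks to userspace on a short read. */
static void finish_read(char *page, int n)
{
    if (n >= 0 && n < PAGE_SZ)
        memset(page + n, 0, PAGE_SZ - n);
}

int main(void)
{
    char page[PAGE_SZ];
    finish_read(page, 100);        /* bytes 100..4095 are now zero */
    return 0;
}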
+diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
+index 6bfe65e0b038..360b622b0be0 100644
+--- a/fs/ceph/cache.c
++++ b/fs/ceph/cache.c
+@@ -324,6 +324,9 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
++ if (!PageFsCache(page))
++ return;
++
+ fscache_wait_on_page_write(ci->fscache, page);
+ fscache_uncache_page(ci->fscache, page);
+ }
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index b7bda5d9611d..788901552eb1 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -642,6 +642,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
+ req->r_unsafe_dir = NULL;
+ }
+
++ complete_all(&req->r_safe_completion);
++
+ ceph_mdsc_put_request(req);
+ }
+
+@@ -1875,8 +1877,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
+ int mds = -1;
+ int err = -EAGAIN;
+
+- if (req->r_err || req->r_got_result)
++ if (req->r_err || req->r_got_result) {
++ if (req->r_aborted)
++ __unregister_request(mdsc, req);
+ goto out;
++ }
+
+ if (req->r_timeout &&
+ time_after_eq(jiffies, req->r_started + req->r_timeout)) {
+@@ -2186,7 +2191,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
+ if (head->safe) {
+ req->r_got_safe = true;
+ __unregister_request(mdsc, req);
+- complete_all(&req->r_safe_completion);
+
+ if (req->r_got_unsafe) {
+ /*
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index b5ec2a268f56..45ccfbd8ea5f 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -475,9 +475,10 @@ extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
+ const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
+ extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
+ extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
+-extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
+- const unsigned char *path,
+- struct cifs_sb_info *cifs_sb, unsigned int xid);
++extern int CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
++ struct cifs_sb_info *cifs_sb,
++ struct cifs_fattr *fattr,
++ const unsigned char *path);
+ extern int mdfour(unsigned char *, unsigned char *, int);
+ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
+ const struct nls_table *codepage);
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 5384c2a640ca..f039c23d003d 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -193,7 +193,7 @@ check_name(struct dentry *direntry)
+ static int
+ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
+ struct tcon_link *tlink, unsigned oflags, umode_t mode,
+- __u32 *oplock, struct cifs_fid *fid, int *created)
++ __u32 *oplock, struct cifs_fid *fid)
+ {
+ int rc = -ENOENT;
+ int create_options = CREATE_NOT_DIR;
+@@ -349,7 +349,6 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
+ .device = 0,
+ };
+
+- *created |= FILE_CREATED;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ args.uid = current_fsuid();
+ if (inode->i_mode & S_ISGID)
+@@ -480,13 +479,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+ cifs_add_pending_open(&fid, tlink, &open);
+
+ rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
+- &oplock, &fid, opened);
++ &oplock, &fid);
+
+ if (rc) {
+ cifs_del_pending_open(&open);
+ goto out;
+ }
+
++ if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
++ *opened |= FILE_CREATED;
++
+ rc = finish_open(file, direntry, generic_file_open, opened);
+ if (rc) {
+ if (server->ops->close)
+@@ -529,7 +531,6 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
+ struct TCP_Server_Info *server;
+ struct cifs_fid fid;
+ __u32 oplock;
+- int created = FILE_CREATED;
+
+ cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n",
+ inode, direntry->d_name.name, direntry);
+@@ -546,7 +547,7 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
+ server->ops->new_lease_key(&fid);
+
+ rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
+- &oplock, &fid, &created);
++ &oplock, &fid);
+ if (!rc && server->ops->close)
+ server->ops->close(xid, tcon, &fid);
+
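The cifs fix reports FILE_CREATED only when both O_CREAT and O_EXCL were passed, because that is the one combination where a successful open proves the file did not previously exist. Note the mask-equality test rather than a plain `&`, which would also fire when only one of the two flags is set. A tiny standalone demonstration:

#include <fcntl.h>
#include <stdio.h>

static int definitely_created(int oflags)
{
    /* true only if *both* bits are set */
    return (oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL);
}

int main(void)
{
    printf("%d\n", definitely_created(O_CREAT));           /* 0 */
    printf("%d\n", definitely_created(O_CREAT | O_EXCL));  /* 1 */
    return 0;
}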
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 36f9ebb93ceb..49719b8228e5 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -383,7 +383,8 @@ int cifs_get_inode_info_unix(struct inode **pinode,
+
+ /* check for Minshall+French symlinks */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+- int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
++ int tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
++ full_path);
+ if (tmprc)
+ cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
+ }
+@@ -799,7 +800,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
+
+ /* check for Minshall+French symlinks */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+- tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
++ tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
++ full_path);
+ if (tmprc)
+ cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
+ }
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index 7e36ceba0c7a..477e53bad551 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -354,34 +354,30 @@ open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+
+
+ int
+-CIFSCheckMFSymlink(struct cifs_fattr *fattr,
+- const unsigned char *path,
+- struct cifs_sb_info *cifs_sb, unsigned int xid)
++CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
++ struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
++ const unsigned char *path)
+ {
+- int rc = 0;
++ int rc;
+ u8 *buf = NULL;
+ unsigned int link_len = 0;
+ unsigned int bytes_read = 0;
+- struct cifs_tcon *ptcon;
+
+ if (!CIFSCouldBeMFSymlink(fattr))
+ /* it's not a symlink */
+ return 0;
+
+ buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+- if (!buf) {
+- rc = -ENOMEM;
+- goto out;
+- }
++ if (!buf)
++ return -ENOMEM;
+
+- ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+- if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
+- rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
+- &bytes_read, cifs_sb, xid);
++ if (tcon->ses->server->ops->query_mf_symlink)
++ rc = tcon->ses->server->ops->query_mf_symlink(path, buf,
++ &bytes_read, cifs_sb, xid);
+ else
+- goto out;
++ rc = -ENOSYS;
+
+- if (rc != 0)
++ if (rc)
+ goto out;
+
+ if (bytes_read == 0) /* not a symlink */
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index 288534920fe5..20d6697bd638 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
+ sb->s_blocksize - offset : towrite;
+
+ tmp_bh.b_state = 0;
++ tmp_bh.b_size = sb->s_blocksize;
+ err = ext2_get_block(inode, blk, &tmp_bh, 1);
+ if (err < 0)
+ goto out;
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index af815ea9d7cc..745faaa7ef95 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -267,6 +267,16 @@ struct ext4_io_submit {
+ /* Translate # of blks to # of clusters */
+ #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
+ (sbi)->s_cluster_bits)
++/* Mask out the low bits to get the starting block of the cluster */
++#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \
++ ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
++#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \
++ ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
++/* Get the cluster offset */
++#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \
++ ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
++#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \
++ ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
+
+ /*
+ * Structure of a blocks group descriptor
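With bigalloc, s_cluster_ratio is a power of two, so the new EXT4_*_CMASK/EXT4_*_COFF macros reduce to the usual mask tricks: CMASK rounds a block number down to its cluster boundary and COFF extracts the offset within the cluster. A self-contained check of the arithmetic with an illustrative ratio of 16:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t ratio = 16;           /* s_cluster_ratio, a power of two */
    uint64_t blk   = 0x12345;

    uint64_t cmask = blk & ~(ratio - 1);   /* EXT4_*_CMASK: cluster start */
    uint64_t coff  = blk &  (ratio - 1);   /* EXT4_*_COFF: offset in cluster */

    printf("start=%#llx off=%#llx\n",
           (unsigned long long)cmask, (unsigned long long)coff);
    /* prints start=0x12340 off=0x5; cmask + coff == blk always holds */
    return 0;
}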
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index 17ac112ab101..3fe29de832c8 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -259,6 +259,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
+ if (WARN_ON_ONCE(err)) {
+ ext4_journal_abort_handle(where, line, __func__, bh,
+ handle, err);
++ ext4_error_inode(inode, where, line,
++ bh->b_blocknr,
++ "journal_dirty_metadata failed: "
++ "handle type %u started at line %u, "
++ "credits %u/%u, errcode %d",
++ handle->h_type,
++ handle->h_line_no,
++ handle->h_requested_credits,
++ handle->h_buffer_credits, err);
+ }
+ } else {
+ if (inode)
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 54d52afcdb19..f76027fe58ae 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
+ {
+ ext4_fsblk_t block = ext4_ext_pblock(ext);
+ int len = ext4_ext_get_actual_len(ext);
++ ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
++ ext4_lblk_t last = lblock + len - 1;
+
+- if (len == 0)
++ if (lblock > last)
+ return 0;
+ return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
+ }
+@@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode,
+ if (depth == 0) {
+ /* leaf entries */
+ struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
++ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
++ ext4_fsblk_t pblock = 0;
++ ext4_lblk_t lblock = 0;
++ ext4_lblk_t prev = 0;
++ int len = 0;
+ while (entries) {
+ if (!ext4_valid_extent(inode, ext))
+ return 0;
++
++ /* Check for overlapping extents */
++ lblock = le32_to_cpu(ext->ee_block);
++ len = ext4_ext_get_actual_len(ext);
++ if ((lblock <= prev) && prev) {
++ pblock = ext4_ext_pblock(ext);
++ es->s_last_error_block = cpu_to_le64(pblock);
++ return 0;
++ }
+ ext++;
+ entries--;
++ prev = lblock + len - 1;
+ }
+ } else {
+ struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
+@@ -1844,8 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
+ depth = ext_depth(inode);
+ if (!path[depth].p_ext)
+ goto out;
+- b2 = le32_to_cpu(path[depth].p_ext->ee_block);
+- b2 &= ~(sbi->s_cluster_ratio - 1);
++ b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
+
+ /*
+ * get the next allocated block if the extent in the path
+@@ -1855,7 +1871,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
+ b2 = ext4_ext_next_allocated_block(path);
+ if (b2 == EXT_MAX_BLOCKS)
+ goto out;
+- b2 &= ~(sbi->s_cluster_ratio - 1);
++ b2 = EXT4_LBLK_CMASK(sbi, b2);
+ }
+
+ /* check for wrap through zero on extent logical start block*/
+@@ -2535,7 +2551,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
+ * extent, we have to mark the cluster as used (store negative
+ * cluster number in partial_cluster).
+ */
+- unaligned = pblk & (sbi->s_cluster_ratio - 1);
++ unaligned = EXT4_PBLK_COFF(sbi, pblk);
+ if (unaligned && (ee_len == num) &&
+ (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
+ *partial_cluster = EXT4_B2C(sbi, pblk);
+@@ -2629,7 +2645,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ * accidentally freeing it later on
+ */
+ pblk = ext4_ext_pblock(ex);
+- if (pblk & (sbi->s_cluster_ratio - 1))
++ if (EXT4_PBLK_COFF(sbi, pblk))
+ *partial_cluster =
+ -((long long)EXT4_B2C(sbi, pblk));
+ ex--;
+@@ -3784,7 +3800,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t lblk_start, lblk_end;
+- lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
++ lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
+ lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
+
+ return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
+@@ -3843,9 +3859,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
+ trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
+
+ /* Check towards left side */
+- c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
++ c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
+ if (c_offset) {
+- lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
++ lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
+ lblk_to = lblk_from + c_offset - 1;
+
+ if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
+@@ -3853,7 +3869,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
+ }
+
+ /* Now check towards right. */
+- c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
++ c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
+ if (allocated_clusters && c_offset) {
+ lblk_from = lblk_start + num_blks;
+ lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
+@@ -4061,7 +4077,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
+ struct ext4_ext_path *path)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+- ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
++ ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
+ ext4_lblk_t ex_cluster_start, ex_cluster_end;
+ ext4_lblk_t rr_cluster_start;
+ ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
+@@ -4079,8 +4095,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
+ (rr_cluster_start == ex_cluster_start)) {
+ if (rr_cluster_start == ex_cluster_end)
+ ee_start += ee_len - 1;
+- map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
+- c_offset;
++ map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
+ map->m_len = min(map->m_len,
+ (unsigned) sbi->s_cluster_ratio - c_offset);
+ /*
+@@ -4234,7 +4249,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ */
+ map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
+ newex.ee_block = cpu_to_le32(map->m_lblk);
+- cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
++ cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
+
+ /*
+ * If we are doing bigalloc, check to see if the extent returned
+@@ -4302,7 +4317,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ * needed so that future calls to get_implied_cluster_alloc()
+ * work correctly.
+ */
+- offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
++ offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
+ ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
+ ar.goal -= offset;
+ ar.logical -= offset;
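Besides converting the open-coded mask arithmetic to the new macros, the extents.c hunks teach ext4_valid_extent_entries() to remember the last logical block of the previous extent and reject the tree when an extent starts at or below that mark, which catches overlapping or mis-sorted leaves. The same sweep over (start, len) pairs in plain C, assuming entries are meant to be sorted by start:

#include <stdio.h>

struct ext { unsigned start, len; };

/* Returns 1 if the sorted extent list is free of overlaps, 0 otherwise. */
static int extents_valid(const struct ext *e, int n)
{
    unsigned prev = 0;             /* last block of the previous extent */

    for (int i = 0; i < n; i++) {
        if (prev && e[i].start <= prev)
            return 0;              /* overlaps or runs backwards */
        prev = e[i].start + e[i].len - 1;
    }
    return 1;
}

int main(void)
{
    struct ext good[] = { {0, 4}, {8, 2} }, bad[] = { {0, 4}, {3, 2} };
    printf("%d %d\n", extents_valid(good, 2), extents_valid(bad, 2)); /* 1 0 */
    return 0;
}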
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index e274e9c1171f..1ddee3dfabe3 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1206,7 +1206,6 @@ static int ext4_journalled_write_end(struct file *file,
+ */
+ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
+ {
+- int retries = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned int md_needed;
+@@ -1218,7 +1217,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
+ * in order to allocate nrblocks
+ * worse case is one extent per block
+ */
+-repeat:
+ spin_lock(&ei->i_block_reservation_lock);
+ /*
+ * ext4_calc_metadata_amount() has side effects, which we have
+@@ -1238,10 +1236,6 @@ repeat:
+ ei->i_da_metadata_calc_len = save_len;
+ ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+ spin_unlock(&ei->i_block_reservation_lock);
+- if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+- cond_resched();
+- goto repeat;
+- }
+ return -ENOSPC;
+ }
+ ei->i_reserved_meta_blocks += md_needed;
+@@ -1255,7 +1249,6 @@ repeat:
+ */
+ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
+ {
+- int retries = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned int md_needed;
+@@ -1277,7 +1270,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
+ * in order to allocate nrblocks
+ * worse case is one extent per block
+ */
+-repeat:
+ spin_lock(&ei->i_block_reservation_lock);
+ /*
+ * ext4_calc_metadata_amount() has side effects, which we have
+@@ -1297,10 +1289,6 @@ repeat:
+ ei->i_da_metadata_calc_len = save_len;
+ ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+ spin_unlock(&ei->i_block_reservation_lock);
+- if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+- cond_resched();
+- goto repeat;
+- }
+ dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+ return -ENOSPC;
+ }
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index a41e3ba8cfaa..04a5c7504be9 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3442,6 +3442,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
+ {
+ struct ext4_prealloc_space *pa;
+ pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
++
++ BUG_ON(atomic_read(&pa->pa_count));
++ BUG_ON(pa->pa_deleted == 0);
+ kmem_cache_free(ext4_pspace_cachep, pa);
+ }
+
+@@ -3455,11 +3458,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
+ ext4_group_t grp;
+ ext4_fsblk_t grp_blk;
+
+- if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
+- return;
+-
+ /* in this short window concurrent discard can set pa_deleted */
+ spin_lock(&pa->pa_lock);
++ if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
++ spin_unlock(&pa->pa_lock);
++ return;
++ }
++
+ if (pa->pa_deleted == 1) {
+ spin_unlock(&pa->pa_lock);
+ return;
+@@ -4121,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
+ ext4_get_group_no_and_offset(sb, goal, &group, &block);
+
+ /* set up allocation goals */
+- ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
++ ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
+ ac->ac_status = AC_STATUS_CONTINUE;
+ ac->ac_sb = sb;
+ ac->ac_inode = ar->inode;
+@@ -4663,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
+ * blocks at the beginning or the end unless we are explicitly
+ * requested to avoid doing so.
+ */
+- overflow = block & (sbi->s_cluster_ratio - 1);
++ overflow = EXT4_PBLK_COFF(sbi, block);
+ if (overflow) {
+ if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
+ overflow = sbi->s_cluster_ratio - overflow;
+@@ -4677,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
+ count += overflow;
+ }
+ }
+- overflow = count & (sbi->s_cluster_ratio - 1);
++ overflow = EXT4_LBLK_COFF(sbi, count);
+ if (overflow) {
+ if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
+ if (count > overflow)
+@@ -4794,8 +4799,8 @@ do_more:
+ " group:%d block:%d count:%lu failed"
+ " with %d", block_group, bit, count,
+ err);
+- }
+-
++ } else
++ EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
+
+ ext4_lock_group(sb, block_group);
+ mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
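The mballoc fix moves the atomic_dec_and_test() inside pa_lock so that the "last reference dropped" decision and the pa_deleted check happen under the same lock the discard path takes, closing the window in which both sides could act on the preallocation. A hedged pthread analogue of dropping the final reference under the object's own lock (struct pa here is a stand-in, not the kernel type):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct pa {
    pthread_mutex_t lock;
    atomic_int      count;
    bool            deleted;
};

/* Returns true if the caller dropped the last reference and the object
 * was not already claimed by a concurrent "discard". */
static bool pa_put(struct pa *pa)
{
    bool last = false;

    pthread_mutex_lock(&pa->lock);
    if (atomic_fetch_sub(&pa->count, 1) == 1 && !pa->deleted) {
        pa->deleted = true;        /* claim teardown while holding the lock */
        last = true;
    }
    pthread_mutex_unlock(&pa->lock);
    return last;
}

int main(void)
{
    struct pa pa = { PTHREAD_MUTEX_INITIALIZER };
    atomic_init(&pa.count, 1);
    return pa_put(&pa) ? 0 : 1;    /* last ref dropped -> success */
}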
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 2c2e6cbc6bed..b947e0af9956 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -773,7 +773,7 @@ static void ext4_put_super(struct super_block *sb)
+ }
+
+ ext4_es_unregister_shrinker(sbi);
+- del_timer(&sbi->s_err_report);
++ del_timer_sync(&sbi->s_err_report);
+ ext4_release_system_zone(sb);
+ ext4_mb_release(sb);
+ ext4_ext_release(sb);
+@@ -3288,11 +3288,19 @@ int ext4_calculate_overhead(struct super_block *sb)
+ }
+
+
+-static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
++static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
+ {
+ ext4_fsblk_t resv_clusters;
+
+ /*
++ * There's no need to reserve anything when we aren't using extents.
++ * The space estimates are exact, there are no unwritten extents,
++ * hole punching doesn't need new metadata... This is needed especially
++ * to keep ext2/3 backward compatibility.
++ */
++ if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
++ return 0;
++ /*
+ * By default we reserve 2% or 4096 clusters, whichever is smaller.
+ * This should cover the situations where we can not afford to run
+ * out of space like for example punch hole, or converting
+@@ -3300,7 +3308,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
+ * allocation would require 1, or 2 blocks, higher numbers are
+ * very rare.
+ */
+- resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
++ resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >>
++ EXT4_SB(sb)->s_cluster_bits;
+
+ do_div(resv_clusters, 50);
+ resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
+@@ -4043,10 +4052,10 @@ no_journal:
+ "available");
+ }
+
+- err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi));
++ err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb));
+ if (err) {
+ ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
+- "reserved pool", ext4_calculate_resv_clusters(sbi));
++ "reserved pool", ext4_calculate_resv_clusters(sb));
+ goto failed_mount4a;
+ }
+
+@@ -4151,7 +4160,7 @@ failed_mount_wq:
+ }
+ failed_mount3:
+ ext4_es_unregister_shrinker(sbi);
+- del_timer(&sbi->s_err_report);
++ del_timer_sync(&sbi->s_err_report);
+ if (sbi->s_flex_groups)
+ ext4_kvfree(sbi->s_flex_groups);
+ percpu_counter_destroy(&sbi->s_freeclusters_counter);
+diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
+index 1f7d8057ea68..1253c2006029 100644
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -984,6 +984,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
+ {
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
++ struct address_space *mapping = inode->i_mapping;
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_holder gh;
+ int rv;
+@@ -1004,6 +1005,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
+ if (rv != 1)
+ goto out; /* dio not valid, fall back to buffered i/o */
+
++ /*
++ * Now since we are holding a deferred (CW) lock at this point, you
++ * might be wondering why this is ever needed. There is a case however
++ * where we've granted a deferred local lock against a cached exclusive
++ * glock. That is ok provided all granted local locks are deferred, but
++ * it also means that it is possible to encounter pages which are
++ * cached and possibly also mapped. So here we check for that and sort
++ * them out ahead of the dio. The glock state machine will take care of
++ * everything else.
++ *
++ * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
++ * the first place, mapping->nrpages will always be zero.
++ */
++ if (mapping->nrpages) {
++ loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
++ loff_t len = iov_length(iov, nr_segs);
++ loff_t end = PAGE_ALIGN(offset + len) - 1;
++
++ rv = 0;
++ if (len == 0)
++ goto out;
++ if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
++ unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
++ rv = filemap_write_and_wait_range(mapping, lstart, end);
++ if (rv)
++ return rv;
++ truncate_inode_pages_range(mapping, lstart, end);
++ }
++
+ rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+ offset, nr_segs, gfs2_get_block_direct,
+ NULL, NULL, 0);
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index 610613fb65b5..9dcb9777a5f8 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -551,10 +551,10 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
+ struct buffer_head *bh = bd->bd_bh;
+ struct gfs2_glock *gl = bd->bd_gl;
+
+- gfs2_remove_from_ail(bd);
+- bd->bd_bh = NULL;
+ bh->b_private = NULL;
+ bd->bd_blkno = bh->b_blocknr;
++ gfs2_remove_from_ail(bd); /* drops ref on bh */
++ bd->bd_bh = NULL;
+ bd->bd_ops = &gfs2_revoke_lops;
+ sdp->sd_log_num_revoke++;
+ atomic_inc(&gl->gl_revokes);
+diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
+index 932415050540..52f177be3bf8 100644
+--- a/fs/gfs2/meta_io.c
++++ b/fs/gfs2/meta_io.c
+@@ -258,6 +258,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
+ struct address_space *mapping = bh->b_page->mapping;
+ struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
+ struct gfs2_bufdata *bd = bh->b_private;
++ int was_pinned = 0;
+
+ if (test_clear_buffer_pinned(bh)) {
+ trace_gfs2_pin(bd, 0);
+@@ -273,12 +274,16 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
+ tr->tr_num_databuf_rm++;
+ }
+ tr->tr_touched = 1;
++ was_pinned = 1;
+ brelse(bh);
+ }
+ if (bd) {
+ spin_lock(&sdp->sd_ail_lock);
+ if (bd->bd_tr) {
+ gfs2_trans_add_revoke(sdp, bd);
++ } else if (was_pinned) {
++ bh->b_private = NULL;
++ kmem_cache_free(gfs2_bufdata_cachep, bd);
+ }
+ spin_unlock(&sdp->sd_ail_lock);
+ }
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 19ff5e8c285c..21518b76cd8b 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -1366,8 +1366,18 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
+ if (IS_ERR(s))
+ goto error_bdev;
+
+- if (s->s_root)
++ if (s->s_root) {
++ /*
++ * s_umount nests inside bd_mutex during
++ * __invalidate_device(). blkdev_put() acquires
++ * bd_mutex and can't be called under s_umount. Drop
++ * s_umount temporarily. This is safe as we're
++ * holding an active reference.
++ */
++ up_write(&s->s_umount);
+ blkdev_put(bdev, mode);
++ down_write(&s->s_umount);
++ }
+
+ memset(&args, 0, sizeof(args));
+ args.ar_quota = GFS2_QUOTA_DEFAULT;
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 7aa9a32573bb..b0b74e58697b 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1290,7 +1290,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ * once a transaction -bzzz
+ */
+ jh->b_modified = 1;
+- J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
++ if (handle->h_buffer_credits <= 0) {
++ ret = -ENOSPC;
++ goto out_unlock_bh;
++ }
+ handle->h_buffer_credits--;
+ }
+
+@@ -1373,7 +1376,6 @@ out_unlock_bh:
+ jbd2_journal_put_journal_head(jh);
+ out:
+ JBUFFER_TRACE(jh, "exit");
+- WARN_ON(ret); /* All errors are bugs, so dump the stack */
+ return ret;
+ }
+
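Rather than asserting h_buffer_credits > 0 (and crashing the machine on a corrupted filesystem), jbd2_journal_dirty_metadata() now fails the operation with -ENOSPC and lets the caller report it, which is what the ext4_error_inode() hunk in ext4_jbd2.c earlier wires up. The shape of that assert-to-error conversion, in miniature:

#include <errno.h>
#include <stdio.h>

static int consume_credit(int *credits)
{
    if (*credits <= 0)
        return -ENOSPC;            /* graceful failure instead of J_ASSERT */
    (*credits)--;
    return 0;
}

int main(void)
{
    int credits = 0;
    printf("rc=%d\n", consume_credit(&credits));  /* -ENOSPC (-28 on Linux) */
    return 0;
}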
+diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
+index 3e6c2e6c9cd2..4688a622b373 100644
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -137,8 +137,6 @@ xfs_qm_dqpurge(
+ {
+ struct xfs_mount *mp = dqp->q_mount;
+ struct xfs_quotainfo *qi = mp->m_quotainfo;
+- struct xfs_dquot *gdqp = NULL;
+- struct xfs_dquot *pdqp = NULL;
+
+ xfs_dqlock(dqp);
+ if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
+@@ -146,21 +144,6 @@ xfs_qm_dqpurge(
+ return EAGAIN;
+ }
+
+- /*
+- * If this quota has a hint attached, prepare for releasing it now.
+- */
+- gdqp = dqp->q_gdquot;
+- if (gdqp) {
+- xfs_dqlock(gdqp);
+- dqp->q_gdquot = NULL;
+- }
+-
+- pdqp = dqp->q_pdquot;
+- if (pdqp) {
+- xfs_dqlock(pdqp);
+- dqp->q_pdquot = NULL;
+- }
+-
+ dqp->dq_flags |= XFS_DQ_FREEING;
+
+ xfs_dqflock(dqp);
+@@ -209,11 +192,47 @@ xfs_qm_dqpurge(
+ XFS_STATS_DEC(xs_qm_dquot_unused);
+
+ xfs_qm_dqdestroy(dqp);
++ return 0;
++}
++
++/*
++ * Release the group or project dquot pointers the user dquots may be carrying
++ * around as a hint, and proceed to purge the user dquot cache if requested.
++ */
++STATIC int
++xfs_qm_dqpurge_hints(
++ struct xfs_dquot *dqp,
++ void *data)
++{
++ struct xfs_dquot *gdqp = NULL;
++ struct xfs_dquot *pdqp = NULL;
++ uint flags = *((uint *)data);
+
++ xfs_dqlock(dqp);
++ if (dqp->dq_flags & XFS_DQ_FREEING) {
++ xfs_dqunlock(dqp);
++ return EAGAIN;
++ }
++
++ /* If this quota has a hint attached, prepare for releasing it now */
++ gdqp = dqp->q_gdquot;
+ if (gdqp)
+- xfs_qm_dqput(gdqp);
++ dqp->q_gdquot = NULL;
++
++ pdqp = dqp->q_pdquot;
+ if (pdqp)
+- xfs_qm_dqput(pdqp);
++ dqp->q_pdquot = NULL;
++
++ xfs_dqunlock(dqp);
++
++ if (gdqp)
++ xfs_qm_dqrele(gdqp);
++ if (pdqp)
++ xfs_qm_dqrele(pdqp);
++
++ if (flags & XFS_QMOPT_UQUOTA)
++ return xfs_qm_dqpurge(dqp, NULL);
++
+ return 0;
+ }
+
+@@ -225,8 +244,18 @@ xfs_qm_dqpurge_all(
+ struct xfs_mount *mp,
+ uint flags)
+ {
+- if (flags & XFS_QMOPT_UQUOTA)
+- xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
++ /*
++ * We have to release group/project dquot hint(s) from the user dquot
++ * at first if they are there, otherwise we would run into an infinite
++ * loop while walking through radix tree to purge other type of dquots
++ * since their refcount is not zero if the user dquot refers to them
++ * as hint.
++ *
++ * Calling the special xfs_qm_dqpurge_hints() will end up going through the
++ * general xfs_qm_dqpurge() against user dquot cache if requested.
++ */
++ xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
++
+ if (flags & XFS_QMOPT_GQUOTA)
+ xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
+ if (flags & XFS_QMOPT_PQUOTA)
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index 2611577869ae..d78b353b12b4 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -169,7 +169,8 @@ struct acpi_device_flags {
+ u32 ejectable:1;
+ u32 power_manageable:1;
+ u32 match_driver:1;
+- u32 reserved:27;
++ u32 no_hotplug:1;
++ u32 reserved:26;
+ };
+
+ /* File System */
+@@ -357,6 +358,7 @@ extern struct kobject *acpi_kobj;
+ extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
+ void acpi_bus_private_data_handler(acpi_handle, void *);
+ int acpi_bus_get_private_data(acpi_handle, void **);
++void acpi_bus_no_hotplug(acpi_handle handle);
+ extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
+ extern int register_acpi_notifier(struct notifier_block *);
+ extern int unregister_acpi_notifier(struct notifier_block *);
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index f330d28e4d0e..b12079afbd5f 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+ #endif
+
+ #ifndef pte_accessible
+-# define pte_accessible(pte) ((void)(pte),1)
++# define pte_accessible(mm, pte) ((void)(pte), 1)
+ #endif
+
+ #ifndef flush_tlb_fix_spurious_fault
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index 3d79e513c0b3..0bd7a2ec8a45 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -588,7 +588,7 @@
+ {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+- {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
++ {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
+index 669fef5c745a..3e0fbe441763 100644
+--- a/include/linux/auxvec.h
++++ b/include/linux/auxvec.h
+@@ -3,6 +3,6 @@
+
+ #include <uapi/linux/auxvec.h>
+
+-#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
++#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
+ /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
+ #endif /* _LINUX_AUXVEC_H */
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 0e23c26485f4..9b503376738f 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -418,6 +418,7 @@ enum {
+ ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
+ ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
+ ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
++ ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
+
+ /* DMA mask for user DMA control: User visible values; DO NOT
+ renumber */
+diff --git a/include/linux/migrate.h b/include/linux/migrate.h
+index 8d3c57fdf221..ee8b14ae4f3f 100644
+--- a/include/linux/migrate.h
++++ b/include/linux/migrate.h
+@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page);
+ extern int migrate_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page,
+- struct buffer_head *head, enum migrate_mode mode);
++ struct buffer_head *head, enum migrate_mode mode,
++ int extra_count);
+ #else
+
+ static inline void putback_lru_pages(struct list_head *l) {}
+@@ -90,10 +91,18 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
+ #endif /* CONFIG_MIGRATION */
+
+ #ifdef CONFIG_NUMA_BALANCING
+-extern int migrate_misplaced_page(struct page *page, int node);
++extern bool pmd_trans_migrating(pmd_t pmd);
++extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
+ extern int migrate_misplaced_page(struct page *page, int node);
+ extern bool migrate_ratelimited(int node);
+ #else
++static inline bool pmd_trans_migrating(pmd_t pmd)
++{
++ return false;
++}
++static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
++{
++}
+ static inline int migrate_misplaced_page(struct page *page, int node)
+ {
+ return -EAGAIN; /* can't migrate now */
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index d9851eeb6e1d..8e082f18fb6a 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -435,6 +435,14 @@ struct mm_struct {
+ */
+ int first_nid;
+ #endif
++#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
++ /*
++ * An operation with batched TLB flushing is going on. Anything that
++ * can move process memory needs to flush the TLB when moving a
++ * PROT_NONE or PROT_NUMA mapped page.
++ */
++ bool tlb_flush_pending;
++#endif
+ struct uprobes_state uprobes_state;
+ };
+
+@@ -455,4 +463,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
+ return mm->cpu_vm_mask_var;
+ }
+
++#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
++/*
++ * Memory barriers to keep this state in sync are graciously provided by
++ * the page table locks, outside of which no page table modifications happen.
++ * The barriers below prevent the compiler from re-ordering the instructions
++ * around the memory barriers that are already present in the code.
++ */
++static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
++{
++ barrier();
++ return mm->tlb_flush_pending;
++}
++static inline void set_tlb_flush_pending(struct mm_struct *mm)
++{
++ mm->tlb_flush_pending = true;
++
++ /*
++ * Guarantee that the tlb_flush_pending store does not leak into the
++ * critical section updating the page tables
++ */
++ smp_mb__before_spinlock();
++}
++/* Clearing is done after a TLB flush, which also provides a barrier. */
++static inline void clear_tlb_flush_pending(struct mm_struct *mm)
++{
++ barrier();
++ mm->tlb_flush_pending = false;
++}
++#else
++static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
++{
++ return false;
++}
++static inline void set_tlb_flush_pending(struct mm_struct *mm)
++{
++}
++static inline void clear_tlb_flush_pending(struct mm_struct *mm)
++{
++}
++#endif
++
+ #endif /* _LINUX_MM_TYPES_H */
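set_tlb_flush_pending() raises the flag before the page tables are rewritten and relies on a barrier (smp_mb__before_spinlock() upstream) so the flag cannot sink into the critical section; clearing happens only after the TLB flush, which supplies its own barrier. A userspace sketch of that lifecycle with C11 atomics; struct mm here is a stand-in for the kernel type, and the seq_cst operations approximate the explicit kernel barriers.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm { atomic_bool tlb_flush_pending; };

/* Raise the flag before touching the page tables; the seq_cst store
 * keeps it from being reordered into the section that rewrites PTEs. */
static void set_pending(struct mm *mm)
{
    atomic_store(&mm->tlb_flush_pending, true);
}

static bool flush_pending(struct mm *mm)
{
    return atomic_load(&mm->tlb_flush_pending);
}

/* Cleared only after the TLB flush, which provides its own barrier. */
static void clear_pending(struct mm *mm)
{
    atomic_store(&mm->tlb_flush_pending, false);
}

int main(void)
{
    struct mm mm;
    atomic_init(&mm.tlb_flush_pending, false);
    set_pending(&mm);
    printf("pending=%d\n", flush_pending(&mm));  /* 1 */
    clear_pending(&mm);
    return 0;
}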
+diff --git a/include/linux/reboot.h b/include/linux/reboot.h
+index 8e00f9f6f963..9e7db9e73cc1 100644
+--- a/include/linux/reboot.h
++++ b/include/linux/reboot.h
+@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
+ * Architecture-specific implementations of sys_reboot commands.
+ */
+
++extern void migrate_to_reboot_cpu(void);
+ extern void machine_restart(char *cmd);
+ extern void machine_halt(void);
+ extern void machine_power_off(void);
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 5bdb8b7d2a69..23bfd1028457 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -624,6 +624,7 @@ struct se_dev_attrib {
+ u32 unmap_granularity;
+ u32 unmap_granularity_alignment;
+ u32 max_write_same_len;
++ u32 max_bytes_per_io;
+ struct se_device *da_dev;
+ struct config_group da_group;
+ };
+diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
+index 46d41e8b0dcc..a71442bd7ec9 100644
+--- a/include/uapi/drm/radeon_drm.h
++++ b/include/uapi/drm/radeon_drm.h
+@@ -981,6 +981,8 @@ struct drm_radeon_cs {
+ #define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
+ /* query if CP DMA is supported on the compute ring */
+ #define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
++/* query the number of render backends */
++#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19
+
+
+ struct drm_radeon_info {
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 5c9127dc1b66..b6fd78344c53 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -4490,14 +4490,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
+ list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
+ root->number_of_cgroups++;
+
+- /* each css holds a ref to the cgroup's dentry and the parent css */
+- for_each_root_subsys(root, ss) {
+- struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
+-
+- dget(dentry);
+- css_get(css->parent);
+- }
+-
+ /* hold a ref to the parent's dentry */
+ dget(parent->dentry);
+
+@@ -4509,6 +4501,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
+ if (err)
+ goto err_destroy;
+
++ /* each css holds a ref to the cgroup's dentry and parent css */
++ dget(dentry);
++ css_get(css->parent);
++
++ /* mark it consumed for error path */
++ css_ar[ss->subsys_id] = NULL;
++
+ if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
+ parent->parent) {
+ pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
+@@ -4555,6 +4554,14 @@ err_free_cgrp:
+ return err;
+
+ err_destroy:
++ for_each_root_subsys(root, ss) {
++ struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
++
++ if (css) {
++ percpu_ref_cancel_init(&css->refcnt);
++ ss->css_free(css);
++ }
++ }
+ cgroup_destroy_locked(cgrp);
+ mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+@@ -4716,8 +4723,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
+ * will be invoked to perform the rest of destruction once the
+ * percpu refs of all css's are confirmed to be killed.
+ */
+- for_each_root_subsys(cgrp->root, ss)
+- kill_css(cgroup_css(cgrp, ss));
++ for_each_root_subsys(cgrp->root, ss) {
++ struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
++
++ if (css)
++ kill_css(css);
++ }
+
+ /*
+ * Mark @cgrp dead. This prevents further task migration and child
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 086fe73ad6bd..690cfacaed71 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
+ spin_lock_init(&mm->page_table_lock);
+ mm_init_aio(mm);
+ mm_init_owner(mm, p);
++ clear_tlb_flush_pending(mm);
+
+ if (likely(!mm_alloc_pgd(mm))) {
+ mm->def_flags = 0;
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index b462fa197517..aa6a8aadb911 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt);
+ bool pm_freezing;
+ bool pm_nosig_freezing;
+
++/*
++ * Temporary export for the deadlock workaround in ata_scsi_hotplug().
++ * Remove once the hack becomes unnecessary.
++ */
++EXPORT_SYMBOL_GPL(pm_freezing);
++
+ /* protects freezing and frozen transitions */
+ static DEFINE_SPINLOCK(freezer_lock);
+
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index ecd783dda9ae..355e13af62c5 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -1680,6 +1680,7 @@ int kernel_kexec(void)
+ {
+ kexec_in_progress = true;
+ kernel_restart_prepare(NULL);
++ migrate_to_reboot_cpu();
+ printk(KERN_EMERG "Starting new kernel\n");
+ machine_shutdown();
+ }
+diff --git a/kernel/reboot.c b/kernel/reboot.c
+index f813b3474646..662c83fc16b7 100644
+--- a/kernel/reboot.c
++++ b/kernel/reboot.c
+@@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb)
+ }
+ EXPORT_SYMBOL(unregister_reboot_notifier);
+
+-static void migrate_to_reboot_cpu(void)
++void migrate_to_reboot_cpu(void)
+ {
+ /* The boot cpu is always logical cpu 0 */
+ int cpu = reboot_cpu;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 513fc2fd5109..7765ad82736a 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -974,6 +974,13 @@ void task_numa_work(struct callback_head *work)
+ if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
+ continue;
+
++ /*
++ * Skip inaccessible VMAs to avoid any confusion between
++ * PROT_NONE and NUMA hinting ptes
++ */
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
++ continue;
++
+ do {
+ start = max(start, vma->vm_start);
+ end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 01970c8e64df..417b1b3fd7e9 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -899,6 +899,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+ {
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
++#ifdef CONFIG_RT_GROUP_SCHED
++ /*
++ * Change rq's cpupri only if rt_rq is the top queue.
++ */
++ if (&rq->rt != rt_rq)
++ return;
++#endif
+ if (rq->online && prio < prev_prio)
+ cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
+ }
+@@ -908,6 +915,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+ {
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
++#ifdef CONFIG_RT_GROUP_SCHED
++ /*
++ * Change rq's cpupri only if rt_rq is the top queue.
++ */
++ if (&rq->rt != rt_rq)
++ return;
++#endif
+ if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+ cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
+ }
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index f3bd09eecb7e..1345d9ff0662 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -775,7 +775,7 @@ static int ftrace_profile_init(void)
+ int cpu;
+ int ret = 0;
+
+- for_each_online_cpu(cpu) {
++ for_each_possible_cpu(cpu) {
+ ret = ftrace_profile_init_cpu(cpu);
+ if (ret)
+ break;
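Allocating the profile buffers with for_each_online_cpu() leaves any CPU hot-plugged later with no buffer; iterating the possible set sizes the allocation for every CPU that could ever appear. A toy version of that sizing decision (the counts are illustrative):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    int possible = 8;   /* illustrative: CPUs the system could ever online */
    int online   = 2;   /* illustrative: CPUs up right now */

    /* Size per-cpu state for every possible CPU, as the fix does, so a
     * later hotplug never finds a missing buffer. */
    void **bufs = calloc(possible, sizeof(*bufs));
    if (!bufs)
        return 1;
    for (int cpu = 0; cpu < possible; cpu++)
        bufs[cpu] = calloc(1, 4096);

    printf("allocated for %d cpus, %d currently online\n", possible, online);
    return 0;
}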
+diff --git a/mm/compaction.c b/mm/compaction.c
+index b5326b141a25..74ad00908c79 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc,
+ bool migrate_scanner)
+ {
+ struct zone *zone = cc->zone;
++
++ if (cc->ignore_skip_hint)
++ return;
++
+ if (!page)
+ return;
+
+diff --git a/mm/fremap.c b/mm/fremap.c
+index 5bff08147768..bbc4d660221a 100644
+--- a/mm/fremap.c
++++ b/mm/fremap.c
+@@ -208,9 +208,10 @@ get_write_lock:
+ if (mapping_cap_account_dirty(mapping)) {
+ unsigned long addr;
+ struct file *file = get_file(vma->vm_file);
++ /* mmap_region may free vma; grab the info now */
++ vm_flags = vma->vm_flags;
+
+- addr = mmap_region(file, start, size,
+- vma->vm_flags, pgoff);
++ addr = mmap_region(file, start, size, vm_flags, pgoff);
+ fput(file);
+ if (IS_ERR_VALUE(addr)) {
+ err = addr;
+@@ -218,7 +219,7 @@ get_write_lock:
+ BUG_ON(addr != start);
+ err = 0;
+ }
+- goto out;
++ goto out_freed;
+ }
+ mutex_lock(&mapping->i_mmap_mutex);
+ flush_dcache_mmap_lock(mapping);
+@@ -253,6 +254,7 @@ get_write_lock:
+ out:
+ if (vma)
+ vm_flags = vma->vm_flags;
++out_freed:
+ if (likely(!has_write_lock))
+ up_read(&mm->mmap_sem);
+ else
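mmap_region() can merge and free the vma passed in, so the fremap fix snapshots vma->vm_flags into a local before the call and jumps to the new out_freed label afterwards, never re-reading the possibly freed structure. The hazard in miniature: copy what you need out of an object before handing it to a routine that may free it. Names below are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct vma { unsigned long vm_flags; };

/* Stand-in for mmap_region(), which may merge and free the vma. */
static void may_free(struct vma *v) { free(v); }

static unsigned long remap(struct vma *v)
{
    unsigned long flags = v->vm_flags;  /* grab the info now */

    may_free(v);                        /* v must not be touched after this */
    return flags;                       /* safe: we use the snapshot */
}

int main(void)
{
    struct vma *v = malloc(sizeof(*v));
    if (!v)
        return 1;
    v->vm_flags = 0x7;
    printf("flags=%#lx\n", remap(v));
    return 0;
}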
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index cca80d96e509..47962456ed87 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -884,6 +884,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ ret = 0;
+ goto out_unlock;
+ }
++
++ /* mmap_sem prevents this happening but warn if that changes */
++ WARN_ON(pmd_trans_migrating(pmd));
++
+ if (unlikely(pmd_trans_splitting(pmd))) {
+ /* split huge page running from under us */
+ spin_unlock(&src_mm->page_table_lock);
+@@ -1240,6 +1244,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
+ return ERR_PTR(-EFAULT);
+
++ /* Full NUMA hinting faults to serialise migration in fault paths */
++ if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
++ goto out;
++
+ page = pmd_page(*pmd);
+ VM_BUG_ON(!PageHead(page));
+ if (flags & FOLL_TOUCH) {
+@@ -1290,6 +1298,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (unlikely(!pmd_same(pmd, *pmdp)))
+ goto out_unlock;
+
++ /*
++ * If there are potential migrations, wait for completion and retry
++ * without disrupting NUMA hinting information. Do not relock and
++ * check_same as the page may no longer be mapped.
++ */
++ if (unlikely(pmd_trans_migrating(*pmdp))) {
++ spin_unlock(&mm->page_table_lock);
++ wait_migrate_huge_page(vma->anon_vma, pmdp);
++ goto out;
++ }
++
+ page = pmd_page(pmd);
+ page_nid = page_to_nid(page);
+ count_vm_numa_event(NUMA_HINT_FAULTS);
+@@ -1306,23 +1325,22 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ /* If the page was locked, there are no parallel migrations */
+ if (page_locked)
+ goto clear_pmdnuma;
++ }
+
+- /*
+- * Otherwise wait for potential migrations and retry. We do
+- * relock and check_same as the page may no longer be mapped.
+- * As the fault is being retried, do not account for it.
+- */
++ /* Migration could have started since the pmd_trans_migrating check */
++ if (!page_locked) {
+ spin_unlock(&mm->page_table_lock);
+ wait_on_page_locked(page);
+ page_nid = -1;
+ goto out;
+ }
+
+- /* Page is misplaced, serialise migrations and parallel THP splits */
++ /*
++ * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
++ * to serialise splits
++ */
+ get_page(page);
+ spin_unlock(&mm->page_table_lock);
+- if (!page_locked)
+- lock_page(page);
+ anon_vma = page_lock_anon_vma_read(page);
+
+ /* Confirm the PTE did not while locked */
+@@ -1334,6 +1352,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ goto out_unlock;
+ }
+
++ /* Bail if we fail to protect against THP splits for any reason */
++ if (unlikely(!anon_vma)) {
++ put_page(page);
++ page_nid = -1;
++ goto clear_pmdnuma;
++ }
++
+ /*
+ * Migrate the THP to the requested node, returns with page unlocked
+ * and pmd_numa cleared.
+@@ -1466,20 +1491,24 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+
+ if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+ pmd_t entry;
+- entry = pmdp_get_and_clear(mm, addr, pmd);
+ if (!prot_numa) {
++ entry = pmdp_get_and_clear(mm, addr, pmd);
++ if (pmd_numa(entry))
++ entry = pmd_mknonnuma(entry);
+ entry = pmd_modify(entry, newprot);
+ BUG_ON(pmd_write(entry));
++ set_pmd_at(mm, addr, pmd, entry);
+ } else {
+ struct page *page = pmd_page(*pmd);
++ entry = *pmd;
+
+ /* only check non-shared pages */
+ if (page_mapcount(page) == 1 &&
+ !pmd_numa(*pmd)) {
+ entry = pmd_mknuma(entry);
++ set_pmd_at(mm, addr, pmd, entry);
+ }
+ }
+- set_pmd_at(mm, addr, pmd, entry);
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ ret = 1;
+ }
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 36cc2d0570ab..e0e979276df0 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -337,7 +337,7 @@ struct mem_cgroup {
+ static size_t memcg_size(void)
+ {
+ return sizeof(struct mem_cgroup) +
+- nr_node_ids * sizeof(struct mem_cgroup_per_node);
++ nr_node_ids * sizeof(struct mem_cgroup_per_node *);
+ }
+
+ /* internal only representation about the status of kmem accounting. */
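memcg_size() was over-allocating: the flexible tail of struct mem_cgroup holds per-node *pointers*, so each slot needs sizeof(struct mem_cgroup_per_node *), not sizeof(struct mem_cgroup_per_node). The difference is easy to see standalone (struct per_node below is an illustrative stand-in):

#include <stdio.h>

struct per_node { long stats[32]; };

int main(void)
{
    int nr_nodes = 4;

    size_t wrong = nr_nodes * sizeof(struct per_node);     /* 4 * 256 bytes */
    size_t right = nr_nodes * sizeof(struct per_node *);   /* 4 * 8 on LP64 */

    printf("wrong=%zu right=%zu\n", wrong, right);
    return 0;
}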
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index bf3351b5115e..9aea53f4551c 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -938,6 +938,16 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ BUG_ON(!PageHWPoison(p));
+ return SWAP_FAIL;
+ }
++ /*
++ * We pinned the head page for hwpoison handling,
++ * now we split the thp and we are interested in
++ * the hwpoisoned raw page, so move the refcount
++ * to it.
++ */
++ if (hpage != p) {
++ put_page(hpage);
++ get_page(p);
++ }
+ /* THP is split, so ppage should be the real poisoned page. */
+ ppage = p;
+ }
+@@ -1519,10 +1529,16 @@ static int soft_offline_huge_page(struct page *page, int flags)
+ if (ret > 0)
+ ret = -EIO;
+ } else {
+- set_page_hwpoison_huge_page(hpage);
+- dequeue_hwpoisoned_huge_page(hpage);
+- atomic_long_add(1 << compound_order(hpage),
+- &num_poisoned_pages);
++ /* overcommit hugetlb page will be freed to buddy */
++ if (PageHuge(page)) {
++ set_page_hwpoison_huge_page(hpage);
++ dequeue_hwpoisoned_huge_page(hpage);
++ atomic_long_add(1 << compound_order(hpage),
++ &num_poisoned_pages);
++ } else {
++ SetPageHWPoison(page);
++ atomic_long_inc(&num_poisoned_pages);
++ }
+ }
+ return ret;
+ }
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 04729647f359..6b22d8f9bfb8 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1317,7 +1317,7 @@ static long do_mbind(unsigned long start, unsigned long len,
+ if (nr_failed && (flags & MPOL_MF_STRICT))
+ err = -EIO;
+ } else
+- putback_lru_pages(&pagelist);
++ putback_movable_pages(&pagelist);
+
+ up_write(&mm->mmap_sem);
+ mpol_out:
+diff --git a/mm/migrate.c b/mm/migrate.c
+index c04692774e88..e3cf71dd1288 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -36,6 +36,7 @@
+ #include <linux/hugetlb_cgroup.h>
+ #include <linux/gfp.h>
+ #include <linux/balloon_compaction.h>
++#include <linux/mmu_notifier.h>
+
+ #include <asm/tlbflush.h>
+
+@@ -315,14 +316,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
+ */
+ int migrate_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page,
+- struct buffer_head *head, enum migrate_mode mode)
++ struct buffer_head *head, enum migrate_mode mode,
++ int extra_count)
+ {
+- int expected_count = 0;
++ int expected_count = 1 + extra_count;
+ void **pslot;
+
+ if (!mapping) {
+ /* Anonymous page without mapping */
+- if (page_count(page) != 1)
++ if (page_count(page) != expected_count)
+ return -EAGAIN;
+ return MIGRATEPAGE_SUCCESS;
+ }
+@@ -332,7 +334,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ pslot = radix_tree_lookup_slot(&mapping->page_tree,
+ page_index(page));
+
+- expected_count = 2 + page_has_private(page);
++ expected_count += 1 + page_has_private(page);
+ if (page_count(page) != expected_count ||
+ radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
+ spin_unlock_irq(&mapping->tree_lock);
+@@ -525,7 +527,7 @@ int migrate_page(struct address_space *mapping,
+
+ BUG_ON(PageWriteback(page)); /* Writeback must be complete */
+
+- rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
++ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+
+ if (rc != MIGRATEPAGE_SUCCESS)
+ return rc;
+@@ -552,7 +554,7 @@ int buffer_migrate_page(struct address_space *mapping,
+
+ head = page_buffers(page);
+
+- rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
++ rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
+
+ if (rc != MIGRATEPAGE_SUCCESS)
+ return rc;
+@@ -1596,6 +1598,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+ return 1;
+ }
+
++bool pmd_trans_migrating(pmd_t pmd)
++{
++ struct page *page = pmd_page(pmd);
++ return PageLocked(page);
++}
++
++void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
++{
++ struct page *page = pmd_page(*pmd);
++ wait_on_page_locked(page);
++}
++
+ /*
+ * Attempt to migrate a misplaced page to the specified destination
+ * node. Caller is expected to have an elevated reference count on
+@@ -1655,12 +1669,14 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+ unsigned long address,
+ struct page *page, int node)
+ {
+- unsigned long haddr = address & HPAGE_PMD_MASK;
+ pg_data_t *pgdat = NODE_DATA(node);
+ int isolated = 0;
+ struct page *new_page = NULL;
+ struct mem_cgroup *memcg = NULL;
+ int page_lru = page_is_file_cache(page);
++ unsigned long mmun_start = address & HPAGE_PMD_MASK;
++ unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
++ pmd_t orig_entry;
+
+ /*
+ * Don't migrate pages that are mapped in multiple processes.
+@@ -1690,6 +1706,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+ goto out_fail;
+ }
+
++ if (mm_tlb_flush_pending(mm))
++ flush_tlb_range(vma, mmun_start, mmun_end);
++
+ /* Prepare a page as a migration target */
+ __set_page_locked(new_page);
+ SetPageSwapBacked(new_page);
+@@ -1701,9 +1720,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+ WARN_ON(PageLRU(new_page));
+
+ /* Recheck the target PMD */
++ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+ spin_lock(&mm->page_table_lock);
+- if (unlikely(!pmd_same(*pmd, entry))) {
++ if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
++fail_putback:
+ spin_unlock(&mm->page_table_lock);
++ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+
+ /* Reverse changes made by migrate_page_copy() */
+ if (TestClearPageActive(new_page))
+@@ -1720,7 +1742,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+ putback_lru_page(page);
+ mod_zone_page_state(page_zone(page),
+ NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
+- goto out_fail;
++
++ goto out_unlock;
+ }
+
+ /*
+@@ -1732,16 +1755,35 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+ */
+ mem_cgroup_prepare_migration(page, new_page, &memcg);
+
++ orig_entry = *pmd;
+ entry = mk_pmd(new_page, vma->vm_page_prot);
+- entry = pmd_mknonnuma(entry);
+- entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+ entry = pmd_mkhuge(entry);
++ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+
+- pmdp_clear_flush(vma, haddr, pmd);
+- set_pmd_at(mm, haddr, pmd, entry);
+- page_add_new_anon_rmap(new_page, vma, haddr);
++ /*
++ * Clear the old entry under pagetable lock and establish the new PTE.
++ * Any parallel GUP will either observe the old page blocking on the
++ * page lock, block on the page table lock or observe the new page.
++ * The SetPageUptodate on the new page and page_add_new_anon_rmap
++ * guarantee the copy is visible before the pagetable update.
++ */
++ flush_cache_range(vma, mmun_start, mmun_end);
++ page_add_new_anon_rmap(new_page, vma, mmun_start);
++ pmdp_clear_flush(vma, mmun_start, pmd);
++ set_pmd_at(mm, mmun_start, pmd, entry);
++ flush_tlb_range(vma, mmun_start, mmun_end);
+ update_mmu_cache_pmd(vma, address, &entry);
++
++ if (page_count(page) != 2) {
++ set_pmd_at(mm, mmun_start, pmd, orig_entry);
++ flush_tlb_range(vma, mmun_start, mmun_end);
++ update_mmu_cache_pmd(vma, address, &entry);
++ page_remove_rmap(new_page);
++ goto fail_putback;
++ }
++
+ page_remove_rmap(page);
++
+ /*
+ * Finish the charge transaction under the page table lock to
+ * prevent split_huge_page() from dividing up the charge
+@@ -1749,6 +1791,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+ */
+ mem_cgroup_end_migration(memcg, page, new_page, true);
+ spin_unlock(&mm->page_table_lock);
++ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+
+ unlock_page(new_page);
+ unlock_page(page);
+@@ -1766,10 +1809,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+ out_fail:
+ count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
+ out_dropref:
+- entry = pmd_mknonnuma(entry);
+- set_pmd_at(mm, haddr, pmd, entry);
+- update_mmu_cache_pmd(vma, address, &entry);
++ spin_lock(&mm->page_table_lock);
++ if (pmd_same(*pmd, entry)) {
++ entry = pmd_mknonnuma(entry);
++ set_pmd_at(mm, mmun_start, pmd, entry);
++ update_mmu_cache_pmd(vma, address, &entry);
++ }
++ spin_unlock(&mm->page_table_lock);
+
++out_unlock:
+ unlock_page(page);
+ put_page(page);
+ return 0;
+diff --git a/mm/mlock.c b/mm/mlock.c
+index d480cd6fc475..192e6eebe4f2 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -133,7 +133,10 @@ static void __munlock_isolation_failed(struct page *page)
+
+ /**
+ * munlock_vma_page - munlock a vma page
+- * @page - page to be unlocked
++ * @page - page to be unlocked, either a normal page or THP page head
++ *
++ * returns the size of the page as a page mask (0 for normal page,
++ * HPAGE_PMD_NR - 1 for THP head page)
+ *
+ * called from munlock()/munmap() path with page supposedly on the LRU.
+ * When we munlock a page, because the vma where we found the page is being
+@@ -148,21 +151,30 @@ static void __munlock_isolation_failed(struct page *page)
+ */
+ unsigned int munlock_vma_page(struct page *page)
+ {
+- unsigned int page_mask = 0;
++ unsigned int nr_pages;
+
+ BUG_ON(!PageLocked(page));
+
+ if (TestClearPageMlocked(page)) {
+- unsigned int nr_pages = hpage_nr_pages(page);
++ nr_pages = hpage_nr_pages(page);
+ mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+- page_mask = nr_pages - 1;
+ if (!isolate_lru_page(page))
+ __munlock_isolated_page(page);
+ else
+ __munlock_isolation_failed(page);
++ } else {
++ nr_pages = hpage_nr_pages(page);
+ }
+
+- return page_mask;
++ /*
++ * Regardless of the original PageMlocked flag, we determine nr_pages
++ * after touching the flag. This leaves a possible race with a THP page
++ * split, such that a whole THP page was munlocked, but nr_pages == 1.
++ * Returning a smaller mask due to that is OK, the worst that can
++ * happen is subsequent useless scanning of the former tail pages.
++ * The NR_MLOCK accounting can however become broken.
++ */
++ return nr_pages - 1;
+ }
+
+ /**
+@@ -286,10 +298,12 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
+ {
+ int i;
+ int nr = pagevec_count(pvec);
+- int delta_munlocked = -nr;
++ int delta_munlocked;
+ struct pagevec pvec_putback;
+ int pgrescued = 0;
+
++ pagevec_init(&pvec_putback, 0);
++
+ /* Phase 1: page isolation */
+ spin_lock_irq(&zone->lru_lock);
+ for (i = 0; i < nr; i++) {
+@@ -318,18 +332,21 @@ skip_munlock:
+ /*
+ * We won't be munlocking this page in the next phase
+ * but we still need to release the follow_page_mask()
+- * pin.
++ * pin. We cannot do it under lru_lock however. If it's
++ * the last pin, __page_cache_release would deadlock.
+ */
++ pagevec_add(&pvec_putback, pvec->pages[i]);
+ pvec->pages[i] = NULL;
+- put_page(page);
+- delta_munlocked++;
+ }
+ }
++ delta_munlocked = -nr + pagevec_count(&pvec_putback);
+ __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
+ spin_unlock_irq(&zone->lru_lock);
+
++ /* Now we can release pins of pages that we are not munlocking */
++ pagevec_release(&pvec_putback);
++
+ /* Phase 2: page munlock */
+- pagevec_init(&pvec_putback, 0);
+ for (i = 0; i < nr; i++) {
+ struct page *page = pvec->pages[i];
+
+@@ -440,7 +457,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
+
+ while (start < end) {
+ struct page *page = NULL;
+- unsigned int page_mask, page_increm;
++ unsigned int page_mask;
++ unsigned long page_increm;
+ struct pagevec pvec;
+ struct zone *zone;
+ int zoneid;
+@@ -490,7 +508,9 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
+ goto next;
+ }
+ }
+- page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
++ /* It's a bug to munlock in the middle of a THP page */
++ VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
++ page_increm = 1 + page_mask;
+ start += page_increm * PAGE_SIZE;
+ next:
+ cond_resched();
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index 6c3f56f19275..7651a571f283 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -54,13 +54,16 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+ pte_t ptent;
+ bool updated = false;
+
+- ptent = ptep_modify_prot_start(mm, addr, pte);
+ if (!prot_numa) {
++ ptent = ptep_modify_prot_start(mm, addr, pte);
++ if (pte_numa(ptent))
++ ptent = pte_mknonnuma(ptent);
+ ptent = pte_modify(ptent, newprot);
+ updated = true;
+ } else {
+ struct page *page;
+
++ ptent = *pte;
+ page = vm_normal_page(vma, addr, oldpte);
+ if (page) {
+ int this_nid = page_to_nid(page);
+@@ -73,6 +76,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+ if (!pte_numa(oldpte) &&
+ page_mapcount(page) == 1) {
+ ptent = pte_mknuma(ptent);
++ set_pte_at(mm, addr, pte, ptent);
+ updated = true;
+ }
+ }
+@@ -89,7 +93,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+
+ if (updated)
+ pages++;
+- ptep_modify_prot_commit(mm, addr, pte, ptent);
++
++ /* Only !prot_numa always clears the pte */
++ if (!prot_numa)
++ ptep_modify_prot_commit(mm, addr, pte, ptent);
+ } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
+ swp_entry_t entry = pte_to_swp_entry(oldpte);
+
+@@ -209,6 +216,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
+ BUG_ON(addr >= end);
+ pgd = pgd_offset(mm, addr);
+ flush_cache_range(vma, addr, end);
++ set_tlb_flush_pending(mm);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+@@ -220,6 +228,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
+ /* Only flush the TLB if we actually modified any entries: */
+ if (pages)
+ flush_tlb_range(vma, start, end);
++ clear_tlb_flush_pending(mm);
+
+ return pages;
+ }
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index dd886fac451a..317ea747d2cd 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1822,7 +1822,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
+
+ static bool zone_local(struct zone *local_zone, struct zone *zone)
+ {
+- return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE;
++ return local_zone->node == zone->node;
+ }
+
+ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
+@@ -1919,18 +1919,17 @@ zonelist_scan:
+ * page was allocated in should have no effect on the
+ * time the page has in memory before being reclaimed.
+ *
+- * When zone_reclaim_mode is enabled, try to stay in
+- * local zones in the fastpath. If that fails, the
+- * slowpath is entered, which will do another pass
+- * starting with the local zones, but ultimately fall
+- * back to remote zones that do not partake in the
+- * fairness round-robin cycle of this zonelist.
++ * Try to stay in local zones in the fastpath. If
++ * that fails, the slowpath is entered, which will do
++ * another pass starting with the local zones, but
++ * ultimately fall back to remote zones that do not
++ * partake in the fairness round-robin cycle of this
++ * zonelist.
+ */
+ if (alloc_flags & ALLOC_WMARK_LOW) {
+ if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
+ continue;
+- if (zone_reclaim_mode &&
+- !zone_local(preferred_zone, zone))
++ if (!zone_local(preferred_zone, zone))
+ continue;
+ }
+ /*
+@@ -2396,7 +2395,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
+ * thrash fairness information for zones that are not
+ * actually part of this zonelist's round-robin cycle.
+ */
+- if (zone_reclaim_mode && !zone_local(preferred_zone, zone))
++ if (!zone_local(preferred_zone, zone))
+ continue;
+ mod_zone_page_state(zone, NR_ALLOC_BATCH,
+ high_wmark_pages(zone) -
+diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
+index 3929a40bd6c0..28e64f504ba5 100644
+--- a/mm/pgtable-generic.c
++++ b/mm/pgtable-generic.c
+@@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep)
+ {
++ struct mm_struct *mm = (vma)->vm_mm;
+ pte_t pte;
+- pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
+- if (pte_accessible(pte))
++ pte = ptep_get_and_clear(mm, address, ptep);
++ if (pte_accessible(mm, pte))
+ flush_tlb_page(vma, address);
+ return pte;
+ }
+@@ -191,6 +192,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
+ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
+ {
++ pmd_t entry = *pmdp;
++ if (pmd_numa(entry))
++ entry = pmd_mknonnuma(entry);
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ }
+diff --git a/mm/rmap.c b/mm/rmap.c
+index fd3ee7a54a13..b9d2222a0ecb 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
+ spinlock_t *ptl;
+
+ if (unlikely(PageHuge(page))) {
++ /* when pud is not present, pte will be NULL */
+ pte = huge_pte_offset(mm, address);
++ if (!pte)
++ return NULL;
++
+ ptl = &mm->page_table_lock;
+ goto check;
+ }
+diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
+index a271c27fac77..722da616438c 100644
+--- a/net/wireless/radiotap.c
++++ b/net/wireless/radiotap.c
+@@ -124,6 +124,10 @@ int ieee80211_radiotap_iterator_init(
+ /* find payload start allowing for extended bitmap(s) */
+
+ if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
++ if ((unsigned long)iterator->_arg -
++ (unsigned long)iterator->_rtheader + sizeof(uint32_t) >
++ (unsigned long)iterator->_max_length)
++ return -EINVAL;
+ while (get_unaligned_le32(iterator->_arg) &
+ (1 << IEEE80211_RADIOTAP_EXT)) {
+ iterator->_arg += sizeof(uint32_t);
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index 32b10f53d0b4..2dcb37736d84 100644
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -82,7 +82,9 @@ kallsyms()
+ kallsymopt="${kallsymopt} --all-symbols"
+ fi
+
+- kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
++ if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
++ kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
++ fi
+
+ local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
+ ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index d9a78fd8a2e1..392a0445265c 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -3792,7 +3792,7 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
+ u32 nlbl_sid;
+ u32 nlbl_type;
+
+- selinux_skb_xfrm_sid(skb, &xfrm_sid);
++ selinux_xfrm_skb_sid(skb, &xfrm_sid);
+ selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+
+ err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
+@@ -4297,8 +4297,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ }
+ err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
+ PEER__RECV, &ad);
+- if (err)
++ if (err) {
+ selinux_netlbl_err(skb, err, 0);
++ return err;
++ }
+ }
+
+ if (secmark_active) {
+@@ -4809,22 +4811,32 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+ * as fast and as clean as possible. */
+ if (!selinux_policycap_netpeer)
+ return selinux_ip_postroute_compat(skb, ifindex, family);
++
++ secmark_active = selinux_secmark_enabled();
++ peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
++ if (!secmark_active && !peerlbl_active)
++ return NF_ACCEPT;
++
++ sk = skb->sk;
++
+ #ifdef CONFIG_XFRM
+ /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
+ * packet transformation so allow the packet to pass without any checks
+ * since we'll have another chance to perform access control checks
+ * when the packet is on it's final way out.
+ * NOTE: there appear to be some IPv6 multicast cases where skb->dst
+- * is NULL, in this case go ahead and apply access control. */
+- if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL)
++ * is NULL, in this case go ahead and apply access control.
++ * NOTE: if this is a local socket (skb->sk != NULL) that is in the
++ * TCP listening state we cannot wait until the XFRM processing
++ * is done as we will miss out on the SA label if we do;
++ * unfortunately, this means more work, but it is only once per
++ * connection. */
++ if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
++ !(sk != NULL && sk->sk_state == TCP_LISTEN))
+ return NF_ACCEPT;
+ #endif
+- secmark_active = selinux_secmark_enabled();
+- peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
+- if (!secmark_active && !peerlbl_active)
+- return NF_ACCEPT;
+
+- sk = skb->sk;
+ if (sk == NULL) {
+ /* Without an associated socket the packet is either coming
+ * from the kernel or it is being forwarded; check the packet
+@@ -4852,6 +4864,25 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+ struct sk_security_struct *sksec = sk->sk_security;
+ if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
+ return NF_DROP;
++ /* At this point, if the returned skb peerlbl is SECSID_NULL
++ * and the packet has been through at least one XFRM
++ * transformation then we must be dealing with the "final"
++ * form of labeled IPsec packet; since we've already applied
++ * all of our access controls on this packet we can safely
++ * pass the packet. */
++ if (skb_sid == SECSID_NULL) {
++ switch (family) {
++ case PF_INET:
++ if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
++ return NF_ACCEPT;
++ break;
++ case PF_INET6:
++ if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
++ return NF_ACCEPT;
++ default:
++ return NF_DROP_ERR(-ECONNREFUSED);
++ }
++ }
+ if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
+ return NF_DROP;
+ secmark_perm = PACKET__SEND;
+@@ -5521,11 +5552,11 @@ static int selinux_setprocattr(struct task_struct *p,
+ /* Check for ptracing, and update the task SID if ok.
+ Otherwise, leave SID unchanged and fail. */
+ ptsid = 0;
+- task_lock(p);
++ rcu_read_lock();
+ tracer = ptrace_parent(p);
+ if (tracer)
+ ptsid = task_sid(tracer);
+- task_unlock(p);
++ rcu_read_unlock();
+
+ if (tracer) {
+ error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
+diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
+index 6713f04e30ba..c1af4e14b12f 100644
+--- a/security/selinux/include/xfrm.h
++++ b/security/selinux/include/xfrm.h
+@@ -47,6 +47,7 @@ int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb,
+ int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
+ struct common_audit_data *ad, u8 proto);
+ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
++int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
+
+ static inline void selinux_xfrm_notify_policyload(void)
+ {
+@@ -85,12 +86,12 @@ static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int
+ static inline void selinux_xfrm_notify_policyload(void)
+ {
+ }
+-#endif
+
+-static inline void selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid)
++static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
+ {
+- int err = selinux_xfrm_decode_session(skb, sid, 0);
+- BUG_ON(err);
++ *sid = SECSID_NULL;
++ return 0;
+ }
++#endif
+
+ #endif /* _SELINUX_XFRM_H_ */
+diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
+index d03081886214..78504a18958a 100644
+--- a/security/selinux/xfrm.c
++++ b/security/selinux/xfrm.c
+@@ -152,21 +152,13 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
+ return rc;
+ }
+
+-/*
+- * LSM hook implementation that checks and/or returns the xfrm sid for the
+- * incoming packet.
+- */
+-
+-int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
++static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
++ u32 *sid, int ckall)
+ {
+- struct sec_path *sp;
++ struct sec_path *sp = skb->sp;
+
+ *sid = SECSID_NULL;
+
+- if (skb == NULL)
+- return 0;
+-
+- sp = skb->sp;
+ if (sp) {
+ int i, sid_set = 0;
+
+@@ -190,6 +182,45 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+ return 0;
+ }
+
++static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
++{
++ struct dst_entry *dst = skb_dst(skb);
++ struct xfrm_state *x;
++
++ if (dst == NULL)
++ return SECSID_NULL;
++ x = dst->xfrm;
++ if (x == NULL || !selinux_authorizable_xfrm(x))
++ return SECSID_NULL;
++
++ return x->security->ctx_sid;
++}
++
++/*
++ * LSM hook implementation that checks and/or returns the xfrm sid for the
++ * incoming packet.
++ */
++
++int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
++{
++ if (skb == NULL) {
++ *sid = SECSID_NULL;
++ return 0;
++ }
++ return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
++}
++
++int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
++{
++ int rc;
++
++ rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
++ if (rc == 0 && *sid == SECSID_NULL)
++ *sid = selinux_xfrm_skb_sid_egress(skb);
++
++ return rc;
++}
++
+ /*
+ * Security blob allocation for xfrm_policy and xfrm_state
+ * CTX does not have a meaningful value on input
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index 6e03b465e44e..a2104671f51d 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -1937,6 +1937,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
+ case SNDRV_PCM_STATE_DISCONNECTED:
+ err = -EBADFD;
+ goto _endloop;
++ case SNDRV_PCM_STATE_PAUSED:
++ continue;
+ }
+ if (!tout) {
+ snd_printd("%s write error (DMA or IRQ trouble?)\n",
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index a63aff2ca594..f7e76619f7c9 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -3430,6 +3430,10 @@ static void check_probe_mask(struct azx *chip, int dev)
+ * white/black-list for enable_msi
+ */
+ static struct snd_pci_quirk msi_black_list[] = {
++ SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */
++ SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */
++ SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */
++ SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */
+ SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
+ SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
+ SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 21b948af6ed0..dce47c414ea7 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4004,10 +4004,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
++ SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS),
++ SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
+index 0048ce5bfa2f..831a34d7cc72 100644
+--- a/sound/soc/codecs/wm5110.c
++++ b/sound/soc/codecs/wm5110.c
+@@ -1031,7 +1031,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
+ { "HPOUT2R", NULL, "OUT2R" },
+
+ { "HPOUT3L", NULL, "OUT3L" },
+- { "HPOUT3R", NULL, "OUT3L" },
++ { "HPOUT3R", NULL, "OUT3R" },
+
+ { "SPKOUTLN", NULL, "OUT4L" },
+ { "SPKOUTLP", NULL, "OUT4L" },
+diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
+index 4dfa8dceeabf..48bae0ec500f 100644
+--- a/sound/soc/codecs/wm8904.c
++++ b/sound/soc/codecs/wm8904.c
+@@ -1443,7 +1443,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_B:
+- aif1 |= WM8904_AIF_LRCLK_INV;
++ aif1 |= 0x3 | WM8904_AIF_LRCLK_INV;
+ case SND_SOC_DAIFMT_DSP_A:
+ aif1 |= 0x3;
+ break;
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 60b6b593c407..0d5de6003849 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1466,13 +1466,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp)
+ return ret;
+
+ /* Wait for the RAM to start, should be near instantaneous */
+- count = 0;
+- do {
++ for (count = 0; count < 10; ++count) {
+ ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1,
+ &val);
+ if (ret != 0)
+ return ret;
+- } while (!(val & ADSP2_RAM_RDY) && ++count < 10);
++
++ if (val & ADSP2_RAM_RDY)
++ break;
++
++ msleep(1);
++ }
+
+ if (!(val & ADSP2_RAM_RDY)) {
+ adsp_err(dsp, "Failed to start DSP RAM\n");
+diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
+index 52af7f6fb37f..540832e9e684 100644
+--- a/sound/soc/tegra/tegra20_i2s.c
++++ b/sound/soc/tegra/tegra20_i2s.c
+@@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt)
+ {
+ struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+- unsigned int mask, val;
++ unsigned int mask = 0, val = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+@@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
+ return -EINVAL;
+ }
+
+- mask = TEGRA20_I2S_CTRL_MASTER_ENABLE;
++ mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+- val = TEGRA20_I2S_CTRL_MASTER_ENABLE;
++ val |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ break;
+diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
+index 551b3c93ce93..2e7d4aca3d7d 100644
+--- a/sound/soc/tegra/tegra20_spdif.c
++++ b/sound/soc/tegra/tegra20_spdif.c
+@@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
+ {
+ struct device *dev = dai->dev;
+ struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
+- unsigned int mask, val;
++ unsigned int mask = 0, val = 0;
+ int ret, spdifclock;
+
+- mask = TEGRA20_SPDIF_CTRL_PACK |
+- TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
++ mask |= TEGRA20_SPDIF_CTRL_PACK |
++ TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+- val = TEGRA20_SPDIF_CTRL_PACK |
+- TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
++ val |= TEGRA20_SPDIF_CTRL_PACK |
++ TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
+ break;
+ default:
+ return -EINVAL;
+diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
+index 47565fd04505..732e9cb1be97 100644
+--- a/sound/soc/tegra/tegra30_i2s.c
++++ b/sound/soc/tegra/tegra30_i2s.c
+@@ -117,7 +117,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt)
+ {
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+- unsigned int mask, val;
++ unsigned int mask = 0, val = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+@@ -126,10 +126,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
+ return -EINVAL;
+ }
+
+- mask = TEGRA30_I2S_CTRL_MASTER_ENABLE;
++ mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+- val = TEGRA30_I2S_CTRL_MASTER_ENABLE;
++ val |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ break;
+diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
+index dc4de3762111..bcf1d2f0b791 100644
+--- a/tools/power/cpupower/utils/cpupower-set.c
++++ b/tools/power/cpupower/utils/cpupower-set.c
+@@ -18,9 +18,9 @@
+ #include "helpers/bitmask.h"
+
+ static struct option set_opts[] = {
+- { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
+- { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
+- { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
++ { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'},
++ { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'},
++ { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'},
+ { },
+ };
+
diff --git a/1007_linux-3.12.8.patch b/1007_linux-3.12.8.patch
new file mode 100644
index 00000000..72ce87bd
--- /dev/null
+++ b/1007_linux-3.12.8.patch
@@ -0,0 +1,2622 @@
+diff --git a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
+index 24765c146e31..5883ec878ddd 100644
+--- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
++++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
+@@ -159,6 +159,8 @@ clock which they consume.
+ mixer 343
+ hdmi 344
+ g2d 345
++ mdma0 346
++ smmu_mdma0 347
+
+
+ [Clock Muxes]
+diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
+index c01223628a87..8e48e3b14227 100644
+--- a/Documentation/networking/packet_mmap.txt
++++ b/Documentation/networking/packet_mmap.txt
+@@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below.
+ [shutdown] close() --------> destruction of the transmission socket and
+ deallocation of all associated resources.
+
++Socket creation and destruction are also straightforward, and are done
++the same way as for capturing, described in the previous paragraph:
++
++ int fd = socket(PF_PACKET, mode, 0);
++
++The protocol can optionally be 0 in case we only want to transmit
++via this socket, which avoids an expensive call to packet_rcv().
++In this case, you also need to bind(2) the TX_RING with sll_protocol = 0
++set. Otherwise, use htons(ETH_P_ALL) or any other protocol, for example.
++
+ Binding the socket to your network interface is mandatory (with zero copy) to
+ know the header size of frames used in the circular buffer.
+
+diff --git a/Makefile b/Makefile
+index c2f0b7985b41..5d0ec13bb77d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
+index bbac42a78ce5..6a1117e481b1 100644
+--- a/arch/arm/boot/dts/exynos5250.dtsi
++++ b/arch/arm/boot/dts/exynos5250.dtsi
+@@ -556,7 +556,7 @@
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x10800000 0x1000>;
+ interrupts = <0 33 0>;
+- clocks = <&clock 271>;
++ clocks = <&clock 346>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 65ed63f68ef8..1f735aafd5ec 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -35,7 +35,13 @@
+ #include <asm/tls.h>
+ #include <asm/system_misc.h>
+
+-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
++static const char *handler[]= {
++ "prefetch abort",
++ "data abort",
++ "address exception",
++ "interrupt",
++ "undefined instruction",
++};
+
+ void *vectors_page;
+
+diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
+index 9ee78f7b4990..782f6c71fa0a 100644
+--- a/arch/arm/mach-footbridge/dc21285-timer.c
++++ b/arch/arm/mach-footbridge/dc21285-timer.c
+@@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = {
+ void __init footbridge_timer_init(void)
+ {
+ struct clock_event_device *ce = &ckevt_dc21285;
++ unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
+
+- clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
++ clocksource_register_hz(&cksrc_dc21285, rate);
+
+ setup_irq(ce->irq, &footbridge_timer_irq);
+
+ ce->cpumask = cpumask_of(smp_processor_id());
+- clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff);
++ clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
+ }
+diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
+index 7f8f6076d360..07b91832bd2c 100644
+--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
++++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
+@@ -482,7 +482,7 @@ static struct platform_device lcdc0_device = {
+ .id = 0,
+ .dev = {
+ .platform_data = &lcdc0_info,
+- .coherent_dma_mask = ~0,
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+@@ -579,7 +579,7 @@ static struct platform_device hdmi_lcdc_device = {
+ .id = 1,
+ .dev = {
+ .platform_data = &hdmi_lcdc_info,
+- .coherent_dma_mask = ~0,
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
+index f1994968d303..5cc96d004b4f 100644
+--- a/arch/arm/mach-shmobile/board-kzm9g.c
++++ b/arch/arm/mach-shmobile/board-kzm9g.c
+@@ -334,7 +334,7 @@ static struct platform_device lcdc_device = {
+ .resource = lcdc_resources,
+ .dev = {
+ .platform_data = &lcdc_info,
+- .coherent_dma_mask = ~0,
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
+index af06753eb809..e721d2ccceae 100644
+--- a/arch/arm/mach-shmobile/board-mackerel.c
++++ b/arch/arm/mach-shmobile/board-mackerel.c
+@@ -409,7 +409,7 @@ static struct platform_device lcdc_device = {
+ .resource = lcdc_resources,
+ .dev = {
+ .platform_data = &lcdc_info,
+- .coherent_dma_mask = ~0,
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+@@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = {
+ .id = 1,
+ .dev = {
+ .platform_data = &hdmi_lcdc_info,
+- .coherent_dma_mask = ~0,
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
+index 6d5ba9afb16a..3387e60e4ea3 100644
+--- a/arch/arm/mm/flush.c
++++ b/arch/arm/mm/flush.c
+@@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
+ unsigned long i;
+ if (cache_is_vipt_nonaliasing()) {
+ for (i = 0; i < (1 << compound_order(page)); i++) {
+- void *addr = kmap_atomic(page);
++ void *addr = kmap_atomic(page + i);
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ }
+ } else {
+ for (i = 0; i < (1 << compound_order(page)); i++) {
+- void *addr = kmap_high_get(page);
++ void *addr = kmap_high_get(page + i);
+ if (addr) {
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+- kunmap_high(page);
++ kunmap_high(page + i);
+ }
+ }
+ }
+diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
+index f0e2784e7cca..2f9b751878ba 100644
+--- a/arch/parisc/include/asm/cacheflush.h
++++ b/arch/parisc/include/asm/cacheflush.h
+@@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
+ void mark_rodata_ro(void);
+ #endif
+
+-#ifdef CONFIG_PA8X00
+-/* Only pa8800, pa8900 needs this */
+-
+ #include <asm/kmap_types.h>
+
+ #define ARCH_HAS_KMAP
+
+-void kunmap_parisc(void *addr);
+-
+ static inline void *kmap(struct page *page)
+ {
+ might_sleep();
++ flush_dcache_page(page);
+ return page_address(page);
+ }
+
+ static inline void kunmap(struct page *page)
+ {
+- kunmap_parisc(page_address(page));
++ flush_kernel_dcache_page_addr(page_address(page));
+ }
+
+ static inline void *kmap_atomic(struct page *page)
+ {
+ pagefault_disable();
++ flush_dcache_page(page);
+ return page_address(page);
+ }
+
+ static inline void __kunmap_atomic(void *addr)
+ {
+- kunmap_parisc(addr);
++ flush_kernel_dcache_page_addr(addr);
+ pagefault_enable();
+ }
+
+ #define kmap_atomic_prot(page, prot) kmap_atomic(page)
+ #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
+ #define kmap_atomic_to_page(ptr) virt_to_page(ptr)
+-#endif
+
+ #endif /* _PARISC_CACHEFLUSH_H */
+
+diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
+index b7adb2ac049c..c53fc63149e8 100644
+--- a/arch/parisc/include/asm/page.h
++++ b/arch/parisc/include/asm/page.h
+@@ -28,9 +28,8 @@ struct page;
+
+ void clear_page_asm(void *page);
+ void copy_page_asm(void *to, void *from);
+-void clear_user_page(void *vto, unsigned long vaddr, struct page *pg);
+-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+- struct page *pg);
++#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
++#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
+
+ /* #define CONFIG_PARISC_TMPALIAS */
+
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index c035673209f7..a72545554a31 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr)
+ }
+ EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+
+-void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
+-{
+- clear_page_asm(vto);
+- if (!parisc_requires_coherency())
+- flush_kernel_dcache_page_asm(vto);
+-}
+-EXPORT_SYMBOL(clear_user_page);
+-
+-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+- struct page *pg)
+-{
+- /* Copy using kernel mapping. No coherency is needed
+- (all in kmap/kunmap) on machines that don't support
+- non-equivalent aliasing. However, the `from' page
+- needs to be flushed before it can be accessed through
+- the kernel mapping. */
+- preempt_disable();
+- flush_dcache_page_asm(__pa(vfrom), vaddr);
+- preempt_enable();
+- copy_page_asm(vto, vfrom);
+- if (!parisc_requires_coherency())
+- flush_kernel_dcache_page_asm(vto);
+-}
+-EXPORT_SYMBOL(copy_user_page);
+-
+-#ifdef CONFIG_PA8X00
+-
+-void kunmap_parisc(void *addr)
+-{
+- if (parisc_requires_coherency())
+- flush_kernel_dcache_page_addr(addr);
+-}
+-EXPORT_SYMBOL(kunmap_parisc);
+-#endif
+-
+ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+ {
+ unsigned long flags;
+diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
+index 4d0bda7b11e3..5be9f879957f 100644
+--- a/arch/x86/include/asm/fpu-internal.h
++++ b/arch/x86/include/asm/fpu-internal.h
+@@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
+ /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+ is pending. Clear the x87 state here by setting it to fixed
+ values. "m" is a random variable that should be in L1 */
+- alternative_input(
+- ASM_NOP8 ASM_NOP2,
+- "emms\n\t" /* clear stack tags */
+- "fildl %P[addr]", /* set F?P to defined value */
+- X86_FEATURE_FXSAVE_LEAK,
+- [addr] "m" (tsk->thread.fpu.has_fpu));
++ if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
++ asm volatile(
++ "fnclex\n\t"
++ "emms\n\t"
++ "fildl %P[addr]" /* set F?P to defined value */
++ : : [addr] "m" (tsk->thread.fpu.has_fpu));
++ }
+
+ return fpu_restore_checking(&tsk->thread.fpu);
+ }
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 2c9958cd7a43..ffa5af4c221a 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -68,6 +68,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
+ MODULE_DESCRIPTION("ACPI Battery Driver");
+ MODULE_LICENSE("GPL");
+
++static int battery_bix_broken_package;
+ static unsigned int cache_time = 1000;
+ module_param(cache_time, uint, 0644);
+ MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
+@@ -443,7 +444,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
+ return -ENODEV;
+ }
+- if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
++
++ if (battery_bix_broken_package)
++ result = extract_package(battery, buffer.pointer,
++ extended_info_offsets + 1,
++ ARRAY_SIZE(extended_info_offsets) - 1);
++ else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
+ result = extract_package(battery, buffer.pointer,
+ extended_info_offsets,
+ ARRAY_SIZE(extended_info_offsets));
+@@ -1054,6 +1060,17 @@ static int battery_notify(struct notifier_block *nb,
+ return 0;
+ }
+
++static struct dmi_system_id bat_dmi_table[] = {
++ {
++ .ident = "NEC LZ750/LS",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
++ },
++ },
++ {},
++};
++
+ static int acpi_battery_add(struct acpi_device *device)
+ {
+ int result = 0;
+@@ -1163,6 +1180,8 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
+ if (!acpi_battery_dir)
+ return;
+ #endif
++ if (dmi_check_system(bat_dmi_table))
++ battery_bix_broken_package = 1;
+ if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
+ #ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_unlock_battery_dir(acpi_battery_dir);
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 14f1e9506338..cfc6073c0487 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
++ PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
+diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
+index 2168d15bc728..57a818b2b5f2 100644
+--- a/drivers/char/tpm/tpm_ppi.c
++++ b/drivers/char/tpm/tpm_ppi.c
+@@ -27,15 +27,18 @@ static char *tpm_device_name = "TPM";
+ static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
+ void **return_value)
+ {
+- acpi_status status;
++ acpi_status status = AE_OK;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+- status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+- if (strstr(buffer.pointer, context) != NULL) {
+- *return_value = handle;
++
++ if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) {
++ if (strstr(buffer.pointer, context) != NULL) {
++ *return_value = handle;
++ status = AE_CTRL_TERMINATE;
++ }
+ kfree(buffer.pointer);
+- return AE_CTRL_TERMINATE;
+ }
+- return AE_OK;
++
++ return status;
+ }
+
+ static inline void ppi_assign_params(union acpi_object params[4],
+diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
+index 8d3009e44fba..5543b7df8e16 100644
+--- a/drivers/clk/clk-divider.c
++++ b/drivers/clk/clk-divider.c
+@@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
+ return 0;
+ }
+
+-static unsigned int _get_val(struct clk_divider *divider, u8 div)
++static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
+ {
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return div;
+diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
+index ad5ff50c5f28..1a7c1b929c69 100644
+--- a/drivers/clk/samsung/clk-exynos4.c
++++ b/drivers/clk/samsung/clk-exynos4.c
+@@ -39,7 +39,7 @@
+ #define SRC_TOP1 0xc214
+ #define SRC_CAM 0xc220
+ #define SRC_TV 0xc224
+-#define SRC_MFC 0xcc28
++#define SRC_MFC 0xc228
+ #define SRC_G3D 0xc22c
+ #define E4210_SRC_IMAGE 0xc230
+ #define SRC_LCD0 0xc234
+diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
+index adf32343c9f9..e52359cf9b6f 100644
+--- a/drivers/clk/samsung/clk-exynos5250.c
++++ b/drivers/clk/samsung/clk-exynos5250.c
+@@ -25,6 +25,7 @@
+ #define MPLL_LOCK 0x4000
+ #define MPLL_CON0 0x4100
+ #define SRC_CORE1 0x4204
++#define GATE_IP_ACP 0x8800
+ #define CPLL_LOCK 0x10020
+ #define EPLL_LOCK 0x10030
+ #define VPLL_LOCK 0x10040
+@@ -75,7 +76,6 @@
+ #define SRC_CDREX 0x20200
+ #define PLL_DIV2_SEL 0x20a24
+ #define GATE_IP_DISP1 0x10928
+-#define GATE_IP_ACP 0x10000
+
+ /* list of PLLs to be registered */
+ enum exynos5250_plls {
+@@ -120,7 +120,8 @@ enum exynos5250_clks {
+ spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
+ hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
+ tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
+- wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d,
++ wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0,
++ smmu_mdma0,
+
+ /* mux clocks */
+ mout_hdmi = 1024,
+@@ -354,8 +355,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
+ GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
+ GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
+ GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
+- GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
+- GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
++ GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0),
++ GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0),
+ GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
+ GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
+ GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
+@@ -406,7 +407,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
+ GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
+ GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
+ GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
+- GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
++ GATE(sysreg, "sysreg", "aclk66",
++ GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
+ GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
+@@ -492,6 +494,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
+ GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
+ GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
+ GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
++ GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0),
++ GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0),
+ };
+
+ static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 99d8ab548a34..d5dc567efd96 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -518,7 +518,8 @@ static void intel_pstate_timer_func(unsigned long __data)
+ }
+
+ #define ICPU(model, policy) \
+- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
++ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
++ (unsigned long)&policy }
+
+ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
+ ICPU(0x2a, default_policy),
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+index 57cda2a1437b..3dc7a997b795 100644
+--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
++++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+@@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init)
+ u16 offset = nv_ro16(bios, init->offset + 1);
+
+ trace("JUMP\t0x%04x\n", offset);
+- init->offset = offset;
++
++ if (init_exec(init))
++ init->offset = offset;
++ else
++ init->offset += 3;
+ }
+
+ /**
+diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
+index 05188351711d..a97263e902ff 100644
+--- a/drivers/leds/leds-lp5521.c
++++ b/drivers/leds/leds-lp5521.c
+@@ -244,18 +244,12 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip,
+ if (i % 2)
+ goto err;
+
+- mutex_lock(&chip->lock);
+-
+ for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) {
+ ret = lp55xx_write(chip, addr[idx] + i, pattern[i]);
+- if (ret) {
+- mutex_unlock(&chip->lock);
++ if (ret)
+ return -EINVAL;
+- }
+ }
+
+- mutex_unlock(&chip->lock);
+-
+ return size;
+
+ err:
+@@ -427,15 +421,17 @@ static ssize_t store_engine_load(struct device *dev,
+ {
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
++ int ret;
+
+ mutex_lock(&chip->lock);
+
+ chip->engine_idx = nr;
+ lp5521_load_engine(chip);
++ ret = lp5521_update_program_memory(chip, buf, len);
+
+ mutex_unlock(&chip->lock);
+
+- return lp5521_update_program_memory(chip, buf, len);
++ return ret;
+ }
+ store_load(1)
+ store_load(2)
+diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
+index fe3bcbb5747f..f382fc1e57a0 100644
+--- a/drivers/leds/leds-lp5523.c
++++ b/drivers/leds/leds-lp5523.c
+@@ -336,18 +336,12 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip,
+ if (i % 2)
+ goto err;
+
+- mutex_lock(&chip->lock);
+-
+ for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) {
+ ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
+- if (ret) {
+- mutex_unlock(&chip->lock);
++ if (ret)
+ return -EINVAL;
+- }
+ }
+
+- mutex_unlock(&chip->lock);
+-
+ return size;
+
+ err:
+@@ -547,15 +541,17 @@ static ssize_t store_engine_load(struct device *dev,
+ {
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
++ int ret;
+
+ mutex_lock(&chip->lock);
+
+ chip->engine_idx = nr;
+ lp5523_load_engine_and_select_page(chip);
++ ret = lp5523_update_program_memory(chip, buf, len);
+
+ mutex_unlock(&chip->lock);
+
+- return lp5523_update_program_memory(chip, buf, len);
++ return ret;
+ }
+ store_load(1)
+ store_load(2)
+diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
+index e6ae7720f9e1..6ed83feb0c52 100644
+--- a/drivers/mfd/rtsx_pcr.c
++++ b/drivers/mfd/rtsx_pcr.c
+@@ -1230,8 +1230,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
+
+ pcr->remove_pci = true;
+
+- cancel_delayed_work(&pcr->carddet_work);
+- cancel_delayed_work(&pcr->idle_work);
++ /* Disable interrupts at the pcr level */
++ spin_lock_irq(&pcr->lock);
++ rtsx_pci_writel(pcr, RTSX_BIER, 0);
++ pcr->bier = 0;
++ spin_unlock_irq(&pcr->lock);
++
++ cancel_delayed_work_sync(&pcr->carddet_work);
++ cancel_delayed_work_sync(&pcr->idle_work);
+
+ mfd_remove_devices(&pcidev->dev);
+
+diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
+index 9e1601487263..4fc96d6c6ac0 100644
+--- a/drivers/net/ethernet/arc/emac_main.c
++++ b/drivers/net/ethernet/arc/emac_main.c
+@@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
+ /* Make sure pointer to data buffer is set */
+ wmb();
+
++ skb_tx_timestamp(skb);
++
+ *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
+
+ /* Increment index to point to the next BD */
+@@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
+
+ arc_reg_set(priv, R_STATUS, TXPL_MASK);
+
+- skb_tx_timestamp(skb);
+-
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index a82229fe1c7f..3ff1f272c6c8 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -16485,6 +16485,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
+ /* Clear this out for sanity. */
+ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
++ /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
++ tw32(TG3PCI_REG_BASE_ADDR, 0);
++
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ &pci_state_reg);
+ if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index b2793b91cc55..63090c0ddeb9 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -425,6 +425,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ /* If this was the last BD in the ring, start at the beginning again. */
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+
++ skb_tx_timestamp(skb);
++
+ fep->cur_tx = bdp;
+
+ if (fep->cur_tx == fep->dirty_tx)
+@@ -433,8 +435,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ /* Trigger transmission start */
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+
+- skb_tx_timestamp(skb);
+-
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+index 2d1c6bdd3618..7628e0fd8455 100644
+--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+@@ -3033,7 +3033,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
+- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO |
++ dev->features = NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index 07c9bc4c61bc..f59a0b6f1ae5 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -585,7 +585,7 @@ static void efx_start_datapath(struct efx_nic *efx)
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+ rx_buf_len = (sizeof(struct efx_rx_page_state) +
+- NET_IP_ALIGN + efx->rx_dma_len);
++ efx->rx_ip_align + efx->rx_dma_len);
+ if (rx_buf_len <= PAGE_SIZE) {
+ efx->rx_scatter = efx->type->always_rx_scatter;
+ efx->rx_buffer_order = 0;
+@@ -645,6 +645,8 @@ static void efx_start_datapath(struct efx_nic *efx)
+ WARN_ON(channel->rx_pkt_n_frags);
+ }
+
++ efx_ptp_start_datapath(efx);
++
+ if (netif_device_present(efx->net_dev))
+ netif_tx_wake_all_queues(efx->net_dev);
+ }
+@@ -659,6 +661,8 @@ static void efx_stop_datapath(struct efx_nic *efx)
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
++ efx_ptp_stop_datapath(efx);
++
+ /* Stop RX refill */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+@@ -2550,6 +2554,8 @@ static int efx_init_struct(struct efx_nic *efx,
+
+ efx->net_dev = net_dev;
+ efx->rx_prefix_size = efx->type->rx_prefix_size;
++ efx->rx_ip_align =
++ NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
+ efx->rx_packet_hash_offset =
+ efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ spin_lock_init(&efx->stats_lock);
+diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
+index 366c8e3e3784..4b0bd8a1514d 100644
+--- a/drivers/net/ethernet/sfc/mcdi.c
++++ b/drivers/net/ethernet/sfc/mcdi.c
+@@ -50,6 +50,7 @@ struct efx_mcdi_async_param {
+ static void efx_mcdi_timeout_async(unsigned long context);
+ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached_out);
++static bool efx_mcdi_poll_once(struct efx_nic *efx);
+
+ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
+ {
+@@ -237,6 +238,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
+ }
+ }
+
++static bool efx_mcdi_poll_once(struct efx_nic *efx)
++{
++ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
++
++ rmb();
++ if (!efx->type->mcdi_poll_response(efx))
++ return false;
++
++ spin_lock_bh(&mcdi->iface_lock);
++ efx_mcdi_read_response_header(efx);
++ spin_unlock_bh(&mcdi->iface_lock);
++
++ return true;
++}
++
+ static int efx_mcdi_poll(struct efx_nic *efx)
+ {
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+@@ -272,18 +288,13 @@ static int efx_mcdi_poll(struct efx_nic *efx)
+
+ time = jiffies;
+
+- rmb();
+- if (efx->type->mcdi_poll_response(efx))
++ if (efx_mcdi_poll_once(efx))
+ break;
+
+ if (time_after(time, finish))
+ return -ETIMEDOUT;
+ }
+
+- spin_lock_bh(&mcdi->iface_lock);
+- efx_mcdi_read_response_header(efx);
+- spin_unlock_bh(&mcdi->iface_lock);
+-
+ /* Return rc=0 like wait_event_timeout() */
+ return 0;
+ }
+@@ -619,6 +630,16 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ rc = efx_mcdi_await_completion(efx);
+
+ if (rc != 0) {
++ netif_err(efx, hw, efx->net_dev,
++ "MC command 0x%x inlen %d mode %d timed out\n",
++ cmd, (int)inlen, mcdi->mode);
++
++ if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
++ netif_err(efx, hw, efx->net_dev,
++ "MCDI request was completed without an event\n");
++ rc = 0;
++ }
++
+ /* Close the race with efx_mcdi_ev_cpl() executing just too late
+ * and completing a request we've just cancelled, by ensuring
+ * that the seqno check therein fails.
+@@ -627,11 +648,9 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ spin_unlock_bh(&mcdi->iface_lock);
++ }
+
+- netif_err(efx, hw, efx->net_dev,
+- "MC command 0x%x inlen %d mode %d timed out\n",
+- cmd, (int)inlen, mcdi->mode);
+- } else {
++ if (rc == 0) {
+ size_t hdr_len, data_len;
+
+ /* At the very least we need a memory barrier here to ensure
+diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
+index b172ed133055..2aeb45167511 100644
+--- a/drivers/net/ethernet/sfc/net_driver.h
++++ b/drivers/net/ethernet/sfc/net_driver.h
+@@ -673,6 +673,8 @@ struct vfdi_status;
+ * @n_channels: Number of channels in use
+ * @n_rx_channels: Number of channels used for RX (= number of RX queues)
+ * @n_tx_channels: Number of channels used for TX
++ * @rx_ip_align: RX DMA address offset to have IP header aligned in
++ * accordance with NET_IP_ALIGN
+ * @rx_dma_len: Current maximum RX DMA length
+ * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
+@@ -806,6 +808,7 @@ struct efx_nic {
+ unsigned rss_spread;
+ unsigned tx_channel_offset;
+ unsigned n_tx_channels;
++ unsigned int rx_ip_align;
+ unsigned int rx_dma_len;
+ unsigned int rx_buffer_order;
+ unsigned int rx_buffer_truesize;
+diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
+index 890bbbe8320e..3d713b518847 100644
+--- a/drivers/net/ethernet/sfc/nic.h
++++ b/drivers/net/ethernet/sfc/nic.h
+@@ -528,6 +528,8 @@ extern void efx_ptp_get_ts_info(struct efx_nic *efx,
+ extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+ extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+ extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
++void efx_ptp_start_datapath(struct efx_nic *efx);
++void efx_ptp_stop_datapath(struct efx_nic *efx);
+
+ extern const struct efx_nic_type falcon_a1_nic_type;
+ extern const struct efx_nic_type falcon_b0_nic_type;
+diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
+index 03acf57df045..3dd39dcfe36b 100644
+--- a/drivers/net/ethernet/sfc/ptp.c
++++ b/drivers/net/ethernet/sfc/ptp.c
+@@ -220,6 +220,7 @@ struct efx_ptp_timeset {
+ * @evt_list: List of MC receive events awaiting packets
+ * @evt_free_list: List of free events
+ * @evt_lock: Lock for manipulating evt_list and evt_free_list
++ * @evt_overflow: Boolean indicating that event list has overflowed
+ * @rx_evts: Instantiated events (on evt_list and evt_free_list)
+ * @workwq: Work queue for processing pending PTP operations
+ * @work: Work task
+@@ -270,6 +271,7 @@ struct efx_ptp_data {
+ struct list_head evt_list;
+ struct list_head evt_free_list;
+ spinlock_t evt_lock;
++ bool evt_overflow;
+ struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
+ struct workqueue_struct *workwq;
+ struct work_struct work;
+@@ -635,6 +637,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
+ }
+ }
+ }
++ /* If the event overflow flag is set and the event list is now empty,
++ * clear the flag to re-enable the overflow warning message.
++ */
++ if (ptp->evt_overflow && list_empty(&ptp->evt_list))
++ ptp->evt_overflow = false;
+ spin_unlock_bh(&ptp->evt_lock);
+ }
+
+@@ -676,6 +683,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
+ break;
+ }
+ }
++ /* If the event overflow flag is set and the event list is now empty,
++ * clear the flag to re-enable the overflow warning message.
++ */
++ if (ptp->evt_overflow && list_empty(&ptp->evt_list))
++ ptp->evt_overflow = false;
+ spin_unlock_bh(&ptp->evt_lock);
+
+ return rc;
+@@ -705,8 +717,9 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+ __skb_queue_tail(q, skb);
+ } else if (time_after(jiffies, match->expiry)) {
+ match->state = PTP_PACKET_STATE_TIMED_OUT;
+- netif_warn(efx, rx_err, efx->net_dev,
+- "PTP packet - no timestamp seen\n");
++ if (net_ratelimit())
++ netif_warn(efx, rx_err, efx->net_dev,
++ "PTP packet - no timestamp seen\n");
+ __skb_queue_tail(q, skb);
+ } else {
+ /* Replace unprocessed entry and stop */
+@@ -788,9 +801,14 @@ fail:
+ static int efx_ptp_stop(struct efx_nic *efx)
+ {
+ struct efx_ptp_data *ptp = efx->ptp_data;
+- int rc = efx_ptp_disable(efx);
+ struct list_head *cursor;
+ struct list_head *next;
++ int rc;
++
++ if (ptp == NULL)
++ return 0;
++
++ rc = efx_ptp_disable(efx);
+
+ if (ptp->rxfilter_installed) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+@@ -809,11 +827,19 @@ static int efx_ptp_stop(struct efx_nic *efx)
+ list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
+ list_move(cursor, &efx->ptp_data->evt_free_list);
+ }
++ ptp->evt_overflow = false;
+ spin_unlock_bh(&efx->ptp_data->evt_lock);
+
+ return rc;
+ }
+
++static int efx_ptp_restart(struct efx_nic *efx)
++{
++ if (efx->ptp_data && efx->ptp_data->enabled)
++ return efx_ptp_start(efx);
++ return 0;
++}
++
+ static void efx_ptp_pps_worker(struct work_struct *work)
+ {
+ struct efx_ptp_data *ptp =
+@@ -901,6 +927,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
+ spin_lock_init(&ptp->evt_lock);
+ for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
+ list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
++ ptp->evt_overflow = false;
+
+ ptp->phc_clock_info.owner = THIS_MODULE;
+ snprintf(ptp->phc_clock_info.name,
+@@ -989,7 +1016,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+ skb->len >= PTP_MIN_LENGTH &&
+ skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
+ likely(skb->protocol == htons(ETH_P_IP)) &&
++ skb_transport_header_was_set(skb) &&
++ skb_network_header_len(skb) >= sizeof(struct iphdr) &&
+ ip_hdr(skb)->protocol == IPPROTO_UDP &&
++ skb_headlen(skb) >=
++ skb_transport_offset(skb) + sizeof(struct udphdr) &&
+ udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
+ }
+
+@@ -1106,7 +1137,7 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ {
+ if ((enable_wanted != efx->ptp_data->enabled) ||
+ (enable_wanted && (efx->ptp_data->mode != new_mode))) {
+- int rc;
++ int rc = 0;
+
+ if (enable_wanted) {
+ /* Change of mode requires disable */
+@@ -1123,7 +1154,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ * succeed.
+ */
+ efx->ptp_data->mode = new_mode;
+- rc = efx_ptp_start(efx);
++ if (netif_running(efx->net_dev))
++ rc = efx_ptp_start(efx);
+ if (rc == 0) {
+ rc = efx_ptp_synchronize(efx,
+ PTP_SYNC_ATTEMPTS * 2);
+@@ -1295,8 +1327,13 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
+ list_add_tail(&evt->link, &ptp->evt_list);
+
+ queue_work(ptp->workwq, &ptp->work);
+- } else {
+- netif_err(efx, rx_err, efx->net_dev, "No free PTP event");
++ } else if (!ptp->evt_overflow) {
++ /* Log a warning message and set the event overflow flag.
++ * The message won't be logged again until the event queue
++ * becomes empty.
++ */
++ netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
++ ptp->evt_overflow = true;
+ }
+ spin_unlock_bh(&ptp->evt_lock);
+ }
+@@ -1389,7 +1426,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+ if (rc != 0)
+ return rc;
+
+- ptp_data->current_adjfreq = delta;
++ ptp_data->current_adjfreq = adjustment_ns;
+ return 0;
+ }
+
+@@ -1404,7 +1441,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+- MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
++ MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
+ return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+@@ -1491,3 +1528,14 @@ void efx_ptp_probe(struct efx_nic *efx)
+ efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
+ &efx_ptp_channel_type;
+ }
++
++void efx_ptp_start_datapath(struct efx_nic *efx)
++{
++ if (efx_ptp_restart(efx))
++ netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
++}
++
++void efx_ptp_stop_datapath(struct efx_nic *efx)
++{
++ efx_ptp_stop(efx);
++}
+diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
+index 4a596725023f..f18d5864bfa7 100644
+--- a/drivers/net/ethernet/sfc/rx.c
++++ b/drivers/net/ethernet/sfc/rx.c
+@@ -93,7 +93,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
+
+ void efx_rx_config_page_split(struct efx_nic *efx)
+ {
+- efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
++ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
+ EFX_RX_BUF_ALIGNMENT);
+ efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+ ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+@@ -188,9 +188,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
+ do {
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+- rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
++ rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
+ rx_buf->page = page;
+- rx_buf->page_offset = page_offset + NET_IP_ALIGN;
++ rx_buf->page_offset = page_offset + efx->rx_ip_align;
+ rx_buf->len = efx->rx_dma_len;
+ rx_buf->flags = 0;
+ ++rx_queue->added_count;
+diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
+index 571452e786d5..61a1540f1347 100644
+--- a/drivers/net/ethernet/tehuti/tehuti.c
++++ b/drivers/net/ethernet/tehuti/tehuti.c
+@@ -2019,7 +2019,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
+ | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
+- /*| NETIF_F_FRAGLIST */
+ ;
+ ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index 0029148077a9..def50bd53124 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -1016,7 +1016,7 @@ static int temac_of_probe(struct platform_device *op)
+ platform_set_drvdata(op, ndev);
+ SET_NETDEV_DEV(ndev, &op->dev);
+ ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
+- ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
++ ndev->features = NETIF_F_SG;
+ ndev->netdev_ops = &temac_netdev_ops;
+ ndev->ethtool_ops = &temac_ethtool_ops;
+ #if 0
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index b2ff038d6d20..f9293da19e26 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1486,7 +1486,7 @@ static int axienet_of_probe(struct platform_device *op)
+
+ SET_NETDEV_DEV(ndev, &op->dev);
+ ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
+- ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
++ ndev->features = NETIF_F_SG;
+ ndev->netdev_ops = &axienet_netdev_ops;
+ ndev->ethtool_ops = &axienet_ethtool_ops;
+
+diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
+index 3169252613fa..5d78c1d08abd 100644
+--- a/drivers/net/hamradio/hdlcdrv.c
++++ b/drivers/net/hamradio/hdlcdrv.c
+@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ case HDLCDRVCTL_CALIBRATE:
+ if(!capable(CAP_SYS_RAWIO))
+ return -EPERM;
++ if (bi.data.calibrate > INT_MAX / s->par.bitrate)
++ return -EINVAL;
+ s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
+ return 0;
+
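The calibrate bound added above is the standard guard against signed multiplication overflow: validate against INT_MAX divided by the multiplier before multiplying. A minimal stand-alone sketch of the pattern (hypothetical helper, not driver code):

    #include <limits.h>

    /* Returns the scaled value, or -1 if the inputs are invalid or the
     * product calibrate * bitrate would overflow a signed int. */
    static int scale_calibrate(int calibrate, int bitrate)
    {
        if (bitrate <= 0 || calibrate < 0)
            return -1;
        if (calibrate > INT_MAX / bitrate)
            return -1;               /* multiplication would overflow */
        return calibrate * bitrate / 16;
    }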
+diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
+index 5af1c3e5032a..b7e967540509 100644
+--- a/drivers/net/hamradio/yam.c
++++ b/drivers/net/hamradio/yam.c
+@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ break;
+
+ case SIOCYAMGCFG:
++ memset(&yi, 0, sizeof(yi));
+ yi.cfg.mask = 0xffffffff;
+ yi.cfg.iobase = yp->iobase;
+ yi.cfg.irq = yp->irq;
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 524f713f6017..f8135725bcf6 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -327,7 +327,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
+ return -EINVAL;
+
+ nvdev->start_remove = true;
+- cancel_delayed_work_sync(&ndevctx->dwork);
+ cancel_work_sync(&ndevctx->work);
+ netif_tx_disable(ndev);
+ rndis_filter_device_remove(hdev);
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index dc76670c2f2a..5895e4dbbf2a 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -767,11 +767,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
+ const struct sk_buff *skb,
+ const struct iovec *iv, int len)
+ {
+- struct macvlan_dev *vlan;
+ int ret;
+ int vnet_hdr_len = 0;
+ int vlan_offset = 0;
+- int copied;
++ int copied, total;
+
+ if (q->flags & IFF_VNET_HDR) {
+ struct virtio_net_hdr vnet_hdr;
+@@ -786,7 +785,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
+ if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
+ return -EFAULT;
+ }
+- copied = vnet_hdr_len;
++ total = copied = vnet_hdr_len;
++ total += skb->len;
+
+ if (!vlan_tx_tag_present(skb))
+ len = min_t(int, skb->len, len);
+@@ -801,6 +801,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
+
+ vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ len = min_t(int, skb->len + VLAN_HLEN, len);
++ total += VLAN_HLEN;
+
+ copy = min_t(int, vlan_offset, len);
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
+@@ -818,19 +819,9 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
+ }
+
+ ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+- copied += len;
+
+ done:
+- rcu_read_lock();
+- vlan = rcu_dereference(q->vlan);
+- if (vlan) {
+- preempt_disable();
+- macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
+- preempt_enable();
+- }
+- rcu_read_unlock();
+-
+- return ret ? ret : copied;
++ return ret ? ret : total;
+ }
+
+ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
+@@ -885,7 +876,9 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
+ }
+
+ ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
+- ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
++ ret = min_t(ssize_t, ret, len);
++ if (ret > 0)
++ iocb->ki_pos = ret;
+ out:
+ return ret;
+ }
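The put_user path now returns the size the packet occupied (total) rather than the bytes that fit into the reader's iovec (copied), so a reader can detect truncation; the tun.c hunk just below makes the matching change. A rough user-space sketch of the accounting, with illustrative names:

    /* total = what the datagram occupied; copied = what the reader got.
     * Returning total lets callers see that total > their buffer size. */
    static long put_user_len(long vnet_hdr_len, long skb_len, int vlan_tagged)
    {
        long total = vnet_hdr_len + skb_len;
        if (vlan_tagged)
            total += 4;   /* VLAN_HLEN: the reinserted tag counts too */
        return total;
    }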
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 782e38bfc1ee..7c8343a4f918 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1184,7 +1184,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ {
+ struct tun_pi pi = { 0, skb->protocol };
+ ssize_t total = 0;
+- int vlan_offset = 0;
++ int vlan_offset = 0, copied;
+
+ if (!(tun->flags & TUN_NO_PI)) {
+ if ((len -= sizeof(pi)) < 0)
+@@ -1248,6 +1248,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ total += tun->vnet_hdr_sz;
+ }
+
++ copied = total;
++ total += skb->len;
+ if (!vlan_tx_tag_present(skb)) {
+ len = min_t(int, skb->len, len);
+ } else {
+@@ -1262,24 +1264,24 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+
+ vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ len = min_t(int, skb->len + VLAN_HLEN, len);
++ total += VLAN_HLEN;
+
+ copy = min_t(int, vlan_offset, len);
+- ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy);
++ ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
+ len -= copy;
+- total += copy;
++ copied += copy;
+ if (ret || !len)
+ goto done;
+
+ copy = min_t(int, sizeof(veth), len);
+- ret = memcpy_toiovecend(iv, (void *)&veth, total, copy);
++ ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
+ len -= copy;
+- total += copy;
++ copied += copy;
+ if (ret || !len)
+ goto done;
+ }
+
+- skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len);
+- total += len;
++ skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+
+ done:
+ tun->dev->stats.tx_packets++;
+@@ -1356,6 +1358,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
+ ret = tun_do_read(tun, tfile, iocb, iv, len,
+ file->f_flags & O_NONBLOCK);
+ ret = min_t(ssize_t, ret, len);
++ if (ret > 0)
++ iocb->ki_pos = ret;
+ out:
+ tun_put(tun);
+ return ret;
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index bbc9cb84ec1f..8065066a6230 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -297,26 +297,61 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
+ return skb;
+ }
+
+-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
++static struct sk_buff *receive_small(void *buf, unsigned int len)
+ {
+- struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+- struct page *page;
+- int num_buf, i, len;
++ struct sk_buff * skb = buf;
++
++ len -= sizeof(struct virtio_net_hdr);
++ skb_trim(skb, len);
++
++ return skb;
++}
++
++static struct sk_buff *receive_big(struct net_device *dev,
++ struct receive_queue *rq,
++ void *buf)
++{
++ struct page *page = buf;
++ struct sk_buff *skb = page_to_skb(rq, page, 0);
++
++ if (unlikely(!skb))
++ goto err;
++
++ return skb;
++
++err:
++ dev->stats.rx_dropped++;
++ give_pages(rq, page);
++ return NULL;
++}
++
++static struct sk_buff *receive_mergeable(struct net_device *dev,
++ struct receive_queue *rq,
++ void *buf,
++ unsigned int len)
++{
++ struct skb_vnet_hdr *hdr = page_address(buf);
++ int num_buf = hdr->mhdr.num_buffers;
++ struct page *page = buf;
++ struct sk_buff *skb = page_to_skb(rq, page, len);
++ int i;
++
++ if (unlikely(!skb))
++ goto err_skb;
+
+- num_buf = hdr->mhdr.num_buffers;
+ while (--num_buf) {
+ i = skb_shinfo(skb)->nr_frags;
+ if (i >= MAX_SKB_FRAGS) {
+ pr_debug("%s: packet too long\n", skb->dev->name);
+ skb->dev->stats.rx_length_errors++;
+- return -EINVAL;
++ goto err_frags;
+ }
+ page = virtqueue_get_buf(rq->vq, &len);
+ if (!page) {
+- pr_debug("%s: rx error: %d buffers missing\n",
+- skb->dev->name, hdr->mhdr.num_buffers);
+- skb->dev->stats.rx_length_errors++;
+- return -EINVAL;
++ pr_debug("%s: rx error: %d buffers %d missing\n",
++ dev->name, hdr->mhdr.num_buffers, num_buf);
++ dev->stats.rx_length_errors++;
++ goto err_buf;
+ }
+
+ if (len > PAGE_SIZE)
+@@ -326,7 +361,26 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
+
+ --rq->num;
+ }
+- return 0;
++ return skb;
++err_skb:
++ give_pages(rq, page);
++ while (--num_buf) {
++err_frags:
++ buf = virtqueue_get_buf(rq->vq, &len);
++ if (unlikely(!buf)) {
++ pr_debug("%s: rx error: %d buffers missing\n",
++ dev->name, num_buf);
++ dev->stats.rx_length_errors++;
++ break;
++ }
++ page = buf;
++ give_pages(rq, page);
++ --rq->num;
++ }
++err_buf:
++ dev->stats.rx_dropped++;
++ dev_kfree_skb(skb);
++ return NULL;
+ }
+
+ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
+@@ -335,7 +389,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
+ struct net_device *dev = vi->dev;
+ struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
+ struct sk_buff *skb;
+- struct page *page;
+ struct skb_vnet_hdr *hdr;
+
+ if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
+@@ -347,25 +400,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
+ dev_kfree_skb(buf);
+ return;
+ }
++ if (vi->mergeable_rx_bufs)
++ skb = receive_mergeable(dev, rq, buf, len);
++ else if (vi->big_packets)
++ skb = receive_big(dev, rq, buf);
++ else
++ skb = receive_small(buf, len);
+
+- if (!vi->mergeable_rx_bufs && !vi->big_packets) {
+- skb = buf;
+- len -= sizeof(struct virtio_net_hdr);
+- skb_trim(skb, len);
+- } else {
+- page = buf;
+- skb = page_to_skb(rq, page, len);
+- if (unlikely(!skb)) {
+- dev->stats.rx_dropped++;
+- give_pages(rq, page);
+- return;
+- }
+- if (vi->mergeable_rx_bufs)
+- if (receive_mergeable(rq, skb)) {
+- dev_kfree_skb(skb);
+- return;
+- }
+- }
++ if (unlikely(!skb))
++ return;
+
+ hdr = skb_vnet_hdr(skb);
+
+@@ -1307,6 +1350,11 @@ static void virtnet_config_changed(struct virtio_device *vdev)
+
+ static void virtnet_free_queues(struct virtnet_info *vi)
+ {
++ int i;
++
++ for (i = 0; i < vi->max_queue_pairs; i++)
++ netif_napi_del(&vi->rq[i].napi);
++
+ kfree(vi->rq);
+ kfree(vi->sq);
+ }
+@@ -1724,16 +1772,17 @@ static int virtnet_restore(struct virtio_device *vdev)
+ if (err)
+ return err;
+
+- if (netif_running(vi->dev))
++ if (netif_running(vi->dev)) {
++ for (i = 0; i < vi->curr_queue_pairs; i++)
++ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
++ schedule_delayed_work(&vi->refill, 0);
++
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_enable(&vi->rq[i]);
++ }
+
+ netif_device_attach(vi->dev);
+
+- for (i = 0; i < vi->curr_queue_pairs; i++)
+- if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+- schedule_delayed_work(&vi->refill, 0);
+-
+ mutex_lock(&vi->config_lock);
+ vi->config_enable = true;
+ mutex_unlock(&vi->config_lock);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 2ef5b6219f3f..146236891889 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1672,7 +1672,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ netdev_dbg(dev, "circular route to %pI4\n",
+ &dst->sin.sin_addr.s_addr);
+ dev->stats.collisions++;
+- goto tx_error;
++ goto rt_tx_error;
+ }
+
+ /* Bypass encapsulation if the destination is local */
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 2634d691ec17..dbc024bd4adf 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2645,13 +2645,16 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
+ }
+
+ if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
++ /* too large values might cause issues with arcmsr */
++ int vpd_buf_len = 64;
++
+ sdev->no_report_opcodes = 1;
+
+ /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
+ * CODES is unsupported and the device has an ATA
+ * Information VPD page (SAT).
+ */
+- if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE))
++ if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
+ sdev->no_write_same = 1;
+ }
+
+diff --git a/include/linux/net.h b/include/linux/net.h
+index 8bd9d926b3cf..41103f84527e 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -180,7 +180,7 @@ struct proto_ops {
+ int offset, size_t size, int flags);
+ ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len, unsigned int flags);
+- void (*set_peek_off)(struct sock *sk, int val);
++ int (*set_peek_off)(struct sock *sk, int val);
+ };
+
+ #define DECLARE_SOCKADDR(type, dst, src) \
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 25f5d2d11e7c..21eae43348fb 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1872,6 +1872,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
+ return dev->header_ops->parse(skb, haddr);
+ }
+
++static inline int dev_rebuild_header(struct sk_buff *skb)
++{
++ const struct net_device *dev = skb->dev;
++
++ if (!dev->header_ops || !dev->header_ops->rebuild)
++ return 0;
++ return dev->header_ops->rebuild(skb);
++}
++
+ typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
+ extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
+ static inline int unregister_gifconf(unsigned int family)
+@@ -2945,6 +2954,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
+ dev->gso_max_size = size;
+ }
+
++static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
++ int pulled_hlen, u16 mac_offset,
++ int mac_len)
++{
++ skb->protocol = protocol;
++ skb->encapsulation = 1;
++ skb_push(skb, pulled_hlen);
++ skb_reset_transport_header(skb);
++ skb->mac_header = mac_offset;
++ skb->network_header = skb->mac_header + mac_len;
++ skb->mac_len = mac_len;
++}
++
+ static inline bool netif_is_bond_master(struct net_device *dev)
+ {
+ return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index f66f346dd164..efa1649a822a 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1638,6 +1638,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
+ skb->mac_header += offset;
+ }
+
++static inline void skb_pop_mac_header(struct sk_buff *skb)
++{
++ skb->mac_header = skb->network_header;
++}
++
+ static inline void skb_probe_transport_header(struct sk_buff *skb,
+ const int offset_hint)
+ {
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 5ac63c9a995a..ceae65e69a64 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7277,7 +7277,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
+
+ runtime_enabled = quota != RUNTIME_INF;
+ runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
+- account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
++ /*
++ * If we need to toggle cfs_bandwidth_used, off->on must occur
++ * before making related changes, and on->off must occur afterwards
++ */
++ if (runtime_enabled && !runtime_was_enabled)
++ cfs_bandwidth_usage_inc();
+ raw_spin_lock_irq(&cfs_b->lock);
+ cfs_b->period = ns_to_ktime(period);
+ cfs_b->quota = quota;
+@@ -7303,6 +7308,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
+ unthrottle_cfs_rq(cfs_rq);
+ raw_spin_unlock_irq(&rq->lock);
+ }
++ if (runtime_was_enabled && !runtime_enabled)
++ cfs_bandwidth_usage_dec();
+ out_unlock:
+ mutex_unlock(&cfs_constraints_mutex);
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 7765ad82736a..411732334906 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2077,13 +2077,14 @@ static inline bool cfs_bandwidth_used(void)
+ return static_key_false(&__cfs_bandwidth_used);
+ }
+
+-void account_cfs_bandwidth_used(int enabled, int was_enabled)
++void cfs_bandwidth_usage_inc(void)
+ {
+- /* only need to count groups transitioning between enabled/!enabled */
+- if (enabled && !was_enabled)
+- static_key_slow_inc(&__cfs_bandwidth_used);
+- else if (!enabled && was_enabled)
+- static_key_slow_dec(&__cfs_bandwidth_used);
++ static_key_slow_inc(&__cfs_bandwidth_used);
++}
++
++void cfs_bandwidth_usage_dec(void)
++{
++ static_key_slow_dec(&__cfs_bandwidth_used);
+ }
+ #else /* HAVE_JUMP_LABEL */
+ static bool cfs_bandwidth_used(void)
+@@ -2091,7 +2092,8 @@ static bool cfs_bandwidth_used(void)
+ return true;
+ }
+
+-void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
++void cfs_bandwidth_usage_inc(void) {}
++void cfs_bandwidth_usage_dec(void) {}
+ #endif /* HAVE_JUMP_LABEL */
+
+ /*
+@@ -2457,6 +2459,13 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
+ if (idle)
+ goto out_unlock;
+
++ /*
++ * if we have relooped after returning idle once, we need to update our
++ * status as actually running, so that other cpus doing
++ * __start_cfs_bandwidth will stop trying to cancel us.
++ */
++ cfs_b->timer_active = 1;
++
+ __refill_cfs_bandwidth_runtime(cfs_b);
+
+ if (!throttled) {
+@@ -2517,7 +2526,13 @@ static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
+ /* how long we wait to gather additional slack before distributing */
+ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
+
+-/* are we near the end of the current quota period? */
++/*
++ * Are we near the end of the current quota period?
++ *
++ * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
++ * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
++ * migrate_hrtimers, base is never cleared, so we are fine.
++ */
+ static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
+ {
+ struct hrtimer *refresh_timer = &cfs_b->period_timer;
+@@ -2593,10 +2608,12 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
+ u64 expires;
+
+ /* confirm we're still not at a refresh boundary */
+- if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
++ raw_spin_lock(&cfs_b->lock);
++ if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
++ raw_spin_unlock(&cfs_b->lock);
+ return;
++ }
+
+- raw_spin_lock(&cfs_b->lock);
+ if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
+ runtime = cfs_b->runtime;
+ cfs_b->runtime = 0;
+@@ -2717,11 +2734,11 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+ * (timer_active==0 becomes visible before the hrtimer call-back
+ * terminates). In either case we ensure that it's re-programmed
+ */
+- while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
++ while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
++ hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
++ /* bounce the lock to allow do_sched_cfs_period_timer to run */
+ raw_spin_unlock(&cfs_b->lock);
+- /* ensure cfs_b->lock is available while we wait */
+- hrtimer_cancel(&cfs_b->period_timer);
+-
++ cpu_relax();
+ raw_spin_lock(&cfs_b->lock);
+ /* if someone else restarted the timer then we're done */
+ if (cfs_b->timer_active)
+@@ -6223,7 +6240,8 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
+ se->cfs_rq = parent->my_q;
+
+ se->my_q = cfs_rq;
+- update_load_set(&se->load, 0);
++ /* guarantee group entities always have weight */
++ update_load_set(&se->load, NICE_0_LOAD);
+ se->parent = parent;
+ }
+
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index b3c5653e1dca..a6208afd80e7 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1305,7 +1305,8 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
+ extern void init_cfs_rq(struct cfs_rq *cfs_rq);
+ extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
+
+-extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
++extern void cfs_bandwidth_usage_inc(void);
++extern void cfs_bandwidth_usage_dec(void);
+
+ #ifdef CONFIG_NO_HZ_COMMON
+ enum rq_nohz_flag_bits {
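The scheduler refactor above replaces one transition-detecting helper with an unconditional inc/dec pair, moving the off->on / on->off ordering decision into tg_set_cfs_bandwidth(). A user-space analogue of the split, with a plain counter standing in for the jump-label static key:

    static int bandwidth_used;   /* stand-in for __cfs_bandwidth_used */

    static void usage_inc(void) { bandwidth_used++; }
    static void usage_dec(void) { bandwidth_used--; }

    static void set_bandwidth(int was_enabled, int enabled)
    {
        if (enabled && !was_enabled)
            usage_inc();             /* off->on: before related changes */
        /* ... update period/quota under the lock here ... */
        if (was_enabled && !enabled)
            usage_dec();             /* on->off: afterwards */
    }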
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 09bf1c38805b..edf44d079da7 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -549,6 +549,23 @@ static const struct header_ops vlan_header_ops = {
+ .parse = eth_header_parse,
+ };
+
++static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
++ unsigned short type,
++ const void *daddr, const void *saddr,
++ unsigned int len)
++{
++ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
++ struct net_device *real_dev = vlan->real_dev;
++
++ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
++}
++
++static const struct header_ops vlan_passthru_header_ops = {
++ .create = vlan_passthru_hard_header,
++ .rebuild = dev_rebuild_header,
++ .parse = eth_header_parse,
++};
++
+ static struct device_type vlan_type = {
+ .name = "vlan",
+ };
+@@ -592,7 +609,7 @@ static int vlan_dev_init(struct net_device *dev)
+
+ dev->needed_headroom = real_dev->needed_headroom;
+ if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
+- dev->header_ops = real_dev->header_ops;
++ dev->header_ops = &vlan_passthru_header_ops;
+ dev->hard_header_len = real_dev->hard_header_len;
+ } else {
+ dev->header_ops = &vlan_header_ops;
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 8b0b610ca2c9..1b148a3affa7 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -2004,7 +2004,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
+ u32 old;
+ struct net_bridge_mdb_htable *mdb;
+
+- spin_lock(&br->multicast_lock);
++ spin_lock_bh(&br->multicast_lock);
+ if (!netif_running(br->dev))
+ goto unlock;
+
+@@ -2036,7 +2036,7 @@ rollback:
+ }
+
+ unlock:
+- spin_unlock(&br->multicast_lock);
++ spin_unlock_bh(&br->multicast_lock);
+
+ return err;
+ }
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index e14c33b42f75..9a63c4206e4a 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -442,6 +442,16 @@ extern netdev_features_t br_features_recompute(struct net_bridge *br,
+ extern int br_handle_frame_finish(struct sk_buff *skb);
+ extern rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
+
++static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
++{
++ return rcu_dereference(dev->rx_handler) == br_handle_frame;
++}
++
++static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
++{
++ return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
++}
++
+ /* br_ioctl.c */
+ extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+ extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg);
+diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
+index 8660ea3be705..bdb459d21ad8 100644
+--- a/net/bridge/br_stp_bpdu.c
++++ b/net/bridge/br_stp_bpdu.c
+@@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
+ if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
+ goto err;
+
+- p = br_port_get_rcu(dev);
++ p = br_port_get_check_rcu(dev);
+ if (!p)
+ goto err;
+
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index 5e78d44333b9..f27d126239b1 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -64,7 +64,6 @@ static struct genl_family net_drop_monitor_family = {
+ .hdrsize = 0,
+ .name = "NET_DM",
+ .version = 2,
+- .maxattr = NET_DM_CMD_MAX,
+ };
+
+ static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 6072610a8672..11af243bf92f 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -1274,7 +1274,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
+
+ if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
+ skb->len) < 0 &&
+- dev->header_ops->rebuild(skb))
++ dev_rebuild_header(skb))
+ return 0;
+
+ return dev_queue_xmit(skb);
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index fc75c9e461b8..0c1482c6ff98 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+ !vlan_hw_offload_capable(netif_skb_features(skb),
+ skb->vlan_proto)) {
+ skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
+- if (unlikely(!skb))
+- break;
++ if (unlikely(!skb)) {
++ /* This is actually a packet drop, but we
++ * don't want the code at the end of this
++ * function to try and re-queue a NULL skb.
++ */
++ status = NETDEV_TX_OK;
++ goto unlock_txq;
++ }
+ skb->vlan_tci = 0;
+ }
+
+@@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+ if (status == NETDEV_TX_OK)
+ txq_trans_update(txq);
+ }
++ unlock_txq:
+ __netif_tx_unlock(txq);
+
+ if (status == NETDEV_TX_OK)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index c28c7fed0d0b..743e6ebf5f9f 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3541,6 +3541,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ skb->tstamp.tv64 = 0;
+ skb->pkt_type = PACKET_HOST;
+ skb->skb_iif = 0;
++ skb->local_df = 0;
+ skb_dst_drop(skb);
+ skb->mark = 0;
+ secpath_reset(skb);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 0b39e7ae4383..5cec994ee2f3 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -888,7 +888,7 @@ set_rcvbuf:
+
+ case SO_PEEK_OFF:
+ if (sock->ops->set_peek_off)
+- sock->ops->set_peek_off(sk, val);
++ ret = sock->ops->set_peek_off(sk, val);
+ else
+ ret = -EOPNOTSUPP;
+ break;
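Changing set_peek_off to return int lets the AF_UNIX implementation (further below) fail with -EINTR when mutex_lock_interruptible() is interrupted, and SO_PEEK_OFF now propagates that to the caller. A condensed sketch of the propagation, with simplified signatures:

    #include <errno.h>

    struct ops { int (*set_peek_off)(int val); };

    /* Mirrors the sock.c hunk: forward the callback's return value. */
    static int setsockopt_peek_off(const struct ops *ops, int val)
    {
        if (ops->set_peek_off)
            return ops->set_peek_off(val);  /* may now be -EINTR */
        return -EOPNOTSUPP;
    }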
+diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
+index 523be38e37de..f2e15738534d 100644
+--- a/net/ipv4/fib_rules.c
++++ b/net/ipv4/fib_rules.c
+@@ -104,7 +104,10 @@ errout:
+ static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
+ {
+ struct fib_result *result = (struct fib_result *) arg->result;
+- struct net_device *dev = result->fi->fib_dev;
++ struct net_device *dev = NULL;
++
++ if (result->fi)
++ dev = result->fi->fib_dev;
+
+ /* do not accept result if the route does
+ * not meet the required prefix length
+diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
+index 55e6bfb3a289..db98705905f7 100644
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
+ netdev_features_t enc_features;
+ int ghl = GRE_HEADER_SECTION;
+ struct gre_base_hdr *greh;
++ u16 mac_offset = skb->mac_header;
+ int mac_len = skb->mac_len;
+ __be16 protocol = skb->protocol;
+ int tnl_hlen;
+@@ -57,13 +58,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
+ } else
+ csum = false;
+
++ if (unlikely(!pskb_may_pull(skb, ghl)))
++ goto out;
++
+ /* setup inner skb. */
+ skb->protocol = greh->protocol;
+ skb->encapsulation = 0;
+
+- if (unlikely(!pskb_may_pull(skb, ghl)))
+- goto out;
+-
+ __skb_pull(skb, ghl);
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, skb_inner_network_offset(skb));
+@@ -72,8 +73,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
+ /* segment inner packet. */
+ enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+ segs = skb_mac_gso_segment(skb, enc_features);
+- if (!segs || IS_ERR(segs))
++ if (!segs || IS_ERR(segs)) {
++ skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
+ goto out;
++ }
+
+ skb = segs;
+ tnl_hlen = skb_tnl_header_len(skb);
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index 5f648751fce2..31cf54d18221 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+
+ r->id.idiag_sport = inet->inet_sport;
+ r->id.idiag_dport = inet->inet_dport;
++
++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
++
+ r->id.idiag_src[0] = inet->inet_rcv_saddr;
+ r->id.idiag_dst[0] = inet->inet_daddr;
+
+@@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
+
+ r->idiag_family = tw->tw_family;
+ r->idiag_retrans = 0;
++
+ r->id.idiag_if = tw->tw_bound_dev_if;
+ sock_diag_save_cookie(tw, r->id.idiag_cookie);
++
+ r->id.idiag_sport = tw->tw_sport;
+ r->id.idiag_dport = tw->tw_dport;
++
++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
++
+ r->id.idiag_src[0] = tw->tw_rcv_saddr;
+ r->id.idiag_dst[0] = tw->tw_daddr;
++
+ r->idiag_state = tw->tw_substate;
+ r->idiag_timer = 3;
+ r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
+@@ -732,8 +743,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
+
+ r->id.idiag_sport = inet->inet_sport;
+ r->id.idiag_dport = ireq->rmt_port;
++
++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
++
+ r->id.idiag_src[0] = ireq->loc_addr;
+ r->id.idiag_dst[0] = ireq->rmt_addr;
++
+ r->idiag_expires = jiffies_to_msecs(tmo);
+ r->idiag_rqueue = 0;
+ r->idiag_wqueue = 0;
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index d7aea4c5b940..e560ef34cf4b 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -217,6 +217,7 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
+ iph->saddr, iph->daddr, tpi->key);
+
+ if (tunnel) {
++ skb_pop_mac_header(skb);
+ ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
+ return PACKET_RCVD;
+ }
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 5e2c2f1a075d..6ca990726d5b 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2294,6 +2294,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+ netdev_features_t features)
+ {
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
++ u16 mac_offset = skb->mac_header;
+ int mac_len = skb->mac_len;
+ int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
+ __be16 protocol = skb->protocol;
+@@ -2313,8 +2314,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+ /* segment inner packet. */
+ enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+ segs = skb_mac_gso_segment(skb, enc_features);
+- if (!segs || IS_ERR(segs))
++ if (!segs || IS_ERR(segs)) {
++ skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
++ mac_len);
+ goto out;
++ }
+
+ outer_hlen = skb_tnl_header_len(skb);
+ skb = segs;
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index f35eccaa855e..6b809e4bf1ed 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+ {
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ unsigned int mss;
++ int offset;
++ __wsum csum;
++
++ if (skb->encapsulation &&
++ skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
++ segs = skb_udp_tunnel_segment(skb, features);
++ goto out;
++ }
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(skb->len <= mss))
+@@ -62,27 +70,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+ goto out;
+ }
+
++ /* Do software UFO. Complete and fill in the UDP checksum as
++ * HW cannot do checksum of UDP packets sent as multiple
++ * IP fragments.
++ */
++ offset = skb_checksum_start_offset(skb);
++ csum = skb_checksum(skb, offset, skb->len - offset, 0);
++ offset += skb->csum_offset;
++ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
++ skb->ip_summed = CHECKSUM_NONE;
++
+ /* Fragment the skb. IP headers of the fragments are updated in
+ * inet_gso_segment()
+ */
+- if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
+- segs = skb_udp_tunnel_segment(skb, features);
+- else {
+- int offset;
+- __wsum csum;
+-
+- /* Do software UFO. Complete and fill in the UDP checksum as
+- * HW cannot do checksum of UDP packets sent as multiple
+- * IP fragments.
+- */
+- offset = skb_checksum_start_offset(skb);
+- csum = skb_checksum(skb, offset, skb->len - offset, 0);
+- offset += skb->csum_offset;
+- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+- skb->ip_summed = CHECKSUM_NONE;
+-
+- segs = skb_segment(skb, features);
+- }
++ segs = skb_segment(skb, features);
+ out:
+ return segs;
+ }
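The software-UFO block hoisted above completes the UDP checksum before fragmentation, since hardware cannot checksum a datagram split across multiple IP fragments. The final fold step works like this (stand-alone illustration, not the kernel's csum_fold):

    #include <stdint.h>

    /* Fold a 32-bit ones'-complement accumulator into the final
     * 16-bit Internet checksum. */
    static uint16_t fold_csum(uint32_t sum)
    {
        sum = (sum & 0xffff) + (sum >> 16);   /* fold carries once */
        sum = (sum & 0xffff) + (sum >> 16);   /* and any new carry */
        return (uint16_t)~sum;
    }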
+diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
+index e27591635f92..3fd0a578329e 100644
+--- a/net/ipv6/fib6_rules.c
++++ b/net/ipv6/fib6_rules.c
+@@ -122,7 +122,11 @@ out:
+ static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
+ {
+ struct rt6_info *rt = (struct rt6_info *) arg->result;
+- struct net_device *dev = rt->rt6i_idev->dev;
++ struct net_device *dev = NULL;
++
++ if (rt->rt6i_idev)
++ dev = rt->rt6i_idev->dev;
++
+ /* do not accept result if the route does
+ * not meet the required prefix length
+ */
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 77308af056bc..0accb1321dd6 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -84,6 +84,8 @@ static int ip6_dst_gc(struct dst_ops *ops);
+
+ static int ip6_pkt_discard(struct sk_buff *skb);
+ static int ip6_pkt_discard_out(struct sk_buff *skb);
++static int ip6_pkt_prohibit(struct sk_buff *skb);
++static int ip6_pkt_prohibit_out(struct sk_buff *skb);
+ static void ip6_link_failure(struct sk_buff *skb);
+ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu);
+@@ -234,9 +236,6 @@ static const struct rt6_info ip6_null_entry_template = {
+
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+
+-static int ip6_pkt_prohibit(struct sk_buff *skb);
+-static int ip6_pkt_prohibit_out(struct sk_buff *skb);
+-
+ static const struct rt6_info ip6_prohibit_entry_template = {
+ .dst = {
+ .__refcnt = ATOMIC_INIT(1),
+@@ -1570,21 +1569,24 @@ int ip6_route_add(struct fib6_config *cfg)
+ goto out;
+ }
+ }
+- rt->dst.output = ip6_pkt_discard_out;
+- rt->dst.input = ip6_pkt_discard;
+ rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
+ switch (cfg->fc_type) {
+ case RTN_BLACKHOLE:
+ rt->dst.error = -EINVAL;
++ rt->dst.output = dst_discard;
++ rt->dst.input = dst_discard;
+ break;
+ case RTN_PROHIBIT:
+ rt->dst.error = -EACCES;
++ rt->dst.output = ip6_pkt_prohibit_out;
++ rt->dst.input = ip6_pkt_prohibit;
+ break;
+ case RTN_THROW:
+- rt->dst.error = -EAGAIN;
+- break;
+ default:
+- rt->dst.error = -ENETUNREACH;
++ rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
++ : -ENETUNREACH;
++ rt->dst.output = ip6_pkt_discard_out;
++ rt->dst.input = ip6_pkt_discard;
+ break;
+ }
+ goto install_route;
+@@ -1908,9 +1910,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
+ else
+ rt->rt6i_gateway = *dest;
+ rt->rt6i_flags = ort->rt6i_flags;
+- if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
+- (RTF_DEFAULT | RTF_ADDRCONF))
+- rt6_set_from(rt, ort);
++ rt6_set_from(rt, ort);
+ rt->rt6i_metric = 0;
+
+ #ifdef CONFIG_IPV6_SUBTREES
+@@ -2149,8 +2149,6 @@ static int ip6_pkt_discard_out(struct sk_buff *skb)
+ return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
+ }
+
+-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+-
+ static int ip6_pkt_prohibit(struct sk_buff *skb)
+ {
+ return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
+@@ -2162,8 +2160,6 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb)
+ return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
+ }
+
+-#endif
+-
+ /*
+ * Allocate a dst for local (unicast / anycast) address.
+ */
+@@ -2173,12 +2169,10 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
+ bool anycast)
+ {
+ struct net *net = dev_net(idev->dev);
+- struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
+-
+- if (!rt) {
+- net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
++ struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
++ DST_NOCOUNT, NULL);
++ if (!rt)
+ return ERR_PTR(-ENOMEM);
+- }
+
+ in6_dev_hold(idev);
+
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
+index 34c6fff3ae84..06556d6e1a4d 100644
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -88,7 +88,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+
+ /* Check if there is enough headroom to insert fragment header. */
+ tnl_hlen = skb_tnl_header_len(skb);
+- if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
++ if (skb_mac_header(skb) < skb->head + tnl_hlen + frag_hdr_sz) {
+ if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+ goto out;
+ }
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 7b01b9f5846c..c71b699eb555 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ unsigned long cpu_flags;
+ size_t copied = 0;
+ u32 peek_seq = 0;
+- u32 *seq;
++ u32 *seq, skb_len;
+ unsigned long used;
+ int target; /* Read at least this many bytes */
+ long timeo;
+@@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+ continue;
+ found_ok_skb:
++ skb_len = skb->len;
+ /* Ok so how much can we use? */
+ used = skb->len - offset;
+ if (len < used)
+@@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+
+ /* Partial read */
+- if (used + offset < skb->len)
++ if (used + offset < skb_len)
+ continue;
+ } while (len > 0);
+
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 70b5a05c0a4e..1eb2b78e927b 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -463,7 +463,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ {
+ struct sta_info *sta = tx->sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+ struct ieee80211_local *local = tx->local;
+
+ if (unlikely(!sta))
+@@ -474,15 +473,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
+ int ac = skb_get_queue_mapping(tx->skb);
+
+- /* only deauth, disassoc and action are bufferable MMPDUs */
+- if (ieee80211_is_mgmt(hdr->frame_control) &&
+- !ieee80211_is_deauth(hdr->frame_control) &&
+- !ieee80211_is_disassoc(hdr->frame_control) &&
+- !ieee80211_is_action(hdr->frame_control)) {
+- info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+- return TX_CONTINUE;
+- }
+-
+ ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
+ sta->sta.addr, sta->sta.aid, ac);
+ if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
+@@ -525,9 +515,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ static ieee80211_tx_result debug_noinline
+ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
+ {
++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
++
+ if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
+ return TX_CONTINUE;
+
++ /* only deauth, disassoc and action are bufferable MMPDUs */
++ if (ieee80211_is_mgmt(hdr->frame_control) &&
++ !ieee80211_is_deauth(hdr->frame_control) &&
++ !ieee80211_is_disassoc(hdr->frame_control) &&
++ !ieee80211_is_action(hdr->frame_control)) {
++ if (tx->flags & IEEE80211_TX_UNICAST)
++ info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
++ return TX_CONTINUE;
++ }
++
+ if (tx->flags & IEEE80211_TX_UNICAST)
+ return ieee80211_tx_h_unicast_ps_buf(tx);
+ else
+diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
+index 5f9bfd060dea..17c1bcb182c6 100644
+--- a/net/netfilter/nf_conntrack_seqadj.c
++++ b/net/netfilter/nf_conntrack_seqadj.c
+@@ -41,8 +41,8 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ spin_lock_bh(&ct->lock);
+ this_way = &seqadj->seq[dir];
+ if (this_way->offset_before == this_way->offset_after ||
+- before(this_way->correction_pos, seq)) {
+- this_way->correction_pos = seq;
++ before(this_way->correction_pos, ntohl(seq))) {
++ this_way->correction_pos = ntohl(seq);
+ this_way->offset_before = this_way->offset_after;
+ this_way->offset_after += off;
+ }
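The bug fixed above compared a host-order correction position against a raw big-endian sequence number. This small user-space illustration (simplified to < instead of the wrap-safe before()) shows why the conversion matters:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        uint32_t wire_seq = htonl(1000); /* __be32 as carried in the packet */
        uint32_t pos = 2000;             /* host order, already past 1000 */

        printf("raw   : %d\n", pos < wire_seq);        /* buggy comparison */
        printf("ntohl : %d\n", pos < ntohl(wire_seq)); /* what the fix does */
        return 0;
    }

On a little-endian machine the two lines disagree; on big-endian htonl is the identity, which hides the bug and explains how it slipped through.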
+diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
+index f02b3605823e..1fb2258c3535 100644
+--- a/net/netfilter/nf_nat_irc.c
++++ b/net/netfilter/nf_nat_irc.c
+@@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb,
+ struct nf_conntrack_expect *exp)
+ {
+ char buffer[sizeof("4294967296 65635")];
++ struct nf_conn *ct = exp->master;
++ union nf_inet_addr newaddr;
+ u_int16_t port;
+ unsigned int ret;
+
+ /* Reply comes from server. */
++ newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
++
+ exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
+ exp->dir = IP_CT_DIR_REPLY;
+ exp->expectfn = nf_nat_follow_master;
+@@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb,
+ }
+
+ if (port == 0) {
+- nf_ct_helper_log(skb, exp->master, "all ports in use");
++ nf_ct_helper_log(skb, ct, "all ports in use");
+ return NF_DROP;
+ }
+
+- ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
+- protoff, matchoff, matchlen, buffer,
+- strlen(buffer));
++ /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
++ * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
++ * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
++ * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
++ * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
++ *
++ * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
++ * 255.255.255.255==4294967295, 10 digits)
++ * P: bound port (min 1 d, max 5 d (65535))
++ * F: filename (min 1 d )
++ * S: size (min 1 d )
++ * 0x01, \n: terminators
++ */
++ /* AAA = "us", ie. where server normally talks to. */
++ snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
++ pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
++ buffer, &newaddr.ip, port);
++
++ ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
++ matchlen, buffer, strlen(buffer));
+ if (ret != NF_ACCEPT) {
+- nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
++ nf_ct_helper_log(skb, ct, "cannot mangle packet");
+ nf_ct_unexpect_related(exp);
+ }
++
+ return ret;
+ }
+
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index ba2548bd85bf..88cfbc189558 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -237,6 +237,30 @@ struct packet_skb_cb {
+ static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
+ static void __fanout_link(struct sock *sk, struct packet_sock *po);
+
++static struct net_device *packet_cached_dev_get(struct packet_sock *po)
++{
++ struct net_device *dev;
++
++ rcu_read_lock();
++ dev = rcu_dereference(po->cached_dev);
++ if (likely(dev))
++ dev_hold(dev);
++ rcu_read_unlock();
++
++ return dev;
++}
++
++static void packet_cached_dev_assign(struct packet_sock *po,
++ struct net_device *dev)
++{
++ rcu_assign_pointer(po->cached_dev, dev);
++}
++
++static void packet_cached_dev_reset(struct packet_sock *po)
++{
++ RCU_INIT_POINTER(po->cached_dev, NULL);
++}
++
+ /* register_prot_hook must be invoked with the po->bind_lock held,
+ * or from a context in which asynchronous accesses to the packet
+ * socket is not possible (packet_create()).
+@@ -246,12 +270,10 @@ static void register_prot_hook(struct sock *sk)
+ struct packet_sock *po = pkt_sk(sk);
+
+ if (!po->running) {
+- if (po->fanout) {
++ if (po->fanout)
+ __fanout_link(sk, po);
+- } else {
++ else
+ dev_add_pack(&po->prot_hook);
+- rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
+- }
+
+ sock_hold(sk);
+ po->running = 1;
+@@ -270,12 +292,11 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
+ struct packet_sock *po = pkt_sk(sk);
+
+ po->running = 0;
+- if (po->fanout) {
++
++ if (po->fanout)
+ __fanout_unlink(sk, po);
+- } else {
++ else
+ __dev_remove_pack(&po->prot_hook);
+- RCU_INIT_POINTER(po->cached_dev, NULL);
+- }
+
+ __sock_put(sk);
+
+@@ -2059,19 +2080,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ return tp_len;
+ }
+
+-static struct net_device *packet_cached_dev_get(struct packet_sock *po)
+-{
+- struct net_device *dev;
+-
+- rcu_read_lock();
+- dev = rcu_dereference(po->cached_dev);
+- if (dev)
+- dev_hold(dev);
+- rcu_read_unlock();
+-
+- return dev;
+-}
+-
+ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ {
+ struct sk_buff *skb;
+@@ -2088,7 +2096,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+
+ mutex_lock(&po->pg_vec_lock);
+
+- if (saddr == NULL) {
++ if (likely(saddr == NULL)) {
+ dev = packet_cached_dev_get(po);
+ proto = po->num;
+ addr = NULL;
+@@ -2242,7 +2250,7 @@ static int packet_snd(struct socket *sock,
+ * Get and verify the address.
+ */
+
+- if (saddr == NULL) {
++ if (likely(saddr == NULL)) {
+ dev = packet_cached_dev_get(po);
+ proto = po->num;
+ addr = NULL;
+@@ -2451,6 +2459,8 @@ static int packet_release(struct socket *sock)
+
+ spin_lock(&po->bind_lock);
+ unregister_prot_hook(sk, false);
++ packet_cached_dev_reset(po);
++
+ if (po->prot_hook.dev) {
+ dev_put(po->prot_hook.dev);
+ po->prot_hook.dev = NULL;
+@@ -2506,14 +2516,17 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
+
+ spin_lock(&po->bind_lock);
+ unregister_prot_hook(sk, true);
++
+ po->num = protocol;
+ po->prot_hook.type = protocol;
+ if (po->prot_hook.dev)
+ dev_put(po->prot_hook.dev);
+- po->prot_hook.dev = dev;
+
++ po->prot_hook.dev = dev;
+ po->ifindex = dev ? dev->ifindex : 0;
+
++ packet_cached_dev_assign(po, dev);
++
+ if (protocol == 0)
+ goto out_unlock;
+
+@@ -2626,7 +2639,8 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
+ po = pkt_sk(sk);
+ sk->sk_family = PF_PACKET;
+ po->num = proto;
+- RCU_INIT_POINTER(po->cached_dev, NULL);
++
++ packet_cached_dev_reset(po);
+
+ sk->sk_destruct = packet_sock_destruct;
+ sk_refcnt_debug_inc(sk);
+@@ -3337,6 +3351,7 @@ static int packet_notifier(struct notifier_block *this,
+ sk->sk_error_report(sk);
+ }
+ if (msg == NETDEV_UNREGISTER) {
++ packet_cached_dev_reset(po);
+ po->ifindex = -1;
+ if (po->prot_hook.dev)
+ dev_put(po->prot_hook.dev);
+diff --git a/net/rds/ib.c b/net/rds/ib.c
+index b4c8b0022fee..ba2dffeff608 100644
+--- a/net/rds/ib.c
++++ b/net/rds/ib.c
+@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
+ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+ /* due to this, we will claim to support iWARP devices unless we
+ check node_type. */
+- if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
++ if (ret || !cm_id->device ||
++ cm_id->device->node_type != RDMA_NODE_IB_CA)
+ ret = -EADDRNOTAVAIL;
+
+ rdsdebug("addr %pI4 ret %d node type %d\n",
+diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
+index e59094981175..37be6e226d1b 100644
+--- a/net/rds/ib_send.c
++++ b/net/rds/ib_send.c
+@@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
+ && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
+ rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
+ scat = &rm->data.op_sg[sg];
+- ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+- ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
+- return ret;
++ ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
++ return sizeof(struct rds_header) + ret;
+ }
+
+ /* FIXME we may overallocate here */
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 33af77246bfe..62ced6516c58 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ if (msg->msg_name) {
+ struct sockaddr_rose *srose;
++ struct full_sockaddr_rose *full_srose = msg->msg_name;
+
+ memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
+ srose = msg->msg_name;
+@@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+ srose->srose_addr = rose->dest_addr;
+ srose->srose_call = rose->dest_call;
+ srose->srose_ndigis = rose->dest_ndigis;
+- if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
+- struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
+- for (n = 0 ; n < rose->dest_ndigis ; n++)
+- full_srose->srose_digis[n] = rose->dest_digis[n];
+- msg->msg_namelen = sizeof(struct full_sockaddr_rose);
+- } else {
+- if (rose->dest_ndigis >= 1) {
+- srose->srose_ndigis = 1;
+- srose->srose_digi = rose->dest_digis[0];
+- }
+- msg->msg_namelen = sizeof(struct sockaddr_rose);
+- }
++ for (n = 0 ; n < rose->dest_ndigis ; n++)
++ full_srose->srose_digis[n] = rose->dest_digis[n];
++ msg->msg_namelen = sizeof(struct full_sockaddr_rose);
+ }
+
+ skb_free_datagram(sk, skb);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 01625ccc3ae6..a427623ee574 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -530,13 +530,17 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
+ static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
+ struct msghdr *, size_t, int);
+
+-static void unix_set_peek_off(struct sock *sk, int val)
++static int unix_set_peek_off(struct sock *sk, int val)
+ {
+ struct unix_sock *u = unix_sk(sk);
+
+- mutex_lock(&u->readlock);
++ if (mutex_lock_interruptible(&u->readlock))
++ return -EINTR;
++
+ sk->sk_peek_off = val;
+ mutex_unlock(&u->readlock);
++
++ return 0;
+ }
+
+
+@@ -714,7 +718,9 @@ static int unix_autobind(struct socket *sock)
+ int err;
+ unsigned int retries = 0;
+
+- mutex_lock(&u->readlock);
++ err = mutex_lock_interruptible(&u->readlock);
++ if (err)
++ return err;
+
+ err = 0;
+ if (u->addr)
+@@ -873,7 +879,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ goto out;
+ addr_len = err;
+
+- mutex_lock(&u->readlock);
++ err = mutex_lock_interruptible(&u->readlock);
++ if (err)
++ goto out;
+
+ err = -EINVAL;
+ if (u->addr)
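Changing unix_set_peek_off() from void to int only matters if the core socket layer forwards the error to userspace. A hedged sketch of the matching core-side handling (the real change lives in net/core/sock.c, outside these hunks; do_set_peek_off() is an illustrative name):

    /* Sketch: propagate -EINTR from the protocol's set_peek_off hook,
     * assuming proto_ops.set_peek_off now returns int as above. */
    static int do_set_peek_off(struct socket *sock, struct sock *sk, int val)
    {
            if (!sock->ops->set_peek_off)
                    return -EOPNOTSUPP;
            return sock->ops->set_peek_off(sk, val);
    }

With the interruptible locks in unix_autobind() and unix_bind(), a signal now aborts the wait instead of leaving the task blocked uninterruptibly on readlock.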
diff --git a/1008_linux-3.12.9.patch b/1008_linux-3.12.9.patch
new file mode 100644
index 00000000..dd786047
--- /dev/null
+++ b/1008_linux-3.12.9.patch
@@ -0,0 +1,761 @@
+diff --git a/Makefile b/Makefile
+index 5d0ec13bb77d..4ee77eaa7b1f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
+index f35906b3d8c9..41960fb0daff 100644
+--- a/arch/arm/kernel/devtree.c
++++ b/arch/arm/kernel/devtree.c
+@@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void)
+
+ bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+ {
+- return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
++ return phys_id == cpu_logical_map(cpu);
+ }
+
+ /**
+diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
+index 8e44973b0139..2f176a495c32 100644
+--- a/arch/arm/mach-highbank/highbank.c
++++ b/arch/arm/mach-highbank/highbank.c
+@@ -66,6 +66,7 @@ void highbank_set_cpu_jump(int cpu, void *jump_addr)
+
+ static void highbank_l2x0_disable(void)
+ {
++ outer_flush_all();
+ /* Disable PL310 L2 Cache controller */
+ highbank_smc1(0x102, 0x0);
+ }
+diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
+index 57911430324e..3f44b162fcab 100644
+--- a/arch/arm/mach-omap2/omap4-common.c
++++ b/arch/arm/mach-omap2/omap4-common.c
+@@ -163,6 +163,7 @@ void __iomem *omap4_get_l2cache_base(void)
+
+ static void omap4_l2x0_disable(void)
+ {
++ outer_flush_all();
+ /* Disable PL310 L2 Cache controller */
+ omap_smc1(0x102, 0x0);
+ }
+diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+index e09f0bfb7b8f..4b8e4d3cd6ea 100644
+--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
++++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+@@ -10,6 +10,7 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/ptrace.h>
++#include <linux/syscore_ops.h>
+
+ #include <asm/apic.h>
+
+@@ -816,6 +817,18 @@ out:
+ return ret;
+ }
+
++static void ibs_eilvt_setup(void)
++{
++ /*
++ * Force LVT offset assignment for family 10h: The offsets are
++ * not assigned by the BIOS for this family, so the OS is
++ * responsible for doing it. If the OS assignment fails, fall
++ * back to BIOS settings and try to setup this.
++ */
++ if (boot_cpu_data.x86 == 0x10)
++ force_ibs_eilvt_setup();
++}
++
+ static inline int get_ibs_lvt_offset(void)
+ {
+ u64 val;
+@@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy)
+ setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
+ }
+
++#ifdef CONFIG_PM
++
++static int perf_ibs_suspend(void)
++{
++ clear_APIC_ibs(NULL);
++ return 0;
++}
++
++static void perf_ibs_resume(void)
++{
++ ibs_eilvt_setup();
++ setup_APIC_ibs(NULL);
++}
++
++static struct syscore_ops perf_ibs_syscore_ops = {
++ .resume = perf_ibs_resume,
++ .suspend = perf_ibs_suspend,
++};
++
++static void perf_ibs_pm_init(void)
++{
++ register_syscore_ops(&perf_ibs_syscore_ops);
++}
++
++#else
++
++static inline void perf_ibs_pm_init(void) { }
++
++#endif
++
+ static int
+ perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+ {
+@@ -877,18 +920,12 @@ static __init int amd_ibs_init(void)
+ if (!caps)
+ return -ENODEV; /* ibs not supported by the cpu */
+
+- /*
+- * Force LVT offset assignment for family 10h: The offsets are
+- * not assigned by the BIOS for this family, so the OS is
+- * responsible for doing it. If the OS assignment fails, fall
+- * back to BIOS settings and try to setup this.
+- */
+- if (boot_cpu_data.x86 == 0x10)
+- force_ibs_eilvt_setup();
++ ibs_eilvt_setup();
+
+ if (!ibs_eilvt_valid())
+ goto out;
+
++ perf_ibs_pm_init();
+ get_online_cpus();
+ ibs_caps = caps;
+ /* make ibs_caps visible to other cpus: */
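The IBS rework above is an instance of a common pattern: APIC LVT state programmed at boot is lost across suspend, so it is redone from a syscore resume callback, which runs once on the boot CPU with interrupts disabled. A minimal hedged template of that pattern (the my_* names are placeholders):

    #include <linux/syscore_ops.h>

    static int my_suspend(void)
    {
            /* quiesce the hardware; must not sleep */
            return 0;
    }

    static void my_resume(void)
    {
            /* reprogram state that firmware or the suspend path clobbered */
    }

    static struct syscore_ops my_syscore_ops = {
            .suspend = my_suspend,
            .resume  = my_resume,
    };

    static int __init my_init(void)
    {
            register_syscore_ops(&my_syscore_ops);
            return 0;
    }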
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index f0dcb0ceb6a2..15a569a47b4d 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -1085,7 +1085,7 @@ ENTRY(ftrace_caller)
+ pushl $0 /* Pass NULL as regs pointer */
+ movl 4*4(%esp), %eax
+ movl 0x4(%ebp), %edx
+- leal function_trace_op, %ecx
++ movl function_trace_op, %ecx
+ subl $MCOUNT_INSN_SIZE, %eax
+
+ .globl ftrace_call
+@@ -1143,7 +1143,7 @@ ENTRY(ftrace_regs_caller)
+ movl 12*4(%esp), %eax /* Load ip (1st parameter) */
+ subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
+ movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
+- leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
++ movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
+ pushl %esp /* Save pt_regs as 4th parameter */
+
+ GLOBAL(ftrace_regs_call)
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index b077f4cc225a..9ce256739175 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -88,7 +88,7 @@ END(function_hook)
+ MCOUNT_SAVE_FRAME \skip
+
+ /* Load the ftrace_ops into the 3rd parameter */
+- leaq function_trace_op, %rdx
++ movq function_trace_op(%rip), %rdx
+
+ /* Load ip into the first parameter */
+ movq RIP(%rsp), %rdi
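The leal-to-movl (and leaq-to-movq) swaps are easy to misread: function_trace_op is itself a pointer variable, and the ftrace callback expects its value, not its address. An illustrative C analogue of what the two instructions hand to the callback:

    struct ftrace_ops *function_trace_op;

    void pass_op_wrong(void (*cb)(void *))
    {
            cb(&function_trace_op); /* leal/leaq: address of the slot */
    }

    void pass_op_right(void (*cb)(void *))
    {
            cb(function_trace_op);  /* movl/movq: the pointer stored there */
    }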
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index ab19263baf39..fb78bb9ad8f6 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -156,7 +156,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
+ { "80860F14", (unsigned long)&byt_sdio_dev_desc },
+ { "80860F41", (unsigned long)&byt_i2c_dev_desc },
+ { "INT33B2", },
+- { "INT33FC", },
+
+ { }
+ };
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index a069b5e2a2d2..920cd19edc69 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -961,12 +961,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
+ enum pipe pipe;
+ struct intel_crtc *intel_crtc;
+
++ dev_priv->ddi_plls.spll_refcount = 0;
++ dev_priv->ddi_plls.wrpll1_refcount = 0;
++ dev_priv->ddi_plls.wrpll2_refcount = 0;
++
+ for_each_pipe(pipe) {
+ intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+- if (!intel_crtc->active)
++ if (!intel_crtc->active) {
++ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+ continue;
++ }
+
+ intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
+ pipe);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index aad6f7bfc589..dd2d542e4651 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -10592,9 +10592,9 @@ void intel_modeset_gem_init(struct drm_device *dev)
+
+ intel_setup_overlay(dev);
+
+- drm_modeset_lock_all(dev);
++ mutex_lock(&dev->mode_config.mutex);
+ intel_modeset_setup_hw_state(dev, false);
+- drm_modeset_unlock_all(dev);
++ mutex_unlock(&dev->mode_config.mutex);
+ }
+
+ void intel_modeset_cleanup(struct drm_device *dev)
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 78be66176840..942509892895 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -52,7 +52,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+
+ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
+ #define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
+-#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
++#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
+ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
+ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
+ #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index ba46d9749a0b..015bc455cf1c 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1119,6 +1119,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
+ rdev->raid_disk = -1;
+ clear_bit(Faulty, &rdev->flags);
+ clear_bit(In_sync, &rdev->flags);
++ clear_bit(Bitmap_sync, &rdev->flags);
+ clear_bit(WriteMostly, &rdev->flags);
+
+ if (mddev->raid_disks == 0) {
+@@ -1197,6 +1198,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
+ */
+ if (ev1 < mddev->bitmap->events_cleared)
+ return 0;
++ if (ev1 < mddev->events)
++ set_bit(Bitmap_sync, &rdev->flags);
+ } else {
+ if (ev1 < mddev->events)
+ /* just a hot-add of a new device, leave raid_disk at -1 */
+@@ -1605,6 +1608,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
+ rdev->raid_disk = -1;
+ clear_bit(Faulty, &rdev->flags);
+ clear_bit(In_sync, &rdev->flags);
++ clear_bit(Bitmap_sync, &rdev->flags);
+ clear_bit(WriteMostly, &rdev->flags);
+
+ if (mddev->raid_disks == 0) {
+@@ -1687,6 +1691,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
+ */
+ if (ev1 < mddev->bitmap->events_cleared)
+ return 0;
++ if (ev1 < mddev->events)
++ set_bit(Bitmap_sync, &rdev->flags);
+ } else {
+ if (ev1 < mddev->events)
+ /* just a hot-add of a new device, leave raid_disk at -1 */
+@@ -2830,6 +2836,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
+ else
+ rdev->saved_raid_disk = -1;
+ clear_bit(In_sync, &rdev->flags);
++ clear_bit(Bitmap_sync, &rdev->flags);
+ err = rdev->mddev->pers->
+ hot_add_disk(rdev->mddev, rdev);
+ if (err) {
+@@ -5773,6 +5780,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
+ info->raid_disk < mddev->raid_disks) {
+ rdev->raid_disk = info->raid_disk;
+ set_bit(In_sync, &rdev->flags);
++ clear_bit(Bitmap_sync, &rdev->flags);
+ } else
+ rdev->raid_disk = -1;
+ } else
+@@ -7731,7 +7739,8 @@ static int remove_and_add_spares(struct mddev *mddev,
+ if (test_bit(Faulty, &rdev->flags))
+ continue;
+ if (mddev->ro &&
+- rdev->saved_raid_disk < 0)
++ ! (rdev->saved_raid_disk >= 0 &&
++ !test_bit(Bitmap_sync, &rdev->flags)))
+ continue;
+
+ rdev->recovery_offset = 0;
+@@ -7812,9 +7821,12 @@ void md_check_recovery(struct mddev *mddev)
+ * As we only add devices that are already in-sync,
+ * we can activate the spares immediately.
+ */
+- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ remove_and_add_spares(mddev, NULL);
+- mddev->pers->spare_active(mddev);
++ /* There is no thread, but we need to call
++ * ->spare_active and clear saved_raid_disk
++ */
++ md_reap_sync_thread(mddev);
++ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ goto unlock;
+ }
+
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 608050c43f17..636756450a19 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -129,6 +129,9 @@ struct md_rdev {
+ enum flag_bits {
+ Faulty, /* device is known to have a fault */
+ In_sync, /* device is in_sync with rest of array */
++ Bitmap_sync, /* ..actually, not quite In_sync. Need a
++ * bitmap-based recovery to get fully in sync
++ */
+ Unmerged, /* device is being added to array and should
+ * be considered for bvec_merge_fn but not
+ * yet for actual IO
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 73dc8a377522..308575d23550 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1319,7 +1319,7 @@ read_again:
+ /* Could not read all from this device, so we will
+ * need another r10_bio.
+ */
+- sectors_handled = (r10_bio->sectors + max_sectors
++ sectors_handled = (r10_bio->sector + max_sectors
+ - bio->bi_sector);
+ r10_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+@@ -1327,7 +1327,7 @@ read_again:
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+- spin_unlock(&conf->device_lock);
++ spin_unlock_irq(&conf->device_lock);
+ /* Cannot call generic_make_request directly
+ * as that will be queued in __generic_make_request
+ * and subsequent mempool_alloc might block
+@@ -3220,10 +3220,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ if (j == conf->copies) {
+ /* Cannot recover, so abort the recovery or
+ * record a bad block */
+- put_buf(r10_bio);
+- if (rb2)
+- atomic_dec(&rb2->remaining);
+- r10_bio = rb2;
+ if (any_working) {
+ /* problem is that there are bad blocks
+ * on other device(s)
+@@ -3255,6 +3251,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ mirror->recovery_disabled
+ = mddev->recovery_disabled;
+ }
++ put_buf(r10_bio);
++ if (rb2)
++ atomic_dec(&rb2->remaining);
++ r10_bio = rb2;
+ break;
+ }
+ }
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 8a0665d04567..93174c6ab37c 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3502,7 +3502,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
+ */
+ set_bit(R5_Insync, &dev->flags);
+
+- if (rdev && test_bit(R5_WriteError, &dev->flags)) {
++ if (test_bit(R5_WriteError, &dev->flags)) {
+ /* This flag does not apply to '.replacement'
+ * only to .rdev, so make sure to check that*/
+ struct md_rdev *rdev2 = rcu_dereference(
+@@ -3515,7 +3515,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
+ } else
+ clear_bit(R5_WriteError, &dev->flags);
+ }
+- if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
++ if (test_bit(R5_MadeGood, &dev->flags)) {
+ /* This flag does not apply to '.replacement'
+ * only to .rdev, so make sure to check that*/
+ struct md_rdev *rdev2 = rcu_dereference(
+diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
+index 114f5ef4b73a..2832576d8b12 100644
+--- a/drivers/pinctrl/pinctrl-baytrail.c
++++ b/drivers/pinctrl/pinctrl-baytrail.c
+@@ -512,7 +512,6 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
+
+ static const struct acpi_device_id byt_gpio_acpi_match[] = {
+ { "INT33B2", 0 },
+- { "INT33FC", 0 },
+ { }
+ };
+ MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
+diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
+index 34ab0679e992..b95a8b3395ae 100644
+--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
++++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
+@@ -325,8 +325,8 @@ static int apci1032_auto_attach(struct comedi_device *dev,
+ s = &dev->subdevices[1];
+ if (dev->irq) {
+ dev->read_subdev = s;
+- s->type = COMEDI_SUBD_DI | SDF_CMD_READ;
+- s->subdev_flags = SDF_READABLE;
++ s->type = COMEDI_SUBD_DI;
++ s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->n_chan = 1;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
+index 78cea193504f..c9702bfa88be 100644
+--- a/drivers/staging/comedi/drivers/adl_pci9111.c
++++ b/drivers/staging/comedi/drivers/adl_pci9111.c
+@@ -869,7 +869,7 @@ static int pci9111_auto_attach(struct comedi_device *dev,
+ pci9111_reset(dev);
+
+ if (pcidev->irq > 0) {
+- ret = request_irq(dev->irq, pci9111_interrupt,
++ ret = request_irq(pcidev->irq, pci9111_interrupt,
+ IRQF_SHARED, dev->board_name, dev);
+ if (ret)
+ return ret;
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index aaa22867e656..1440d0b4a7bc 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1537,6 +1537,8 @@ static int pl011_startup(struct uart_port *port)
+ /*
+ * Provoke TX FIFO interrupt into asserting.
+ */
++ spin_lock_irq(&uap->port.lock);
++
+ cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
+ writew(cr, uap->port.membase + UART011_CR);
+ writew(0, uap->port.membase + UART011_FBRD);
+@@ -1561,6 +1563,8 @@ static int pl011_startup(struct uart_port *port)
+ cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
+ writew(cr, uap->port.membase + UART011_CR);
+
++ spin_unlock_irq(&uap->port.lock);
++
+ /*
+ * initialise the old status of the modem signals
+ */
+@@ -1629,11 +1633,13 @@ static void pl011_shutdown(struct uart_port *port)
+ * it during startup().
+ */
+ uap->autorts = false;
++ spin_lock_irq(&uap->port.lock);
+ cr = readw(uap->port.membase + UART011_CR);
+ uap->old_cr = cr;
+ cr &= UART011_CR_RTS | UART011_CR_DTR;
+ cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
+ writew(cr, uap->port.membase + UART011_CR);
++ spin_unlock_irq(&uap->port.lock);
+
+ /*
+ * disable break condition and fifos
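The amba-pl011 hunks all enforce one rule: UART011_CR is also rewritten from the interrupt handler, so every read-modify-write of it in startup() and shutdown() must run under the port lock with interrupts off. As a hedged sketch (field names as in the driver above):

    static void pl011_rmw_cr_sketch(struct uart_amba_port *uap,
                                    unsigned int set_bits)
    {
            unsigned int cr;

            spin_lock_irq(&uap->port.lock);
            cr = readw(uap->port.membase + UART011_CR);
            cr |= set_bits;
            writew(cr, uap->port.membase + UART011_CR);
            spin_unlock_irq(&uap->port.lock);
    }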
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 89f96719a29b..f27c1d12a1fa 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -3064,8 +3064,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
+ * thus don't need to be hashed. They also don't need a name until a
+ * user wants to identify the object in /proc/pid/fd/. The little hack
+ * below allows us to generate a name for these objects on demand:
++ *
++ * Some pseudo inodes are mountable. When they are mounted
++ * path->dentry == path->mnt->mnt_root. In that case don't call d_dname
++ * and instead have d_path return the mounted path.
+ */
+- if (path->dentry->d_op && path->dentry->d_op->d_dname)
++ if (path->dentry->d_op && path->dentry->d_op->d_dname &&
++ (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
+ return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
+
+ rcu_read_lock();
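For context on the guard above: d_dname is how nameless pseudo inodes (pipes, anon inodes, sockets) print themselves on demand. A hedged sketch of such a callback, built on the kernel's dynamic_dname() helper (the name format here is illustrative):

    static char *anon_dname(struct dentry *dentry, char *buffer, int buflen)
    {
            /* generates "anon_inode:[<ino>]" only when someone asks */
            return dynamic_dname(dentry, buffer, buflen, "anon_inode:[%lu]",
                                 dentry->d_inode->i_ino);
    }

The fix makes d_path() skip this callback when such a dentry has been mounted, so the mounted path is reported instead of the synthetic name.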
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 9f4935b8f208..3595180b62ac 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -510,13 +510,16 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+ }
+ WARN_ON(inode->i_state & I_SYNC);
+ /*
+- * Skip inode if it is clean. We don't want to mess with writeback
+- * lists in this function since flusher thread may be doing for example
+- * sync in parallel and if we move the inode, it could get skipped. So
+- * here we make sure inode is on some writeback list and leave it there
+- * unless we have completely cleaned the inode.
++ * Skip inode if it is clean and we have no outstanding writeback in
++ * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
++ * function since flusher thread may be doing for example sync in
++ * parallel and if we move the inode, it could get skipped. So here we
++ * make sure inode is on some writeback list and leave it there unless
++ * we have completely cleaned the inode.
+ */
+- if (!(inode->i_state & I_DIRTY))
++ if (!(inode->i_state & I_DIRTY) &&
++ (wbc->sync_mode != WB_SYNC_ALL ||
++ !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
+ goto out;
+ inode->i_state |= I_SYNC;
+ spin_unlock(&inode->i_lock);
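Restated as a predicate, the new skip test in writeback_single_inode() reads: skip only if the inode is clean and, for WB_SYNC_ALL, no pages are still tagged as under writeback. A hedged sketch:

    static bool can_skip_inode(struct inode *inode,
                               struct writeback_control *wbc)
    {
            if (inode->i_state & I_DIRTY)
                    return false;   /* dirty: must be written */
            if (wbc->sync_mode == WB_SYNC_ALL &&
                mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
                    return false;   /* sync must wait for in-flight I/O */
            return true;
    }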
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 12987666e5f0..630db362a2d1 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -1610,10 +1610,22 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
+ if (!(attr->ia_valid & ATTR_GID) || gid_eq(ogid, ngid))
+ ogid = ngid = NO_GID_QUOTA_CHANGE;
+
+- error = gfs2_quota_lock(ip, nuid, ngid);
++ error = get_write_access(inode);
+ if (error)
+ return error;
+
++ error = gfs2_rs_alloc(ip);
++ if (error)
++ goto out;
++
++ error = gfs2_rindex_update(sdp);
++ if (error)
++ goto out;
++
++ error = gfs2_quota_lock(ip, nuid, ngid);
++ if (error)
++ goto out;
++
+ if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
+ !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
+ error = gfs2_quota_check(ip, nuid, ngid);
+@@ -1640,6 +1652,8 @@ out_end_trans:
+ gfs2_trans_end(sdp);
+ out_gunlock_q:
+ gfs2_quota_unlock(ip);
++out:
++ put_write_access(inode);
+ return error;
+ }
+
+diff --git a/fs/namespace.c b/fs/namespace.c
+index da5c49483430..84447dbcb650 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2888,7 +2888,7 @@ bool fs_fully_visible(struct file_system_type *type)
+ struct inode *inode = child->mnt_mountpoint->d_inode;
+ if (!S_ISDIR(inode->i_mode))
+ goto next;
+- if (inode->i_nlink != 2)
++ if (inode->i_nlink > 2)
+ goto next;
+ }
+ visible = true;
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 9f6b486b6c01..a1a191634abc 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1440,17 +1440,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
+
+ nilfs_clear_logs(&sci->sc_segbufs);
+
+- err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+- if (unlikely(err))
+- return err;
+-
+ if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
+ err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
+ sci->sc_freesegs,
+ sci->sc_nfreesegs,
+ NULL);
+ WARN_ON(err); /* does not happen */
++ sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
+ }
++
++ err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
++ if (unlikely(err))
++ return err;
++
+ nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
+ sci->sc_stage = prev_stage;
+ }
+diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
+index fe68a5a98583..7032518f8542 100644
+--- a/include/linux/crash_dump.h
++++ b/include/linux/crash_dump.h
+@@ -6,6 +6,8 @@
+ #include <linux/proc_fs.h>
+ #include <linux/elf.h>
+
++#include <asm/pgtable.h> /* for pgprot_t */
++
+ #define ELFCORE_ADDR_MAX (-1ULL)
+ #define ELFCORE_ADDR_ERR (-2ULL)
+
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index 2ab11dc38077..5677fb58e688 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -447,7 +447,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
+ static inline struct i2c_adapter *
+ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
+ {
+-#if IS_ENABLED(I2C_MUX)
++#if IS_ENABLED(CONFIG_I2C_MUX)
+ struct device *parent = adapter->dev.parent;
+
+ if (parent != NULL && parent->type == &i2c_adapter_type)
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 8b6e55ee8855..fed08c0c543b 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -762,11 +762,14 @@ static __always_inline void *lowmem_page_address(const struct page *page)
+ #endif
+
+ #if defined(WANT_PAGE_VIRTUAL)
+-#define page_address(page) ((page)->virtual)
+-#define set_page_address(page, address) \
+- do { \
+- (page)->virtual = (address); \
+- } while(0)
++static inline void *page_address(const struct page *page)
++{
++ return page->virtual;
++}
++static inline void set_page_address(struct page *page, void *address)
++{
++ page->virtual = address;
++}
+ #define page_address_init() do { } while(0)
+ #endif
+
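The mm.h change swaps object-like macros for static inlines with identical behavior; the gain is argument type checking and a const-correct reader, which the macros could not express. A standalone illustration (struct page reduced to the one field involved):

    struct page { void *virtual; };

    static inline void *page_address(const struct page *page)
    {
            return page->virtual;   /* accepts const struct page * */
    }

    static inline void set_page_address(struct page *page, void *address)
    {
            page->virtual = address;
    }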
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 690cfacaed71..458953ca4d50 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1175,7 +1175,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ * do not allow it to share a thread group or signal handlers or
+ * parent with the forking task.
+ */
+- if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) {
++ if (clone_flags & CLONE_SIGHAND) {
+ if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
+ (task_active_pid_ns(current) !=
+ current->nsproxy->pid_ns_for_children))
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 47962456ed87..292a266e0d42 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1154,7 +1154,7 @@ alloc:
+ new_page = NULL;
+
+ if (unlikely(!new_page)) {
+- if (is_huge_zero_pmd(orig_pmd)) {
++ if (!page) {
+ ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
+ address, pmd, orig_pmd, haddr);
+ } else {
+@@ -1181,7 +1181,7 @@ alloc:
+
+ count_vm_event(THP_FAULT_ALLOC);
+
+- if (is_huge_zero_pmd(orig_pmd))
++ if (!page)
+ clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
+ else
+ copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
+@@ -1207,7 +1207,7 @@ alloc:
+ page_add_new_anon_rmap(new_page, vma, haddr);
+ set_pmd_at(mm, haddr, pmd, entry);
+ update_mmu_cache_pmd(vma, address, pmd);
+- if (is_huge_zero_pmd(orig_pmd)) {
++ if (!page) {
+ add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ put_huge_zero_page();
+ } else {
+diff --git a/mm/util.c b/mm/util.c
+index eaf63fc2c92f..96da2d7c076c 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -387,7 +387,10 @@ struct address_space *page_mapping(struct page *page)
+ {
+ struct address_space *mapping = page->mapping;
+
+- VM_BUG_ON(PageSlab(page));
++ /* This happens if someone calls flush_dcache_page on slab page */
++ if (unlikely(PageSlab(page)))
++ return NULL;
++
+ if (unlikely(PageSwapCache(page))) {
+ swp_entry_t entry;
+
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 392a0445265c..25d5ebaf25f9 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -220,6 +220,14 @@ static int inode_alloc_security(struct inode *inode)
+ return 0;
+ }
+
++static void inode_free_rcu(struct rcu_head *head)
++{
++ struct inode_security_struct *isec;
++
++ isec = container_of(head, struct inode_security_struct, rcu);
++ kmem_cache_free(sel_inode_cache, isec);
++}
++
+ static void inode_free_security(struct inode *inode)
+ {
+ struct inode_security_struct *isec = inode->i_security;
+@@ -230,8 +238,16 @@ static void inode_free_security(struct inode *inode)
+ list_del_init(&isec->list);
+ spin_unlock(&sbsec->isec_lock);
+
+- inode->i_security = NULL;
+- kmem_cache_free(sel_inode_cache, isec);
++ /*
++ * The inode may still be referenced in a path walk and
++ * a call to selinux_inode_permission() can be made
++ * after inode_free_security() is called. Ideally, the VFS
++ * wouldn't do this, but fixing that is a much harder
++ * job. For now, simply free the i_security via RCU, and
++ * leave the current inode->i_security pointer intact.
++ * The inode will be freed after the RCU grace period too.
++ */
++ call_rcu(&isec->rcu, inode_free_rcu);
+ }
+
+ static int file_alloc_security(struct file *file)
+diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
+index aa47bcabb5f6..6fd9dd256a62 100644
+--- a/security/selinux/include/objsec.h
++++ b/security/selinux/include/objsec.h
+@@ -38,7 +38,10 @@ struct task_security_struct {
+
+ struct inode_security_struct {
+ struct inode *inode; /* back pointer to inode object */
+- struct list_head list; /* list of inode_security_struct */
++ union {
++ struct list_head list; /* list of inode_security_struct */
++ struct rcu_head rcu; /* for freeing the inode_security_struct */
++ };
+ u32 task_sid; /* SID of creating task */
+ u32 sid; /* SID of this object */
+ u16 sclass; /* security class of this object */
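The SELinux change that closes this patch is the canonical deferred-free-via-RCU pattern: the list linkage and the rcu_head share a union (an object is never simultaneously on the list and pending free), and the actual free is postponed past the grace period with call_rcu(). A generic hedged template:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct obj {
            union {
                    struct list_head list;  /* while live and listed */
                    struct rcu_head rcu;    /* while waiting to be freed */
            };
            int payload;
    };

    static void obj_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct obj, rcu));
    }

    static void obj_release(struct obj *o)
    {
            /* lockless readers under rcu_read_lock() may still see o */
            call_rcu(&o->rcu, obj_free_rcu);
    }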
diff --git a/1009_linux-3.12.10.patch b/1009_linux-3.12.10.patch
new file mode 100644
index 00000000..9d675cb5
--- /dev/null
+++ b/1009_linux-3.12.10.patch
@@ -0,0 +1,5391 @@
+diff --git a/Documentation/devicetree/bindings/ata/marvell.txt b/Documentation/devicetree/bindings/ata/marvell.txt
+index b5cdd20cde9c..1c8351604d38 100644
+--- a/Documentation/devicetree/bindings/ata/marvell.txt
++++ b/Documentation/devicetree/bindings/ata/marvell.txt
+@@ -1,7 +1,7 @@
+ * Marvell Orion SATA
+
+ Required Properties:
+-- compatibility : "marvell,orion-sata"
++- compatibility : "marvell,orion-sata" or "marvell,armada-370-sata"
+ - reg : Address range of controller
+ - interrupts : Interrupt controller is using
+ - nr-ports : Number of SATA ports in use.
+diff --git a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+index 82e8f6f17179..582b4652a82a 100644
+--- a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
++++ b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+@@ -5,7 +5,11 @@ Required properties :
+
+ - reg : Offset and length of the register set for the device
+ - compatible : Should be "marvell,mv64xxx-i2c" or "allwinner,sun4i-i2c"
+- or "marvell,mv78230-i2c"
++ or "marvell,mv78230-i2c" or "marvell,mv78230-a0-i2c"
++ Note: Only use "marvell,mv78230-a0-i2c" for a very rare,
++ initial version of the SoC which had broken offload
++ support. Linux auto-detects this and sets it
++ appropriately.
+ - interrupts : The interrupt number
+
+ Optional properties :
+diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
+index c097e0f020fe..aa959fd22450 100644
+--- a/Documentation/i2c/busses/i2c-piix4
++++ b/Documentation/i2c/busses/i2c-piix4
+@@ -13,7 +13,7 @@ Supported adapters:
+ * AMD SP5100 (SB700 derivative found on some server mainboards)
+ Datasheet: Publicly available at the AMD website
+ http://support.amd.com/us/Embedded_TechDocs/44413.pdf
+- * AMD Hudson-2, CZ
++ * AMD Hudson-2, ML, CZ
+ Datasheet: Not publicly available
+ * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
+ Datasheet: Publicly available at the SMSC website http://www.smsc.com
+diff --git a/Makefile b/Makefile
+index 4ee77eaa7b1f..49b64402f947 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
+index ffb19b7da999..9df43dfe511e 100644
+--- a/arch/alpha/lib/csum_partial_copy.c
++++ b/arch/alpha/lib/csum_partial_copy.c
+@@ -378,6 +378,11 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+ __wsum
+ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+ {
+- return csum_partial_copy_from_user((__force const void __user *)src,
+- dst, len, sum, NULL);
++ __wsum checksum;
++ mm_segment_t oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ checksum = csum_partial_copy_from_user((__force const void __user *)src,
++ dst, len, sum, NULL);
++ set_fs(oldfs);
++ return checksum;
+ }
+diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
+index b97ab017d4a2..364a63dce6c5 100644
+--- a/arch/arm/boot/dts/armada-370-xp.dtsi
++++ b/arch/arm/boot/dts/armada-370-xp.dtsi
+@@ -143,7 +143,7 @@
+ };
+
+ sata@a0000 {
+- compatible = "marvell,orion-sata";
++ compatible = "marvell,armada-370-sata";
+ reg = <0xa0000 0x5000>;
+ interrupts = <55>;
+ clocks = <&gateclk 15>, <&gateclk 30>;
+diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
+index c3e514837074..cf7de80377f1 100644
+--- a/arch/arm/boot/dts/at91sam9g45.dtsi
++++ b/arch/arm/boot/dts/at91sam9g45.dtsi
+@@ -618,6 +618,7 @@
+ compatible = "atmel,hsmci";
+ reg = <0xfff80000 0x600>;
+ interrupts = <11 IRQ_TYPE_LEVEL_HIGH 0>;
++ pinctrl-names = "default";
+ dmas = <&dma 1 AT91_DMA_CFG_PER_ID(0)>;
+ dma-names = "rxtx";
+ #address-cells = <1>;
+@@ -629,6 +630,7 @@
+ compatible = "atmel,hsmci";
+ reg = <0xfffd0000 0x600>;
+ interrupts = <29 IRQ_TYPE_LEVEL_HIGH 0>;
++ pinctrl-names = "default";
+ dmas = <&dma 1 AT91_DMA_CFG_PER_ID(13)>;
+ dma-names = "rxtx";
+ #address-cells = <1>;
+diff --git a/arch/arm/mach-at91/sam9_smc.c b/arch/arm/mach-at91/sam9_smc.c
+index 99a0a1d2b7dc..b26156bf15db 100644
+--- a/arch/arm/mach-at91/sam9_smc.c
++++ b/arch/arm/mach-at91/sam9_smc.c
+@@ -101,7 +101,7 @@ static void sam9_smc_cs_read(void __iomem *base,
+ /* Pulse register */
+ val = __raw_readl(base + AT91_SMC_PULSE);
+
+- config->nwe_setup = val & AT91_SMC_NWEPULSE;
++ config->nwe_pulse = val & AT91_SMC_NWEPULSE;
+ config->ncs_write_pulse = (val & AT91_SMC_NCS_WRPULSE) >> 8;
+ config->nrd_pulse = (val & AT91_SMC_NRDPULSE) >> 16;
+ config->ncs_read_pulse = (val & AT91_SMC_NCS_RDPULSE) >> 24;
+diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
+index 2d04f0e21870..878aebe98dcc 100644
+--- a/arch/arm/mach-mvebu/Makefile
++++ b/arch/arm/mach-mvebu/Makefile
+@@ -3,7 +3,7 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
+
+ AFLAGS_coherency_ll.o := -Wa,-march=armv7-a
+
+-obj-y += system-controller.o
++obj-y += system-controller.o mvebu-soc-id.o
+ obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o
+ obj-$(CONFIG_ARCH_MVEBU) += coherency.o coherency_ll.o pmsu.o
+ obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
+index e2acff98e750..f6c9d1d85c14 100644
+--- a/arch/arm/mach-mvebu/armada-370-xp.c
++++ b/arch/arm/mach-mvebu/armada-370-xp.c
+@@ -21,6 +21,7 @@
+ #include <linux/clocksource.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/mbus.h>
++#include <linux/slab.h>
+ #include <asm/hardware/cache-l2x0.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+@@ -28,6 +29,7 @@
+ #include "armada-370-xp.h"
+ #include "common.h"
+ #include "coherency.h"
++#include "mvebu-soc-id.h"
+
+ static void __init armada_370_xp_map_io(void)
+ {
+@@ -45,8 +47,38 @@ static void __init armada_370_xp_timer_and_clk_init(void)
+ #endif
+ }
+
++static void __init i2c_quirk(void)
++{
++ struct device_node *np;
++ u32 dev, rev;
++
++ /*
++ * Only revisions more recent than A0 support the offload

++ * mechanism. We can exit only if we are sure that we can
++ * get the SoC revision and it is more recent than A0.
++ */
++ if (mvebu_get_soc_id(&rev, &dev) == 0 && dev > MV78XX0_A0_REV)
++ return;
++
++ for_each_compatible_node(np, NULL, "marvell,mv78230-i2c") {
++ struct property *new_compat;
++
++ new_compat = kzalloc(sizeof(*new_compat), GFP_KERNEL);
++
++ new_compat->name = kstrdup("compatible", GFP_KERNEL);
++ new_compat->length = sizeof("marvell,mv78230-a0-i2c");
++ new_compat->value = kstrdup("marvell,mv78230-a0-i2c",
++ GFP_KERNEL);
++
++ of_update_property(np, new_compat);
++ }
++ return;
++}
++
+ static void __init armada_370_xp_dt_init(void)
+ {
++ if (of_machine_is_compatible("plathome,openblocks-ax3-4"))
++ i2c_quirk();
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ }
+
+diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.c b/arch/arm/mach-mvebu/mvebu-soc-id.c
+new file mode 100644
+index 000000000000..fe4fc1cbdfaf
+--- /dev/null
++++ b/arch/arm/mach-mvebu/mvebu-soc-id.c
+@@ -0,0 +1,119 @@
++/*
++ * ID and revision information for mvebu SoCs
++ *
++ * Copyright (C) 2014 Marvell
++ *
++ * Gregory CLEMENT <gregory.clement@free-electrons.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ *
++ * All the mvebu SoCs have information related to their variant and
++ * revision that can be read from the PCI control register. This is
++ * done before the PCI initialization to avoid any conflict. Once the
++ * ID and revision are retrieved, the mapping is freed.
++ */
++
++#define pr_fmt(fmt) "mvebu-soc-id: " fmt
++
++#include <linux/clk.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include "mvebu-soc-id.h"
++
++#define PCIE_DEV_ID_OFF 0x0
++#define PCIE_DEV_REV_OFF 0x8
++
++#define SOC_ID_MASK 0xFFFF0000
++#define SOC_REV_MASK 0xFF
++
++static u32 soc_dev_id;
++static u32 soc_rev;
++static bool is_id_valid;
++
++static const struct of_device_id mvebu_pcie_of_match_table[] = {
++ { .compatible = "marvell,armada-xp-pcie", },
++ { .compatible = "marvell,armada-370-pcie", },
++ {},
++};
++
++int mvebu_get_soc_id(u32 *dev, u32 *rev)
++{
++ if (is_id_valid) {
++ *dev = soc_dev_id;
++ *rev = soc_rev;
++ return 0;
++ } else
++ return -1;
++}
++
++static int __init mvebu_soc_id_init(void)
++{
++ struct device_node *np;
++ int ret = 0;
++ void __iomem *pci_base;
++ struct clk *clk;
++ struct device_node *child;
++
++ np = of_find_matching_node(NULL, mvebu_pcie_of_match_table);
++ if (!np)
++ return ret;
++
++ /*
++ * ID and revision are available from any port, so we
++ * just pick the first one
++ */
++ child = of_get_next_child(np, NULL);
++ if (child == NULL) {
++ pr_err("cannot get pci node\n");
++ ret = -ENOMEM;
++ goto clk_err;
++ }
++
++ clk = of_clk_get_by_name(child, NULL);
++ if (IS_ERR(clk)) {
++ pr_err("cannot get clock\n");
++ ret = -ENOMEM;
++ goto clk_err;
++ }
++
++ ret = clk_prepare_enable(clk);
++ if (ret) {
++ pr_err("cannot enable clock\n");
++ goto clk_err;
++ }
++
++ pci_base = of_iomap(child, 0);
++ if (IS_ERR(pci_base)) {
++ pr_err("cannot map registers\n");
++ ret = -ENOMEM;
++ goto res_ioremap;
++ }
++
++ /* SoC ID */
++ soc_dev_id = readl(pci_base + PCIE_DEV_ID_OFF) >> 16;
++
++ /* SoC revision */
++ soc_rev = readl(pci_base + PCIE_DEV_REV_OFF) & SOC_REV_MASK;
++
++ is_id_valid = true;
++
++ pr_info("MVEBU SoC ID=0x%X, Rev=0x%X\n", soc_dev_id, soc_rev);
++
++ iounmap(pci_base);
++
++res_ioremap:
++ clk_disable_unprepare(clk);
++
++clk_err:
++ of_node_put(child);
++ of_node_put(np);
++
++ return ret;
++}
++core_initcall(mvebu_soc_id_init);
++
+diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.h b/arch/arm/mach-mvebu/mvebu-soc-id.h
+new file mode 100644
+index 000000000000..31654252fe35
+--- /dev/null
++++ b/arch/arm/mach-mvebu/mvebu-soc-id.h
+@@ -0,0 +1,32 @@
++/*
++ * Marvell EBU SoC ID and revision definitions.
++ *
++ * Copyright (C) 2014 Marvell Semiconductor
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#ifndef __LINUX_MVEBU_SOC_ID_H
++#define __LINUX_MVEBU_SOC_ID_H
++
++/* Armada XP ID */
++#define MV78230_DEV_ID 0x7823
++#define MV78260_DEV_ID 0x7826
++#define MV78460_DEV_ID 0x7846
++
++/* Armada XP Revision */
++#define MV78XX0_A0_REV 0x1
++#define MV78XX0_B0_REV 0x2
++
++#ifdef CONFIG_ARCH_MVEBU
++int mvebu_get_soc_id(u32 *dev, u32 *rev);
++#else
++static inline int mvebu_get_soc_id(u32 *dev, u32 *rev)
++{
++ return -1;
++}
++#endif
++
++#endif /* __LINUX_MVEBU_SOC_ID_H */
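A hedged usage sketch for the new helper: a nonzero return means the ID registers could not be read, and callers fall back to the most conservative assumption. (Note the header declares the (dev, rev) parameter order, while i2c_quirk() above passes (&rev, &dev) and tests the second output; the sketch below follows the header.)

    static bool assume_a0_silicon(void)
    {
            u32 dev, rev;

            /* treat an unreadable ID as the oldest (A0) revision */
            if (mvebu_get_soc_id(&dev, &rev) != 0)
                    return true;
            return rev <= MV78XX0_A0_REV;
    }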
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index 99b44e0e8d86..8c9106fd8163 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -637,10 +637,10 @@ load_ind:
+ emit(ARM_MUL(r_A, r_A, r_X), ctx);
+ break;
+ case BPF_S_ALU_DIV_K:
+- /* current k == reciprocal_value(userspace k) */
++ if (k == 1)
++ break;
+ emit_mov_i(r_scratch, k, ctx);
+- /* A = top 32 bits of the product */
+- emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
++ emit_udiv(r_A, r_A, r_scratch, ctx);
+ break;
+ case BPF_S_ALU_DIV_X:
+ update_on_xread(ctx);
+diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
+index 2f9b751878ba..de65f66ea64e 100644
+--- a/arch/parisc/include/asm/cacheflush.h
++++ b/arch/parisc/include/asm/cacheflush.h
+@@ -132,7 +132,6 @@ void mark_rodata_ro(void);
+ static inline void *kmap(struct page *page)
+ {
+ might_sleep();
+- flush_dcache_page(page);
+ return page_address(page);
+ }
+
+@@ -144,7 +143,6 @@ static inline void kunmap(struct page *page)
+ static inline void *kmap_atomic(struct page *page)
+ {
+ pagefault_disable();
+- flush_dcache_page(page);
+ return page_address(page);
+ }
+
+diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
+index c53fc63149e8..637fe031aa84 100644
+--- a/arch/parisc/include/asm/page.h
++++ b/arch/parisc/include/asm/page.h
+@@ -29,7 +29,8 @@ struct page;
+ void clear_page_asm(void *page);
+ void copy_page_asm(void *to, void *from);
+ #define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
+-#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
++void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
++ struct page *pg);
+
+ /* #define CONFIG_PARISC_TMPALIAS */
+
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index a72545554a31..ac87a40502e6 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -388,6 +388,20 @@ void flush_kernel_dcache_page_addr(void *addr)
+ }
+ EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+
++void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
++ struct page *pg)
++{
++ /* Copy using kernel mapping. No coherency is needed (all in
++ kunmap) for the `to' page. However, the `from' page needs to
++ be flushed through a mapping equivalent to the user mapping
++ before it can be accessed through the kernel mapping. */
++ preempt_disable();
++ flush_dcache_page_asm(__pa(vfrom), vaddr);
++ preempt_enable();
++ copy_page_asm(vto, vfrom);
++}
++EXPORT_SYMBOL(copy_user_page);
++
+ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+ {
+ unsigned long flags;
+diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
+index 89e3ef2496ac..d0b5fca6b077 100644
+--- a/arch/powerpc/include/asm/topology.h
++++ b/arch/powerpc/include/asm/topology.h
+@@ -22,7 +22,15 @@ struct device_node;
+
+ static inline int cpu_to_node(int cpu)
+ {
+- return numa_cpu_lookup_table[cpu];
++ int nid;
++
++ nid = numa_cpu_lookup_table[cpu];
++
++ /*
++ * During early boot, the numa-cpu lookup table might not have been
++ * setup for all CPUs yet. In such cases, default to node 0.
++ */
++ return (nid < 0) ? 0 : nid;
+ }
+
+ #define parent_node(node) (node)
+diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
+index 654932727873..bfb82365bc7a 100644
+--- a/arch/powerpc/kernel/cacheinfo.c
++++ b/arch/powerpc/kernel/cacheinfo.c
+@@ -794,6 +794,9 @@ static void remove_cache_dir(struct cache_dir *cache_dir)
+ {
+ remove_index_dirs(cache_dir);
+
++ /* Remove cache dir from sysfs */
++ kobject_del(cache_dir->kobj);
++
+ kobject_put(cache_dir->kobj);
+
+ kfree(cache_dir);
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 62a2b5ab08ed..e1ab62e0d548 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -82,10 +82,13 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+
+ /* CPU points to the first thread of the core */
+ if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
++#ifdef CONFIG_KVM_XICS
+ int real_cpu = cpu + vcpu->arch.ptid;
+ if (paca[real_cpu].kvm_hstate.xics_phys)
+ xics_wake_cpu(real_cpu);
+- else if (cpu_online(cpu))
++ else
++#endif
++ if (cpu_online(cpu))
+ smp_send_reschedule(cpu);
+ }
+ put_cpu();
+@@ -1089,7 +1092,9 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
+ smp_wmb();
+ #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
+ if (vcpu->arch.ptid) {
++#ifdef CONFIG_KVM_XICS
+ xics_wake_cpu(cpu);
++#endif
+ ++vc->n_woken;
+ }
+ #endif
+diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
+index 6d6f153b6c1d..c17600de7d59 100644
+--- a/arch/powerpc/kvm/e500_mmu.c
++++ b/arch/powerpc/kvm/e500_mmu.c
+@@ -127,7 +127,7 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
+ }
+
+ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
+- unsigned int eaddr, int as)
++ gva_t eaddr, int as)
+ {
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ unsigned int victim, tsized;
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index c916127f10c3..4788ea2b343a 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -31,6 +31,8 @@
+ #include <asm/sparsemem.h>
+ #include <asm/prom.h>
+ #include <asm/smp.h>
++#include <asm/cputhreads.h>
++#include <asm/topology.h>
+ #include <asm/firmware.h>
+ #include <asm/paca.h>
+ #include <asm/hvcall.h>
+@@ -152,9 +154,22 @@ static void __init get_node_active_region(unsigned long pfn,
+ }
+ }
+
+-static void map_cpu_to_node(int cpu, int node)
++static void reset_numa_cpu_lookup_table(void)
++{
++ unsigned int cpu;
++
++ for_each_possible_cpu(cpu)
++ numa_cpu_lookup_table[cpu] = -1;
++}
++
++static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
+ {
+ numa_cpu_lookup_table[cpu] = node;
++}
++
++static void map_cpu_to_node(int cpu, int node)
++{
++ update_numa_cpu_lookup_table(cpu, node);
+
+ dbg("adding cpu %d to node %d\n", cpu, node);
+
+@@ -522,11 +537,24 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
+ */
+ static int numa_setup_cpu(unsigned long lcpu)
+ {
+- int nid = 0;
+- struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
++ int nid;
++ struct device_node *cpu;
++
++ /*
++ * If a valid cpu-to-node mapping is already available, use it
++ * directly instead of querying the firmware, since it represents
++ * the most recent mapping notified to us by the platform (eg: VPHN).
++ */
++ if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
++ map_cpu_to_node(lcpu, nid);
++ return nid;
++ }
++
++ cpu = of_get_cpu_node(lcpu, NULL);
+
+ if (!cpu) {
+ WARN_ON(1);
++ nid = 0;
+ goto out;
+ }
+
+@@ -1068,6 +1096,7 @@ void __init do_init_bootmem(void)
+ */
+ setup_node_to_cpumask_map();
+
++ reset_numa_cpu_lookup_table();
+ register_cpu_notifier(&ppc64_numa_nb);
+ cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
+ (void *)(unsigned long)boot_cpuid);
+@@ -1446,6 +1475,33 @@ static int update_cpu_topology(void *data)
+ return 0;
+ }
+
++static int update_lookup_table(void *data)
++{
++ struct topology_update_data *update;
++
++ if (!data)
++ return -EINVAL;
++
++ /*
++ * Upon topology update, the numa-cpu lookup table needs to be updated
++ * for all threads in the core, including offline CPUs, to ensure that
++ * future hotplug operations respect the cpu-to-node associativity
++ * properly.
++ */
++ for (update = data; update; update = update->next) {
++ int nid, base, j;
++
++ nid = update->new_nid;
++ base = cpu_first_thread_sibling(update->cpu);
++
++ for (j = 0; j < threads_per_core; j++) {
++ update_numa_cpu_lookup_table(base + j, nid);
++ }
++ }
++
++ return 0;
++}
++
+ /*
+ * Update the node maps and sysfs entries for each cpu whose home node
+ * has changed. Returns 1 when the topology has changed, and 0 otherwise.
+@@ -1514,6 +1570,14 @@ int arch_update_cpu_topology(void)
+
+ stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+
++ /*
++ * Update the numa-cpu lookup table with the new mappings, even for
++ * offline CPUs. It is best to perform this update from the stop-
++ * machine context.
++ */
++ stop_machine(update_lookup_table, &updates[0],
++ cpumask_of(raw_smp_processor_id()));
++
+ for (ud = &updates[0]; ud; ud = ud->next) {
+ unregister_cpu_under_node(ud->cpu, ud->old_nid);
+ register_cpu_under_node(ud->cpu, ud->new_nid);
+diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
+index 2345bdb4d917..ebbb2f1408ff 100644
+--- a/arch/powerpc/net/bpf_jit_comp.c
++++ b/arch/powerpc/net/bpf_jit_comp.c
+@@ -209,10 +209,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
+ }
+ PPC_DIVWU(r_A, r_A, r_X);
+ break;
+- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
++ case BPF_S_ALU_DIV_K: /* A /= K */
++ if (K == 1)
++ break;
+ PPC_LI32(r_scratch1, K);
+- /* Top 32 bits of 64bit result -> A */
+- PPC_MULHWU(r_A, r_A, r_scratch1);
++ PPC_DIVWU(r_A, r_A, r_scratch1);
+ break;
+ case BPF_S_ALU_AND_X:
+ ctx->seen |= SEEN_XREG;
+diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h
+index a61d538756f2..471eb09184d4 100644
+--- a/arch/s390/include/uapi/asm/statfs.h
++++ b/arch/s390/include/uapi/asm/statfs.h
+@@ -35,11 +35,11 @@ struct statfs {
+ struct statfs64 {
+ unsigned int f_type;
+ unsigned int f_bsize;
+- unsigned long f_blocks;
+- unsigned long f_bfree;
+- unsigned long f_bavail;
+- unsigned long f_files;
+- unsigned long f_ffree;
++ unsigned long long f_blocks;
++ unsigned long long f_bfree;
++ unsigned long long f_bavail;
++ unsigned long long f_files;
++ unsigned long long f_ffree;
+ __kernel_fsid_t f_fsid;
+ unsigned int f_namelen;
+ unsigned int f_frsize;
+diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
+index 3a74d8af0d69..d88e846a58f1 100644
+--- a/arch/s390/kvm/diag.c
++++ b/arch/s390/kvm/diag.c
+@@ -122,7 +122,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
+ * - gpr 4 contains the index on the bus (optionally)
+ */
+ ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
+- vcpu->run->s.regs.gprs[2],
++ vcpu->run->s.regs.gprs[2] & 0xffffffff,
+ 8, &vcpu->run->s.regs.gprs[3],
+ vcpu->run->s.regs.gprs[4]);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+@@ -139,7 +139,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
+
+ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
+ {
+- int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
++ int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;
+
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index a5df511e27a2..96a4b150f958 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -368,14 +368,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
+ EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
+ /* lhi %r4,0 */
+ EMIT4(0xa7480000);
+- /* dr %r4,%r12 */
+- EMIT2(0x1d4c);
++ /* dlr %r4,%r12 */
++ EMIT4(0xb997004c);
+ break;
+- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
+- /* m %r4,<d(K)>(%r13) */
+- EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
+- /* lr %r5,%r4 */
+- EMIT2(0x1854);
++ case BPF_S_ALU_DIV_K: /* A /= K */
++ if (K == 1)
++ break;
++ /* lhi %r4,0 */
++ EMIT4(0xa7480000);
++ /* dl %r4,<d(K)>(%r13) */
++ EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
+ break;
+ case BPF_S_ALU_MOD_X: /* A %= X */
+ jit->seen |= SEEN_XREG | SEEN_RET0;
+@@ -385,16 +387,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
+ EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
+ /* lhi %r4,0 */
+ EMIT4(0xa7480000);
+- /* dr %r4,%r12 */
+- EMIT2(0x1d4c);
++ /* dlr %r4,%r12 */
++ EMIT4(0xb997004c);
+ /* lr %r5,%r4 */
+ EMIT2(0x1854);
+ break;
+ case BPF_S_ALU_MOD_K: /* A %= K */
++ if (K == 1) {
++ /* lhi %r5,0 */
++ EMIT4(0xa7580000);
++ break;
++ }
+ /* lhi %r4,0 */
+ EMIT4(0xa7480000);
+- /* d %r4,<d(K)>(%r13) */
+- EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
++ /* dl %r4,<d(K)>(%r13) */
++ EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
+ /* lr %r5,%r4 */
+ EMIT2(0x1854);
+ break;
+diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
+index 218b6b23c378..01fe9946d388 100644
+--- a/arch/sparc/net/bpf_jit_comp.c
++++ b/arch/sparc/net/bpf_jit_comp.c
+@@ -497,9 +497,20 @@ void bpf_jit_compile(struct sk_filter *fp)
+ case BPF_S_ALU_MUL_K: /* A *= K */
+ emit_alu_K(MUL, K);
+ break;
+- case BPF_S_ALU_DIV_K: /* A /= K */
+- emit_alu_K(MUL, K);
+- emit_read_y(r_A);
++ case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/
++ if (K == 1)
++ break;
++ emit_write_y(G0);
++#ifdef CONFIG_SPARC32
++ /* The Sparc v8 architecture requires
++ * three instructions between a %y
++ * register write and the first use.
++ */
++ emit_nop();
++ emit_nop();
++ emit_nop();
++#endif
++ emit_alu_K(DIV, K);
+ break;
+ case BPF_S_ALU_DIV_X: /* A /= X; */
+ emit_cmpi(r_X, 0);
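The arm, powerpc, s390, and sparc JIT hunks above (and the x86 one later in this patch) all fix the same thing: BPF_S_ALU_DIV_K used to be JITed as a multiply by a precomputed reciprocal, which is only an approximation of unsigned division. A self-contained demonstration of the old scheme and one input where it goes wrong:

    #include <stdint.h>
    #include <stdio.h>

    /* Old scheme: K was replaced by reciprocal_value(K) at filter load
     * time and the JITs multiplied, keeping the high 32 bits. */
    static uint32_t reciprocal_value(uint32_t k)
    {
            uint64_t val = (1ULL << 32) + (k - 1);
            return (uint32_t)(val / k);     /* do_div(val, k) in the kernel */
    }

    static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
    {
            return (uint32_t)(((uint64_t)a * r) >> 32);
    }

    int main(void)
    {
            uint32_t a = 1431655770, k = 7;

            /* prints 204522253 vs 204522252: off by one here, which is
             * why the JITs now emit a true unsigned divide */
            printf("%u vs %u\n",
                   reciprocal_divide(a, reciprocal_value(k)), a / k);
            return 0;
    }

Hence the new code emits a real divide and short-circuits K == 1; the K == 0 case is already rejected when the filter is loaded.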
+diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
+index 1df115909758..c7678e43465b 100644
+--- a/arch/x86/include/asm/kvm_para.h
++++ b/arch/x86/include/asm/kvm_para.h
+@@ -85,28 +85,9 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
+ return ret;
+ }
+
+-static inline uint32_t kvm_cpuid_base(void)
+-{
+- if (boot_cpu_data.cpuid_level < 0)
+- return 0; /* So we don't blow up on old processors */
+-
+- if (cpu_has_hypervisor)
+- return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
+-
+- return 0;
+-}
+-
+-static inline bool kvm_para_available(void)
+-{
+- return kvm_cpuid_base() != 0;
+-}
+-
+-static inline unsigned int kvm_arch_para_features(void)
+-{
+- return cpuid_eax(KVM_CPUID_FEATURES);
+-}
+-
+ #ifdef CONFIG_KVM_GUEST
++bool kvm_para_available(void);
++unsigned int kvm_arch_para_features(void);
+ void __init kvm_guest_init(void);
+ void kvm_async_pf_task_wait(u32 token);
+ void kvm_async_pf_task_wake(u32 token);
+@@ -126,6 +107,16 @@ static inline void kvm_spinlock_init(void)
+ #define kvm_async_pf_task_wait(T) do {} while(0)
+ #define kvm_async_pf_task_wake(T) do {} while(0)
+
++static inline bool kvm_para_available(void)
++{
++ return 0;
++}
++
++static inline unsigned int kvm_arch_para_features(void)
++{
++ return 0;
++}
++
+ static inline u32 kvm_read_and_reset_pf_reason(void)
+ {
+ return 0;
+diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
+index bb0465090ae5..228d95f6592a 100644
+--- a/arch/x86/include/uapi/asm/msr-index.h
++++ b/arch/x86/include/uapi/asm/msr-index.h
+@@ -182,6 +182,7 @@
+ #define MSR_AMD64_PATCH_LOADER 0xc0010020
+ #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
+ #define MSR_AMD64_OSVW_STATUS 0xc0010141
++#define MSR_AMD64_LS_CFG 0xc0011020
+ #define MSR_AMD64_DC_CFG 0xc0011022
+ #define MSR_AMD64_BU_CFG2 0xc001102a
+ #define MSR_AMD64_IBSFETCHCTL 0xc0011030
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 903a264af981..28233b9e45cc 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -508,6 +508,16 @@ static void early_init_amd(struct cpuinfo_x86 *c)
+ set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
+ }
+ #endif
++
++ /* F16h erratum 793, CVE-2013-6885 */
++ if (c->x86 == 0x16 && c->x86_model <= 0xf) {
++ u64 val;
++
++ rdmsrl(MSR_AMD64_LS_CFG, val);
++ if (!(val & BIT(15)))
++ wrmsrl(MSR_AMD64_LS_CFG, val | BIT(15));
++ }
++
+ }
+
+ static const int amd_erratum_383[];
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index b2046e4d0b59..f022c54a79a4 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -500,6 +500,38 @@ void __init kvm_guest_init(void)
+ #endif
+ }
+
++static noinline uint32_t __kvm_cpuid_base(void)
++{
++ if (boot_cpu_data.cpuid_level < 0)
++ return 0; /* So we don't blow up on old processors */
++
++ if (cpu_has_hypervisor)
++ return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
++
++ return 0;
++}
++
++static inline uint32_t kvm_cpuid_base(void)
++{
++ static int kvm_cpuid_base = -1;
++
++ if (kvm_cpuid_base == -1)
++ kvm_cpuid_base = __kvm_cpuid_base();
++
++ return kvm_cpuid_base;
++}
++
++bool kvm_para_available(void)
++{
++ return kvm_cpuid_base() != 0;
++}
++EXPORT_SYMBOL_GPL(kvm_para_available);
++
++unsigned int kvm_arch_para_features(void)
++{
++ return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
++}
++
+ static uint32_t __init kvm_detect(void)
+ {
+ return kvm_cpuid_base();
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index 412a5aa0ef94..518d86471b76 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -37,6 +37,7 @@
+
+ #include "irq.h"
+ #include "i8254.h"
++#include "x86.h"
+
+ #ifndef CONFIG_X86_64
+ #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
+@@ -349,6 +350,23 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
+ atomic_set(&ps->pending, 0);
+ ps->irq_ack = 1;
+
++ /*
++ * Do not allow the guest to program periodic timers with small
++ * interval, since the hrtimers are not throttled by the host
++ * scheduler.
++ */
++ if (ps->is_periodic) {
++ s64 min_period = min_timer_period_us * 1000LL;
++
++ if (ps->period < min_period) {
++ pr_info_ratelimited(
++ "kvm: requested %lld ns "
++ "i8254 timer period limited to %lld ns\n",
++ ps->period, min_period);
++ ps->period = min_period;
++ }
++ }
++
+ hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval),
+ HRTIMER_MODE_ABS);
+ }
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 1673940cf9c3..d86ff15fc89f 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -71,9 +71,6 @@
+ #define VEC_POS(v) ((v) & (32 - 1))
+ #define REG_POS(v) (((v) >> 5) << 4)
+
+-static unsigned int min_timer_period_us = 500;
+-module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
+-
+ static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
+ {
+ *((u32 *) (apic->regs + reg_off)) = val;
+@@ -1355,7 +1352,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
+ vcpu->arch.apic_base = value;
+
+ /* update jump label if enable bit changes */
+- if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
++ if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
+ if (value & MSR_IA32_APICBASE_ENABLE)
+ static_key_slow_dec_deferred(&apic_hw_disabled);
+ else
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index eb9b9c9fc3d9..16dc55a39fa3 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -94,6 +94,9 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
+ static bool ignore_msrs = 0;
+ module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
+
++unsigned int min_timer_period_us = 500;
++module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
++
+ bool kvm_has_tsc_control;
+ EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
+ u32 kvm_max_guest_tsc_khz;
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index e224f7a671b6..3186542f2fa3 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -124,5 +124,7 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+
+ extern u64 host_xcr0;
+
++extern unsigned int min_timer_period_us;
++
+ extern struct static_key kvm_no_apic_vcpu;
+ #endif
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 26328e800869..4ed75dd81d05 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -359,15 +359,21 @@ void bpf_jit_compile(struct sk_filter *fp)
+ EMIT2(0x89, 0xd0); /* mov %edx,%eax */
+ break;
+ case BPF_S_ALU_MOD_K: /* A %= K; */
++ if (K == 1) {
++ CLEAR_A();
++ break;
++ }
+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
+ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
+ EMIT2(0xf7, 0xf1); /* div %ecx */
+ EMIT2(0x89, 0xd0); /* mov %edx,%eax */
+ break;
+- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
+- EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
+- EMIT(K, 4);
+- EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
++ case BPF_S_ALU_DIV_K: /* A /= K */
++ if (K == 1)
++ break;
++ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
++ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
++ EMIT2(0xf7, 0xf1); /* div %ecx */
+ break;
+ case BPF_S_ALU_AND_X:
+ seen |= SEEN_XREG;
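[Annotation] The removed BPF_S_ALU_DIV_K sequence multiplied A by a precomputed reciprocal_value(K) and shifted right by 32. That approximation of A / K can overestimate the quotient for large dividends, so the hunk switches the JIT to a real div instruction (matching what went upstream as "bpf: do not use reciprocal divide"). A small user-space sketch of the mismatch, with the two helpers written out as the old include/linux/reciprocal_div.h defined them:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t reciprocal_value(uint32_t k)
	{
		/* ceil(2^32 / k), as in the old reciprocal_div.h */
		return (uint32_t)(((1ULL << 32) + k - 1) / k);
	}

	static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
	{
		return (uint32_t)(((uint64_t)a * r) >> 32);
	}

	int main(void)
	{
		uint32_t k = 3, r = reciprocal_value(k);

		/* the approximation overshoots near UINT32_MAX */
		for (uint32_t a = 0xfffffff0u; a != 0; a++)
			if (reciprocal_divide(a, r) != a / k)
				printf("a=%#x: got %u, want %u\n", a,
				       reciprocal_divide(a, r), a / k);
		return 0;
	}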
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 220fa52b9bd0..f19284d87dfe 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -440,7 +440,7 @@ void __init efi_reserve_boot_services(void)
+ * - Not within any part of the kernel
+ * - Not the bios reserved area
+ */
+- if ((start+size >= __pa_symbol(_text)
++ if ((start + size > __pa_symbol(_text)
+ && start <= __pa_symbol(_end)) ||
+ !e820_all_mapped(start, start+size, E820_RAM) ||
+ memblock_is_region_reserved(start, size)) {
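[Annotation] The efi.c change above tightens an interval test: a boot-services region that ends exactly at __pa_symbol(_text) merely touches the kernel image and does not overlap it, so the comparison must be strict. The general rule for half-open ranges, as a generic sketch (not kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	/* [a, a + alen) and [b, b + blen) overlap iff each one starts
	 * before the other ends; using >= on an edge would also flag
	 * ranges that only abut, which is what the pre-patch check did.
	 */
	static bool ranges_overlap(uint64_t a, uint64_t alen,
				   uint64_t b, uint64_t blen)
	{
		return a < b + blen && b < a + alen;
	}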
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 2c2780a19609..f3c361b5c5e5 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2222,6 +2222,16 @@ int ata_dev_configure(struct ata_device *dev)
+ if (rc)
+ return rc;
+
++ /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
++ if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
++ (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
++ dev->horkage |= ATA_HORKAGE_NOLPM;
++
++ if (dev->horkage & ATA_HORKAGE_NOLPM) {
++ ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
++ dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
++ }
++
+ /* let ACPI work its magic */
+ rc = ata_acpi_on_devcfg(dev);
+ if (rc)
+@@ -4216,6 +4226,23 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
++ /*
++ * Some WD SATA-I drives spin up and down erratically when the link
++ * is put into the slumber mode. We don't have a full list of the
++ * affected devices. Disable LPM if the device matches one of the
++ * known prefixes and is SATA-1. As a side effect LPM partial is
++ * lost too.
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=57211
++ */
++ { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++ { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++ { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++ { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++ { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++ { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++ { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++
+ /* End Marker */
+ { }
+ };
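[Annotation] The new blacklist entries only disable LPM when the drive really is SATA-1: ata_dev_configure() checks word 76 of the IDENTIFY data (ATA_ID_SATA_CAPABILITY), whose low bits advertise Gen1/Gen2/Gen3 signaling. A hedged sketch of that decode (helper name is illustrative):

	/* A drive reporting only Gen1 signaling has (word76 & 0xe) == 0x2;
	 * matching on model prefix alone would also hit later SATA-2
	 * revisions of the same WD families, hence the extra check.
	 */
	static bool ata_id_is_sata1_only(const u16 *id)
	{
		return (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2;
	}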
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 377eb889f555..ef8567de6a75 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -111,12 +111,14 @@ static const char *ata_lpm_policy_names[] = {
+ [ATA_LPM_MIN_POWER] = "min_power",
+ };
+
+-static ssize_t ata_scsi_lpm_store(struct device *dev,
++static ssize_t ata_scsi_lpm_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
+- struct Scsi_Host *shost = class_to_shost(dev);
++ struct Scsi_Host *shost = class_to_shost(device);
+ struct ata_port *ap = ata_shost_to_port(shost);
++ struct ata_link *link;
++ struct ata_device *dev;
+ enum ata_lpm_policy policy;
+ unsigned long flags;
+
+@@ -132,10 +134,20 @@ static ssize_t ata_scsi_lpm_store(struct device *dev,
+ return -EINVAL;
+
+ spin_lock_irqsave(ap->lock, flags);
++
++ ata_for_each_link(link, ap, EDGE) {
++ ata_for_each_dev(dev, &ap->link, ENABLED) {
++ if (dev->horkage & ATA_HORKAGE_NOLPM) {
++ count = -EOPNOTSUPP;
++ goto out_unlock;
++ }
++ }
++ }
++
+ ap->target_lpm_policy = policy;
+ ata_port_schedule_eh(ap);
++out_unlock:
+ spin_unlock_irqrestore(ap->lock, flags);
+-
+ return count;
+ }
+
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index 56be31819897..dc9d4b1ea4ec 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -304,6 +304,7 @@ enum {
+ MV5_LTMODE = 0x30,
+ MV5_PHY_CTL = 0x0C,
+ SATA_IFCFG = 0x050,
++ LP_PHY_CTL = 0x058,
+
+ MV_M2_PREAMP_MASK = 0x7e0,
+
+@@ -431,6 +432,7 @@ enum {
+ MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
+ MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
+ MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
++ MV_HP_FIX_LP_PHY_CTL = (1 << 13), /* fix speed in LP_PHY_CTL ? */
+
+ /* Port private flags (pp_flags) */
+ MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
+@@ -1358,6 +1360,7 @@ static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
+
+ if (ofs != 0xffffffffU) {
+ void __iomem *addr = mv_ap_base(link->ap) + ofs;
++ struct mv_host_priv *hpriv = link->ap->host->private_data;
+ if (sc_reg_in == SCR_CONTROL) {
+ /*
+ * Workaround for 88SX60x1 FEr SATA#26:
+@@ -1374,6 +1377,18 @@ static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
+ */
+ if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
+ val |= 0xf000;
++
++ if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
++ void __iomem *lp_phy_addr =
++ mv_ap_base(link->ap) + LP_PHY_CTL;
++ /*
++ * Set PHY speed according to SControl speed.
++ */
++ if ((val & 0xf0) == 0x10)
++ writelfl(0x7, lp_phy_addr);
++ else
++ writelfl(0x227, lp_phy_addr);
++ }
+ }
+ writelfl(val, addr);
+ return 0;
+@@ -4110,6 +4125,15 @@ static int mv_platform_probe(struct platform_device *pdev)
+ if (rc)
+ goto err;
+
++ /*
++ * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
++ * updated in the LP_PHY_CTL register.
++ */
++ if (pdev->dev.of_node &&
++ of_device_is_compatible(pdev->dev.of_node,
++ "marvell,armada-370-sata"))
++ hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
++
+ /* initialize adapter */
+ rc = mv_init_host(host);
+ if (rc)
+@@ -4209,6 +4233,7 @@ static int mv_platform_resume(struct platform_device *pdev)
+
+ #ifdef CONFIG_OF
+ static struct of_device_id mv_sata_dt_ids[] = {
++ { .compatible = "marvell,armada-370-sata", },
+ { .compatible = "marvell,orion-sata", },
+ {},
+ };
+diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
+index 5bb8e2ddd3b3..156bd3c72770 100644
+--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
++++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
+@@ -410,6 +410,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+ &chip->vendor.read_queue)
+ == 0) {
+ burstcnt = get_burstcount(chip);
++ if (burstcnt < 0)
++ return burstcnt;
+ len = min_t(int, burstcnt, count - size);
+ I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len);
+ size += len;
+@@ -451,7 +453,8 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
+ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
+ size_t len)
+ {
+- u32 status, burstcnt = 0, i, size;
++ u32 status, i, size;
++ int burstcnt = 0;
+ int ret;
+ u8 data;
+ struct i2c_client *client;
+@@ -482,6 +485,8 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
+
+ for (i = 0; i < len - 1;) {
+ burstcnt = get_burstcount(chip);
++ if (burstcnt < 0)
++ return burstcnt;
+ size = min_t(int, len - i - 1, burstcnt);
+ ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size);
+ if (ret < 0)
+diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
+index 57a818b2b5f2..811ad1e4d802 100644
+--- a/drivers/char/tpm/tpm_ppi.c
++++ b/drivers/char/tpm/tpm_ppi.c
+@@ -172,7 +172,7 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
+ * is updated with function index from SUBREQ to SUBREQ2 since PPI
+ * version 1.1
+ */
+- if (strcmp(version, "1.1") == -1)
++ if (strcmp(version, "1.1") < 0)
+ params[2].integer.value = TPM_PPI_FN_SUBREQ;
+ else
+ params[2].integer.value = TPM_PPI_FN_SUBREQ2;
+@@ -182,7 +182,7 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
+ * string/package type. For PPI version 1.0 and 1.1, use buffer type
+ * for compatibility, and use package type since 1.2 according to spec.
+ */
+- if (strcmp(version, "1.2") == -1) {
++ if (strcmp(version, "1.2") < 0) {
+ params[3].type = ACPI_TYPE_BUFFER;
+ params[3].buffer.length = sizeof(req);
+ sscanf(buf, "%d", &req);
+@@ -248,7 +248,7 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev,
+ * (e.g. Capella with PPI 1.0) need integer/string/buffer type, so for
+ * compatibility, define params[3].type as buffer, if PPI version < 1.2
+ */
+- if (strcmp(version, "1.2") == -1) {
++ if (strcmp(version, "1.2") < 0) {
+ params[3].type = ACPI_TYPE_BUFFER;
+ params[3].buffer.length = 0;
+ params[3].buffer.pointer = NULL;
+@@ -390,7 +390,7 @@ static ssize_t show_ppi_operations(char *buf, u32 start, u32 end)
+ kfree(output.pointer);
+ output.length = ACPI_ALLOCATE_BUFFER;
+ output.pointer = NULL;
+- if (strcmp(version, "1.2") == -1)
++ if (strcmp(version, "1.2") < 0)
+ return -EPERM;
+
+ params[2].integer.value = TPM_PPI_FN_GETOPR;
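[Annotation] The four tpm_ppi.c hunks all fix the same C pitfall: strcmp() is only guaranteed to return a negative, zero, or positive value, never specifically -1, so strcmp(version, "1.1") == -1 is not a reliable less-than test. A two-line user-space illustration:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* The return value may happen to be -1 here, but the C
		 * library is free to return any negative number; only
		 * the sign is defined.
		 */
		int r = strcmp("1.1", "1.2");

		printf("strcmp=%d, ordered-before=%d\n", r, r < 0);
		return 0;
	}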
+diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
+index 644fec54681f..f1e9d7bd131f 100644
+--- a/drivers/edac/e752x_edac.c
++++ b/drivers/edac/e752x_edac.c
+@@ -1182,9 +1182,11 @@ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
+ pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
+ pvt->dev_info->err_dev, pvt->bridge_ck);
+
+- if (pvt->bridge_ck == NULL)
++ if (pvt->bridge_ck == NULL) {
+ pvt->bridge_ck = pci_scan_single_device(pdev->bus,
+ PCI_DEVFN(0, 1));
++ pci_dev_get(pvt->bridge_ck);
++ }
+
+ if (pvt->bridge_ck == NULL) {
+ e752x_printk(KERN_ERR, "error reporting device not found:"
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+index 3dc7a997b795..a36f9c1d265c 100644
+--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
++++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+@@ -365,13 +365,13 @@ static u16
+ init_script(struct nouveau_bios *bios, int index)
+ {
+ struct nvbios_init init = { .bios = bios };
+- u16 data;
++ u16 bmp_ver = bmp_version(bios), data;
+
+- if (bmp_version(bios) && bmp_version(bios) < 0x0510) {
+- if (index > 1)
++ if (bmp_ver && bmp_ver < 0x0510) {
++ if (index > 1 || bmp_ver < 0x0100)
+ return 0x0000;
+
+- data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18);
++ data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
+ return nv_ro16(bios, data + (index * 2));
+ }
+
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index d65f3fd895dd..758774f4454c 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -210,6 +210,7 @@ static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
+ {}
+ };
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index cdcbd8368ed3..ff758eded96f 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -151,6 +151,7 @@ config I2C_PIIX4
+ ATI SB700/SP5100
+ ATI SB800
+ AMD Hudson-2
++ AMD ML
+ AMD CZ
+ Serverworks OSB4
+ Serverworks CSB5
+diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
+index d3e9cc3153a9..6f02eb883e5e 100644
+--- a/drivers/i2c/busses/i2c-mv64xxx.c
++++ b/drivers/i2c/busses/i2c-mv64xxx.c
+@@ -692,6 +692,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
+ { .compatible = "allwinner,sun4i-i2c", .data = &mv64xxx_i2c_regs_sun4i},
+ { .compatible = "marvell,mv64xxx-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
+ { .compatible = "marvell,mv78230-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
++ { .compatible = "marvell,mv78230-a0-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
+ {}
+ };
+ MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
+@@ -783,6 +784,10 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
+ drv_data->errata_delay = true;
+ }
+
++ if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
++ drv_data->offload_enabled = false;
++ drv_data->errata_delay = true;
++ }
+ out:
+ return rc;
+ #endif
+diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
+index a028617b8f13..f71b4d381c00 100644
+--- a/drivers/i2c/busses/i2c-piix4.c
++++ b/drivers/i2c/busses/i2c-piix4.c
+@@ -22,7 +22,7 @@
+ Intel PIIX4, 440MX
+ Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
+ ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800
+- AMD Hudson-2, CZ
++ AMD Hudson-2, ML, CZ
+ SMSC Victory66
+
+ Note: we assume there can only be one device, with one or more
+@@ -235,7 +235,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ {
+ unsigned short piix4_smba;
+ unsigned short smba_idx = 0xcd6;
+- u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en;
++ u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status;
++ u8 i2ccfg, i2ccfg_offset = 0x10;
+
+ /* SB800 and later SMBus does not support forcing address */
+ if (force || force_addr) {
+@@ -245,7 +246,15 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ }
+
+ /* Determine the address of the SMBus areas */
+- smb_en = (aux) ? 0x28 : 0x2c;
++ if ((PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
++ PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
++ PIIX4_dev->revision >= 0x41) ||
++ (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
++ PIIX4_dev->device == 0x790b &&
++ PIIX4_dev->revision >= 0x49))
++ smb_en = 0x00;
++ else
++ smb_en = (aux) ? 0x28 : 0x2c;
+
+ if (!request_region(smba_idx, 2, "smba_idx")) {
+ dev_err(&PIIX4_dev->dev, "SMBus base address index region "
+@@ -258,13 +267,22 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ smba_en_hi = inb_p(smba_idx + 1);
+ release_region(smba_idx, 2);
+
+- if ((smba_en_lo & 1) == 0) {
++ if (!smb_en) {
++ smb_en_status = smba_en_lo & 0x10;
++ piix4_smba = smba_en_hi << 8;
++ if (aux)
++ piix4_smba |= 0x20;
++ } else {
++ smb_en_status = smba_en_lo & 0x01;
++ piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
++ }
++
++ if (!smb_en_status) {
+ dev_err(&PIIX4_dev->dev,
+ "Host SMBus controller not enabled!\n");
+ return -ENODEV;
+ }
+
+- piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
+ if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
+ return -ENODEV;
+
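[Annotation] On the newer AMD parts handled above (the smb_en == 0x00 case) the SMBus base lives in the high indexed byte with bit 4 of the low byte as the enable; the legacy SB800 layout keeps the base in the register pair masked to 32-byte alignment, with bit 0 as the enable. A sketch of the legacy decode via the 0xcd6/0xcd7 PM index/data ports (helper name hypothetical; outb_p()/inb_p() are the port accessors the driver already uses):

	static int piix4_decode_smba_legacy(unsigned short smba_idx, u8 smb_en)
	{
		u8 lo, hi;

		outb_p(smb_en, smba_idx);	/* select low byte */
		lo = inb_p(smba_idx + 1);
		outb_p(smb_en + 1, smba_idx);	/* select high byte */
		hi = inb_p(smba_idx + 1);

		if (!(lo & 0x01))		/* bit 0: host enabled? */
			return -ENODEV;
		return ((hi << 8) | lo) & 0xffe0;
	}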
+diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
+index 22d1ae72c282..26fdcac56ad1 100644
+--- a/drivers/md/bcache/bset.c
++++ b/drivers/md/bcache/bset.c
+@@ -935,7 +935,7 @@ static void sort_key_next(struct btree_iter *iter,
+ *i = iter->data[--iter->used];
+ }
+
+-static void btree_sort_fixup(struct btree_iter *iter)
++static struct bkey *btree_sort_fixup(struct btree_iter *iter, struct bkey *tmp)
+ {
+ while (iter->used > 1) {
+ struct btree_iter_set *top = iter->data, *i = top + 1;
+@@ -963,9 +963,22 @@ static void btree_sort_fixup(struct btree_iter *iter)
+ } else {
+ /* can't happen because of comparison func */
+ BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+- bch_cut_back(&START_KEY(i->k), top->k);
++
++ if (bkey_cmp(i->k, top->k) < 0) {
++ bkey_copy(tmp, top->k);
++
++ bch_cut_back(&START_KEY(i->k), tmp);
++ bch_cut_front(i->k, top->k);
++ heap_sift(iter, 0, btree_iter_cmp);
++
++ return tmp;
++ } else {
++ bch_cut_back(&START_KEY(i->k), top->k);
++ }
+ }
+ }
++
++ return NULL;
+ }
+
+ static void btree_mergesort(struct btree *b, struct bset *out,
+@@ -973,15 +986,20 @@ static void btree_mergesort(struct btree *b, struct bset *out,
+ bool fixup, bool remove_stale)
+ {
+ struct bkey *k, *last = NULL;
++ BKEY_PADDED(k) tmp;
+ bool (*bad)(struct btree *, const struct bkey *) = remove_stale
+ ? bch_ptr_bad
+ : bch_ptr_invalid;
+
+ while (!btree_iter_end(iter)) {
+ if (fixup && !b->level)
+- btree_sort_fixup(iter);
++ k = btree_sort_fixup(iter, &tmp.k);
++ else
++ k = NULL;
++
++ if (!k)
++ k = bch_btree_iter_next(iter);
+
+- k = bch_btree_iter_next(iter);
+ if (bad(b, k))
+ continue;
+
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 93174c6ab37c..10e9e46108fd 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2004,6 +2004,7 @@ static void raid5_end_write_request(struct bio *bi, int error)
+ set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
+ } else {
+ if (!uptodate) {
++ set_bit(STRIPE_DEGRADED, &sh->state);
+ set_bit(WriteErrorSeen, &rdev->flags);
+ set_bit(R5_WriteError, &sh->dev[i].flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
+index 522be67b2e68..108453b75ccc 100644
+--- a/drivers/mfd/max77686.c
++++ b/drivers/mfd/max77686.c
+@@ -103,7 +103,7 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
+ max77686->irq_gpio = pdata->irq_gpio;
+ max77686->irq = i2c->irq;
+
+- max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
++ max77686->regmap = devm_regmap_init_i2c(i2c, &max77686_regmap_config);
+ if (IS_ERR(max77686->regmap)) {
+ ret = PTR_ERR(max77686->regmap);
+ dev_err(max77686->dev, "Failed to allocate register map: %d\n",
+diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
+index 0a0448326e9d..98f95614b5b1 100644
+--- a/drivers/misc/mei/hbm.c
++++ b/drivers/misc/mei/hbm.c
+@@ -128,6 +128,17 @@ static bool is_treat_specially_client(struct mei_cl *cl,
+ return false;
+ }
+
++/**
++ * mei_hbm_idle - set hbm to idle state
++ *
++ * @dev: the device structure
++ */
++void mei_hbm_idle(struct mei_device *dev)
++{
++ dev->init_clients_timer = 0;
++ dev->hbm_state = MEI_HBM_IDLE;
++}
++
+ int mei_hbm_start_wait(struct mei_device *dev)
+ {
+ int ret;
+@@ -577,6 +588,14 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
+ mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
+ mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
+
++ /* Ignore spurious messages and prevent reset nesting;
++ * hbm is put to idle during system reset.
++ */
++ if (dev->hbm_state == MEI_HBM_IDLE) {
++ dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n");
++ return;
++ }
++
+ switch (mei_msg->hbm_cmd) {
+ case HOST_START_RES_CMD:
+ version_res = (struct hbm_host_version_response *)mei_msg;
+diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
+index 4ae2e56e404f..210757701207 100644
+--- a/drivers/misc/mei/hbm.h
++++ b/drivers/misc/mei/hbm.h
+@@ -49,6 +49,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
+ hdr->reserved = 0;
+ }
+
++void mei_hbm_idle(struct mei_device *dev);
+ int mei_hbm_start_req(struct mei_device *dev);
+ int mei_hbm_start_wait(struct mei_device *dev);
+ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
+diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
+index 6197018e2f16..3fd43b0e69d1 100644
+--- a/drivers/misc/mei/init.c
++++ b/drivers/misc/mei/init.c
+@@ -139,14 +139,19 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
+ dev->dev_state != MEI_DEV_POWER_DOWN &&
+ dev->dev_state != MEI_DEV_POWER_UP);
+
++ /* We're already in reset, so cancel the init timer.
++ * If the reset was called due to an hbm protocol error,
++ * we need to call mei_hbm_idle() before hw start
++ * so the hbm watchdog won't kick in.
++ */
++ mei_hbm_idle(dev);
++
+ ret = mei_hw_reset(dev, interrupts_enabled);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "hw reset failed disabling the device\n");
+ interrupts_enabled = false;
+- dev->dev_state = MEI_DEV_DISABLED;
+ }
+
+- dev->hbm_state = MEI_HBM_IDLE;
+
+ if (dev->dev_state != MEI_DEV_INITIALIZING &&
+ dev->dev_state != MEI_DEV_POWER_UP) {
+@@ -175,8 +180,6 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
+ memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
+ }
+
+- /* we're already in reset, cancel the init timer */
+- dev->init_clients_timer = 0;
+
+ dev->me_clients_num = 0;
+ dev->rd_msg_hdr = 0;
+@@ -188,6 +191,7 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
+
+ if (!interrupts_enabled) {
+ dev_dbg(&dev->pdev->dev, "intr not enabled end of reset\n");
++ dev->dev_state = MEI_DEV_DISABLED;
+ return;
+ }
+
+diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
+index 4b59cb742dee..1b922e971d3e 100644
+--- a/drivers/misc/mei/interrupt.c
++++ b/drivers/misc/mei/interrupt.c
+@@ -536,7 +536,6 @@ EXPORT_SYMBOL_GPL(mei_irq_write_handler);
+ *
+ * @work: pointer to the work_struct structure
+ *
+- * NOTE: This function is called by timer interrupt work
+ */
+ void mei_timer(struct work_struct *work)
+ {
+@@ -551,18 +550,24 @@ void mei_timer(struct work_struct *work)
+
+
+ mutex_lock(&dev->device_lock);
+- if (dev->dev_state != MEI_DEV_ENABLED) {
+- if (dev->dev_state == MEI_DEV_INIT_CLIENTS) {
+- if (dev->init_clients_timer) {
+- if (--dev->init_clients_timer == 0) {
+- dev_err(&dev->pdev->dev, "reset: init clients timeout hbm_state = %d.\n",
+- dev->hbm_state);
+- mei_reset(dev, 1);
+- }
++
++ /* Catch interrupt stalls during HBM init handshake */
++ if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
++ dev->hbm_state != MEI_HBM_IDLE) {
++
++ if (dev->init_clients_timer) {
++ if (--dev->init_clients_timer == 0) {
++ dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n",
++ dev->hbm_state);
++ mei_reset(dev, 1);
++ goto out;
+ }
+ }
+- goto out;
+ }
++
++ if (dev->dev_state != MEI_DEV_ENABLED)
++ goto out;
++
+ /*** connect/disconnect timeouts ***/
+ list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
+ if (cl_pos->timer_count) {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 4ab4c89c60cd..0399458e6d44 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ struct sk_buff *skb = tx_buf->skb;
+ u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
+ int nbd;
++ u16 split_bd_len = 0;
+
+ /* prefetch skb end pointer to speedup dev_kfree_skb() */
+ prefetch(&skb->end);
+@@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
+ txdata->txq_index, idx, tx_buf, skb);
+
+- /* unmap first bd */
+ tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
+- dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+- BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
+
+ nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
+ #ifdef BNX2X_STOP_ON_ERROR
+@@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+- /* ...and the TSO split header bd since they have no mapping */
++ /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
+ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
++ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
++ split_bd_len = BD_UNMAP_LEN(tx_data_bd);
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
++ /* unmap first bd */
++ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
++ BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
++ DMA_TO_DEVICE);
++
+ /* now free frags */
+ while (nbd > 0) {
+
+diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
+index da2be59505c0..20e71f4ca426 100644
+--- a/drivers/net/ethernet/intel/e1000e/phy.c
++++ b/drivers/net/ethernet/intel/e1000e/phy.c
+@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ * it across the board.
+ */
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
+- if (ret_val)
++ if (ret_val) {
+ /* If the first read fails, another entity may have
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+- udelay(usec_interval);
++ if (usec_interval >= 1000)
++ msleep(usec_interval / 1000);
++ else
++ udelay(usec_interval);
++ }
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & BMSR_LSTATUS)
+ break;
+ if (usec_interval >= 1000)
+- mdelay(usec_interval / 1000);
++ msleep(usec_interval / 1000);
+ else
+ udelay(usec_interval);
+ }
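[Annotation] The phy.c change follows the standard delay rule (Documentation/timers/timers-howto.txt): udelay() busy-waits and is meant for microsecond-scale waits, while waits of a millisecond or more in sleepable context should use msleep(), which yields the CPU instead of spinning like mdelay(). The repeated pattern could be factored as below (hypothetical helper, not part of the patch):

	/* Busy-wait only for sub-millisecond intervals; sleep otherwise. */
	static void phy_poll_delay(u32 usec_interval)
	{
		if (usec_interval >= 1000)
			msleep(usec_interval / 1000);
		else
			udelay(usec_interval);
	}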
+diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
+index bdf697b184ae..f3ae9d94fe71 100644
+--- a/drivers/net/ethernet/via/via-rhine.c
++++ b/drivers/net/ethernet/via/via-rhine.c
+@@ -1615,6 +1615,7 @@ static void rhine_reset_task(struct work_struct *work)
+ goto out_unlock;
+
+ napi_disable(&rp->napi);
++ netif_tx_disable(dev);
+ spin_lock_bh(&rp->lock);
+
+ /* clear all descriptors */
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 8494bb53ebdc..aba04f561760 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1245,7 +1245,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
+ return -ENOMEM;
+
+ urb->num_sgs = num_sgs;
+- sg_init_table(urb->sg, urb->num_sgs);
++ sg_init_table(urb->sg, urb->num_sgs + 1);
+
+ sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
+ total_len += skb_headlen(skb);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 146236891889..32c45c3d820d 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1051,6 +1051,15 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ if (!vs)
+ goto drop;
+
++ /* If the NIC driver gave us an encapsulated packet
++ * with the encapsulation mark, the device checksummed it
++ * for us. Otherwise force the upper layers to verify it.
++ */
++ if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation)
++ skb->ip_summed = CHECKSUM_NONE;
++
++ skb->encapsulation = 0;
++
+ vs->rcv(vs, skb, vxh->vx_vni);
+ return 0;
+
+@@ -1109,17 +1118,6 @@ static void vxlan_rcv(struct vxlan_sock *vs,
+
+ skb_reset_network_header(skb);
+
+- /* If the NIC driver gave us an encapsulated packet with
+- * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
+- * leave the CHECKSUM_UNNECESSARY, the device checksummed it
+- * for us. Otherwise force the upper layers to verify it.
+- */
+- if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
+- !(vxlan->dev->features & NETIF_F_RXCSUM))
+- skb->ip_summed = CHECKSUM_NONE;
+-
+- skb->encapsulation = 0;
+-
+ if (oip6)
+ err = IP6_ECN_decapsulate(oip6, skb);
+ if (oip)
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index ecc6ec4a1edb..4e0a9429e192 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -2608,13 +2608,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
+ ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
+ pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
+
+- /*
+- * Fast channel change across bands is available
+- * only for AR9462 and AR9565.
+- */
+- if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+- pCap->hw_caps |= ATH9K_HW_CAP_FCC_BAND_SWITCH;
+-
+ return 0;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index ab9e3a8410bc..a1ab4ff46818 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -848,20 +848,15 @@ static int ath9k_process_rate(struct ath_common *common,
+ enum ieee80211_band band;
+ unsigned int i = 0;
+ struct ath_softc __maybe_unused *sc = common->priv;
++ struct ath_hw *ah = sc->sc_ah;
+
+- band = hw->conf.chandef.chan->band;
++ band = ah->curchan->chan->band;
+ sband = hw->wiphy->bands[band];
+
+- switch (hw->conf.chandef.width) {
+- case NL80211_CHAN_WIDTH_5:
++ if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ rxs->flag |= RX_FLAG_5MHZ;
+- break;
+- case NL80211_CHAN_WIDTH_10:
++ else if (IS_CHAN_HALF_RATE(ah->curchan))
+ rxs->flag |= RX_FLAG_10MHZ;
+- break;
+- default:
+- break;
+- }
+
+ if (rx_stats->rs_rate & 0x80) {
+ /* HT rate */
+@@ -1175,6 +1170,14 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ ath_start_rx_poll(sc, 3);
+ }
+
++ /*
++ * This shouldn't happen, but have a safety check anyway.
++ */
++ if (WARN_ON(!ah->curchan)) {
++ ret = -EINVAL;
++ goto exit;
++ }
++
+ if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
+ ret =-EINVAL;
+ goto exit;
+@@ -1182,8 +1185,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+
+ ath9k_process_rssi(common, hw, rx_stats, rx_status);
+
+- rx_status->band = hw->conf.chandef.chan->band;
+- rx_status->freq = hw->conf.chandef.chan->center_freq;
++ rx_status->band = ah->curchan->chan->band;
++ rx_status->freq = ah->curchan->chan->center_freq;
+ rx_status->antenna = rx_stats->rs_antenna;
+ rx_status->flag |= RX_FLAG_MACTIME_END;
+
+diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
+index 7f3d461f7e8d..54376fddfaf9 100644
+--- a/drivers/net/wireless/b43/b43.h
++++ b/drivers/net/wireless/b43/b43.h
+@@ -731,8 +731,6 @@ enum b43_firmware_file_type {
+ struct b43_request_fw_context {
+ /* The device we are requesting the fw for. */
+ struct b43_wldev *dev;
+- /* a completion event structure needed if this call is asynchronous */
+- struct completion fw_load_complete;
+ /* a pointer to the firmware object */
+ const struct firmware *blob;
+ /* The type of firmware to request. */
+@@ -809,6 +807,8 @@ enum {
+ struct b43_wldev {
+ struct b43_bus_dev *dev;
+ struct b43_wl *wl;
++ /* a completion event structure needed if this call is asynchronous */
++ struct completion fw_load_complete;
+
+ /* The device initialization status.
+ * Use b43_status() to query. */
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index ccd24f0acb8d..c75237eb55a1 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -2070,6 +2070,7 @@ void b43_do_release_fw(struct b43_firmware_file *fw)
+
+ static void b43_release_firmware(struct b43_wldev *dev)
+ {
++ complete(&dev->fw_load_complete);
+ b43_do_release_fw(&dev->fw.ucode);
+ b43_do_release_fw(&dev->fw.pcm);
+ b43_do_release_fw(&dev->fw.initvals);
+@@ -2095,7 +2096,7 @@ static void b43_fw_cb(const struct firmware *firmware, void *context)
+ struct b43_request_fw_context *ctx = context;
+
+ ctx->blob = firmware;
+- complete(&ctx->fw_load_complete);
++ complete(&ctx->dev->fw_load_complete);
+ }
+
+ int b43_do_request_fw(struct b43_request_fw_context *ctx,
+@@ -2142,7 +2143,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
+ }
+ if (async) {
+ /* do this part asynchronously */
+- init_completion(&ctx->fw_load_complete);
++ init_completion(&ctx->dev->fw_load_complete);
+ err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
+ ctx->dev->dev->dev, GFP_KERNEL,
+ ctx, b43_fw_cb);
+@@ -2150,12 +2151,11 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
+ pr_err("Unable to load firmware\n");
+ return err;
+ }
+- /* stall here until fw ready */
+- wait_for_completion(&ctx->fw_load_complete);
++ wait_for_completion(&ctx->dev->fw_load_complete);
+ if (ctx->blob)
+ goto fw_ready;
+ /* On some ARM systems, the async request will fail, but the next sync
+- * request works. For this reason, we dall through here
++ * request works. For this reason, we fall through here
+ */
+ }
+ err = request_firmware(&ctx->blob, ctx->fwname,
+@@ -2424,6 +2424,7 @@ error:
+
+ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl);
+ static void b43_one_core_detach(struct b43_bus_dev *dev);
++static int b43_rng_init(struct b43_wl *wl);
+
+ static void b43_request_firmware(struct work_struct *work)
+ {
+@@ -2475,6 +2476,10 @@ start_ieee80211:
+ goto err_one_core_detach;
+ wl->hw_registred = true;
+ b43_leds_register(wl->current_dev);
++
++ /* Register HW RNG driver */
++ b43_rng_init(wl);
++
+ goto out;
+
+ err_one_core_detach:
+@@ -4636,9 +4641,6 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
+ if (!dev || b43_status(dev) != B43_STAT_INITIALIZED)
+ return;
+
+- /* Unregister HW RNG driver */
+- b43_rng_exit(dev->wl);
+-
+ b43_set_status(dev, B43_STAT_UNINIT);
+
+ /* Stop the microcode PSM. */
+@@ -4795,9 +4797,6 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
+
+ b43_set_status(dev, B43_STAT_INITIALIZED);
+
+- /* Register HW RNG driver */
+- b43_rng_init(dev->wl);
+-
+ out:
+ return err;
+
+@@ -5464,6 +5463,9 @@ static void b43_bcma_remove(struct bcma_device *core)
+
+ b43_one_core_detach(wldev->dev);
+
++ /* Unregister HW RNG driver */
++ b43_rng_exit(wl);
++
+ b43_leds_unregister(wl);
+
+ ieee80211_free_hw(wl->hw);
+@@ -5541,6 +5543,9 @@ static void b43_ssb_remove(struct ssb_device *sdev)
+
+ b43_one_core_detach(dev);
+
++ /* Unregister HW RNG driver */
++ b43_rng_exit(wl);
++
+ if (list_empty(&wl->devlist)) {
+ b43_leds_unregister(wl);
+ /* Last core on the chip unregistered.
+diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
+index 8cb206a89083..e85d34b76039 100644
+--- a/drivers/net/wireless/b43/xmit.c
++++ b/drivers/net/wireless/b43/xmit.c
+@@ -821,10 +821,10 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
+ * channel number in b43. */
+ if (chanstat & B43_RX_CHAN_5GHZ) {
+ status.band = IEEE80211_BAND_5GHZ;
+- status.freq = b43_freq_to_channel_5ghz(chanid);
++ status.freq = b43_channel_to_freq_5ghz(chanid);
+ } else {
+ status.band = IEEE80211_BAND_2GHZ;
+- status.freq = b43_freq_to_channel_2ghz(chanid);
++ status.freq = b43_channel_to_freq_2ghz(chanid);
+ }
+ break;
+ default:
+diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
+index 572668821862..349c77605231 100644
+--- a/drivers/net/wireless/b43legacy/main.c
++++ b/drivers/net/wireless/b43legacy/main.c
+@@ -3919,6 +3919,7 @@ static void b43legacy_remove(struct ssb_device *dev)
+ * as the ieee80211 unreg will destroy the workqueue. */
+ cancel_work_sync(&wldev->restart_work);
+ cancel_work_sync(&wl->firmware_load);
++ complete(&wldev->fw_load_complete);
+
+ B43legacy_WARN_ON(!wl);
+ if (!wldev->fw.ucode)
+diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
+index ff8cc75c189d..4e6dfbafc2a1 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
++++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
+@@ -272,4 +272,8 @@ static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
+
+ /*********************** END TX SCHEDULER *************************************/
+
++/* Oscillator clock */
++#define OSC_CLK (0xa04068)
++#define OSC_CLK_FORCE_CONTROL (0x8)
++
+ #endif /* __iwl_prph_h__ */
+diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
+index 2fcc8ef88a68..1fd08baa0d32 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
+@@ -446,6 +446,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+
+ out_unregister:
+ ieee80211_unregister_hw(mvm->hw);
++ iwl_mvm_leds_exit(mvm);
+ out_free:
+ iwl_phy_db_free(mvm->phy_db);
+ kfree(mvm->scan_cmd);
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 6bc31003a32c..7bdaf06b8f5a 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -206,6 +206,28 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
+ goto out;
+ }
+
++ if (trans->cfg->host_interrupt_operation_mode) {
++ /*
++ * This is a bit of an abuse: the workaround is needed for
++ * 7260 / 3160 only, so we check host_interrupt_operation_mode
++ * even though it is not related to host_interrupt_operation_mode.
++ *
++ * Enable the oscillator to count wake up time for L1 exit. This
++ * consumes slightly more power (100uA) - but allows to be sure
++ * that we wake up from L1 on time.
++ *
++ * This looks weird: read the same register twice, discard the
++ * value, set a bit, and then read that same register twice
++ * again, just to discard the values. But that's the way the
++ * hardware seems to like it.
++ */
++ iwl_read_prph(trans, OSC_CLK);
++ iwl_read_prph(trans, OSC_CLK);
++ iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
++ iwl_read_prph(trans, OSC_CLK);
++ iwl_read_prph(trans, OSC_CLK);
++ }
++
+ /*
+ * Enable DMA clock and wait for it to stabilize.
+ *
+diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
+index f80f30b6160e..14f63c9f6220 100644
+--- a/drivers/net/wireless/mwifiex/fw.h
++++ b/drivers/net/wireless/mwifiex/fw.h
+@@ -226,7 +226,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
+
+ /* HW_SPEC fw_cap_info */
+
+-#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(13)|BIT(14)))
++#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(12)|BIT(13)))
+
+ #define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3)
+ #define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
+diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
+index 8cf7d50a7603..ba48e64673d9 100644
+--- a/drivers/net/wireless/mwifiex/scan.c
++++ b/drivers/net/wireless/mwifiex/scan.c
+@@ -1681,7 +1681,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ const u8 *ie_buf;
+ size_t ie_len;
+ u16 channel = 0;
+- u64 fw_tsf = 0;
++ __le64 fw_tsf = 0;
+ u16 beacon_size = 0;
+ u32 curr_bcn_bytes;
+ u32 freq;
+@@ -1815,7 +1815,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ ie_buf, ie_len, rssi, GFP_KERNEL);
+ bss_priv = (struct mwifiex_bss_priv *)bss->priv;
+ bss_priv->band = band;
+- bss_priv->fw_tsf = fw_tsf;
++ bss_priv->fw_tsf = le64_to_cpu(fw_tsf);
+ if (priv->media_connected &&
+ !memcmp(bssid,
+ priv->curr_bss_params.bss_descriptor
+diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
+index 9fa90a252b96..e99d8b1aa3bd 100644
+--- a/drivers/net/wireless/rtlwifi/base.c
++++ b/drivers/net/wireless/rtlwifi/base.c
+@@ -1437,7 +1437,8 @@ void rtl_watchdog_wq_callback(void *data)
+ /* if we can't recv beacon for 6s, we should
+ * reconnect this AP
+ */
+- if (rtlpriv->link_info.roam_times >= 3) {
++ if ((rtlpriv->link_info.roam_times >= 3) &&
++ !is_zero_ether_addr(rtlpriv->mac80211.bssid)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "AP off, try to reconnect now\n");
+ rtlpriv->link_info.roam_times = 0;
+diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
+index 733b7ce7f0e2..3550e8ae34eb 100644
+--- a/drivers/net/wireless/rtlwifi/core.c
++++ b/drivers/net/wireless/rtlwifi/core.c
+@@ -46,10 +46,20 @@ void rtl_fw_cb(const struct firmware *firmware, void *context)
+ "Firmware callback routine entered!\n");
+ complete(&rtlpriv->firmware_loading_complete);
+ if (!firmware) {
++ if (rtlpriv->cfg->alt_fw_name) {
++ err = request_firmware(&firmware,
++ rtlpriv->cfg->alt_fw_name,
++ rtlpriv->io.dev);
++ pr_info("Loading alternative firmware %s\n",
++ rtlpriv->cfg->alt_fw_name);
++ if (!err)
++ goto found_alt;
++ }
+ pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ rtlpriv->max_fw_size = 0;
+ return;
+ }
++found_alt:
+ if (firmware->size > rtlpriv->max_fw_size) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Firmware is too big!\n");
+@@ -184,6 +194,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
+ rtlpriv->cfg->maps
+ [RTL_IBSS_INT_MASKS]);
+ }
++ mac->link_state = MAC80211_LINKED;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
+index 21a5cf060677..a6184b6e1d57 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
++++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
+@@ -1078,7 +1078,7 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
+ rtldm->swing_flag_ofdm = true;
+ }
+
+- if (rtldm->swing_idx_cck != rtldm->swing_idx_cck) {
++ if (rtldm->swing_idx_cck_cur != rtldm->swing_idx_cck) {
+ rtldm->swing_idx_cck_cur = rtldm->swing_idx_cck;
+ rtldm->swing_flag_cck = true;
+ }
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+index d2d57a27a7c1..06cb94cf5085 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+@@ -158,6 +158,42 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
+ };
+
++static u32 power_index_reg[6] = {0xc90, 0xc91, 0xc92, 0xc98, 0xc99, 0xc9a};
++
++void dm_restorepowerindex(struct ieee80211_hw *hw)
++{
++ struct rtl_priv *rtlpriv = rtl_priv(hw);
++ u8 index;
++
++ for (index = 0; index < 6; index++)
++ rtl_write_byte(rtlpriv, power_index_reg[index],
++ rtlpriv->dm.powerindex_backup[index]);
++}
++EXPORT_SYMBOL_GPL(dm_restorepowerindex);
++
++void dm_writepowerindex(struct ieee80211_hw *hw, u8 value)
++{
++ struct rtl_priv *rtlpriv = rtl_priv(hw);
++ u8 index;
++
++ for (index = 0; index < 6; index++)
++ rtl_write_byte(rtlpriv, power_index_reg[index], value);
++}
++EXPORT_SYMBOL_GPL(dm_writepowerindex);
++
++void dm_savepowerindex(struct ieee80211_hw *hw)
++{
++ struct rtl_priv *rtlpriv = rtl_priv(hw);
++ u8 index;
++ u8 tmp;
++
++ for (index = 0; index < 6; index++) {
++ tmp = rtl_read_byte(rtlpriv, power_index_reg[index]);
++ rtlpriv->dm.powerindex_backup[index] = tmp;
++ }
++}
++EXPORT_SYMBOL_GPL(dm_savepowerindex);
++
+ static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
+ {
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
+index 518e208c0180..4f232a063636 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
++++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
+@@ -91,6 +91,17 @@
+ #define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+ #define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+
++#define DYNAMIC_FUNC_DISABLE 0x0
++#define DYNAMIC_FUNC_DIG BIT(0)
++#define DYNAMIC_FUNC_HP BIT(1)
++#define DYNAMIC_FUNC_SS BIT(2) /*Tx Power Tracking*/
++#define DYNAMIC_FUNC_BT BIT(3)
++#define DYNAMIC_FUNC_ANT_DIV BIT(4)
++
++#define RSSI_CCK 0
++#define RSSI_OFDM 1
++#define RSSI_DEFAULT 2
++
+ struct swat_t {
+ u8 failure_cnt;
+ u8 try_flag;
+@@ -167,5 +178,8 @@ void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
+ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw);
+ void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw);
++void dm_savepowerindex(struct ieee80211_hw *hw);
++void dm_writepowerindex(struct ieee80211_hw *hw, u8 value);
++void dm_restorepowerindex(struct ieee80211_hw *hw);
+
+ #endif
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
+index 16a0b9e59acf..c16209a336ea 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
+@@ -101,6 +101,15 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel);
+ rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
++ if (rtlpriv->dm.dynamic_txhighpower_lvl ==
++ TXHIGHPWRLEVEL_NORMAL)
++ dm_restorepowerindex(hw);
++ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
++ TXHIGHPWRLEVEL_LEVEL1)
++ dm_writepowerindex(hw, 0x14);
++ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
++ TXHIGHPWRLEVEL_LEVEL2)
++ dm_writepowerindex(hw, 0x10);
+ }
+
+ rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
+index d947e7d350bb..fafa6bac2a3f 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
+@@ -30,3 +30,6 @@
+ #include "../rtl8192ce/dm.h"
+
+ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw);
++void dm_savepowerindex(struct ieee80211_hw *hw);
++void dm_writepowerindex(struct ieee80211_hw *hw, u8 value);
++void dm_restorepowerindex(struct ieee80211_hw *hw);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+index 2119313a737b..b878d56d2f4d 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+@@ -85,17 +85,15 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ if (mac->act_scanning) {
+ tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+ tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+- if (turbo_scanoff) {
+- for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+- tx_agc[idx1] = ppowerlevel[idx1] |
+- (ppowerlevel[idx1] << 8) |
+- (ppowerlevel[idx1] << 16) |
+- (ppowerlevel[idx1] << 24);
+- if (rtlhal->interface == INTF_USB) {
+- if (tx_agc[idx1] > 0x20 &&
+- rtlefuse->external_pa)
+- tx_agc[idx1] = 0x20;
+- }
++ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
++ tx_agc[idx1] = ppowerlevel[idx1] |
++ (ppowerlevel[idx1] << 8) |
++ (ppowerlevel[idx1] << 16) |
++ (ppowerlevel[idx1] << 24);
++ if (rtlhal->interface == INTF_USB) {
++ if (tx_agc[idx1] > 0x20 &&
++ rtlefuse->external_pa)
++ tx_agc[idx1] = 0x20;
+ }
+ }
+ } else {
+@@ -107,7 +105,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ TXHIGHPWRLEVEL_LEVEL2) {
+ tx_agc[RF90_PATH_A] = 0x00000000;
+ tx_agc[RF90_PATH_B] = 0x00000000;
+- } else{
++ } else {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+@@ -373,7 +371,12 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
+ regoffset == RTXAGC_B_MCS07_MCS04)
+ regoffset = 0xc98;
+ for (i = 0; i < 3; i++) {
+- writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
++ if (i != 2)
++ writeVal = (writeVal > 8) ?
++ (writeVal - 8) : 0;
++ else
++ writeVal = (writeVal > 6) ?
++ (writeVal - 6) : 0;
+ rtl_write_byte(rtlpriv, (u32)(regoffset + i),
+ (u8)writeVal);
+ }
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 2bd598526217..8188dcb512f0 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -49,6 +49,9 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless");
+ MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
++MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin");
++MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin");
++MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
+
+ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
+ {
+@@ -68,14 +71,21 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
+ "Can't alloc buffer for fw\n");
+ return 1;
+ }
+-
++ if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
++ !IS_92C_SERIAL(rtlpriv->rtlhal.version)) {
++ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin";
++ } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) {
++ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin";
++ } else {
++ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
++ }
++ /* provide name of alternative file */
++ rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
+ pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ rtlpriv->max_fw_size = 0x4000;
+ err = request_firmware_nowait(THIS_MODULE, 1,
+ rtlpriv->cfg->fw_name, rtlpriv->io.dev,
+ GFP_KERNEL, hw, rtl_fw_cb);
+-
+-
+ return err;
+ }
+
+@@ -306,6 +316,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
++ {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
+ {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+ {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
+ /* HP - Lite-On ,8188CUS Slim Combo */
+diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/rtlwifi/stats.c
+index 8ed31744a054..4f083fc1d360 100644
+--- a/drivers/net/wireless/rtlwifi/stats.c
++++ b/drivers/net/wireless/rtlwifi/stats.c
+@@ -176,6 +176,7 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
+ struct rtl_sta_info *drv_priv = NULL;
+ struct ieee80211_sta *sta = NULL;
+ long undec_sm_pwdb;
++ long undec_sm_cck;
+
+ rcu_read_lock();
+ if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+@@ -185,12 +186,16 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
+ if (sta) {
+ drv_priv = (struct rtl_sta_info *) sta->drv_priv;
+ undec_sm_pwdb = drv_priv->rssi_stat.undec_sm_pwdb;
++ undec_sm_cck = drv_priv->rssi_stat.undec_sm_cck;
+ } else {
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
++ undec_sm_cck = rtlpriv->dm.undec_sm_cck;
+ }
+
+ if (undec_sm_pwdb < 0)
+ undec_sm_pwdb = pstatus->rx_pwdb_all;
++ if (undec_sm_cck < 0)
++ undec_sm_cck = pstatus->rx_pwdb_all;
+ if (pstatus->rx_pwdb_all > (u32) undec_sm_pwdb) {
+ undec_sm_pwdb = (((undec_sm_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+@@ -200,6 +205,15 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
+ undec_sm_pwdb = (((undec_sm_pwdb) * (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ }
++ if (pstatus->rx_pwdb_all > (u32) undec_sm_cck) {
++ undec_sm_cck = (((undec_sm_cck) *
++ (RX_SMOOTH_FACTOR - 1)) +
++ (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
++ undec_sm_cck = undec_sm_cck + 1;
++ } else {
++ undec_sm_cck = (((undec_sm_cck) * (RX_SMOOTH_FACTOR - 1)) +
++ (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
++ }
+
+ if (sta) {
+ drv_priv->rssi_stat.undec_sm_pwdb = undec_sm_pwdb;
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index e56778cac9bf..97924743ecf6 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -484,6 +484,8 @@ static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
+ if (unicast)
+ rtlpriv->link_info.num_rx_inperiod++;
+ }
++ /* static bcn for roaming */
++ rtl_beacon_statistic(hw, skb);
+ }
+ }
+
+@@ -555,7 +557,7 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+ }
+ }
+
+-#define __RX_SKB_MAX_QUEUED 32
++#define __RX_SKB_MAX_QUEUED 64
+
+ static void _rtl_rx_work(unsigned long param)
+ {
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index ad9c37a42709..e576a927fde7 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -1035,6 +1035,7 @@ struct rtl_ht_agg {
+
+ struct rssi_sta {
+ long undec_sm_pwdb;
++ long undec_sm_cck;
+ };
+
+ struct rtl_tid_data {
+@@ -1325,8 +1326,10 @@ struct fast_ant_training {
+ struct rtl_dm {
+ /*PHY status for Dynamic Management */
+ long entry_min_undec_sm_pwdb;
++ long undec_sm_cck;
+ long undec_sm_pwdb; /*out dm */
+ long entry_max_undec_sm_pwdb;
++ s32 ofdm_pkt_cnt;
+ bool dm_initialgain_enable;
+ bool dynamic_txpower_enable;
+ bool current_turbo_edca;
+@@ -1341,6 +1344,7 @@ struct rtl_dm {
+ bool inform_fw_driverctrldm;
+ bool current_mrc_switch;
+ u8 txpowercount;
++ u8 powerindex_backup[6];
+
+ u8 thermalvalue_rxgain;
+ u8 thermalvalue_iqk;
+@@ -1352,7 +1356,9 @@ struct rtl_dm {
+ bool done_txpower;
+ u8 dynamic_txhighpower_lvl; /*Tx high power level */
+ u8 dm_flag; /*Indicate each dynamic mechanism's status. */
++ u8 dm_flag_tmp;
+ u8 dm_type;
++ u8 dm_rssi_sel;
+ u8 txpower_track_control;
+ bool interrupt_migration;
+ bool disable_tx_int;
+@@ -1806,6 +1812,7 @@ struct rtl_hal_cfg {
+ bool write_readback;
+ char *name;
+ char *fw_name;
++ char *alt_fw_name;
+ struct rtl_hal_ops *ops;
+ struct rtl_mod_params *mod_params;
+ struct rtl_hal_usbint_cfg *usb_interface_cfg;
+@@ -1950,6 +1957,7 @@ struct dig_t {
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+ u8 large_fa_hit;
++ u8 dig_dynamic_min;
+ u8 forbidden_igi;
+ u8 dig_state;
+ u8 dig_highpwrstate;
+@@ -2030,22 +2038,15 @@ struct rtl_priv {
+ struct dig_t dm_digtable;
+ struct ps_t dm_pstable;
+
+- /* section shared by individual drivers */
+- union {
+- struct { /* data buffer pointer for USB reads */
+- __le32 *usb_data;
+- int usb_data_index;
+- bool initialized;
+- };
+- struct { /* section for 8723ae */
+- bool reg_init; /* true if regs saved */
+- u32 reg_874;
+- u32 reg_c70;
+- u32 reg_85c;
+- u32 reg_a74;
+- bool bt_operation_on;
+- };
+- };
++ u32 reg_874;
++ u32 reg_c70;
++ u32 reg_85c;
++ u32 reg_a74;
++ bool reg_init; /* true if regs saved */
++ bool bt_operation_on;
++ __le32 *usb_data;
++ int usb_data_index;
++ bool initialized;
+ bool enter_ps; /* true when entering PS */
+ u8 rate_mask[5];
+
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 36808bf25677..3f0f20081979 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -117,6 +117,7 @@ struct netfront_info {
+ } tx_skbs[NET_TX_RING_SIZE];
+ grant_ref_t gref_tx_head;
+ grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
++ struct page *grant_tx_page[NET_TX_RING_SIZE];
+ unsigned tx_skb_freelist;
+
+ spinlock_t rx_lock ____cacheline_aligned_in_smp;
+@@ -395,6 +396,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
+ gnttab_release_grant_reference(
+ &np->gref_tx_head, np->grant_tx_ref[id]);
+ np->grant_tx_ref[id] = GRANT_INVALID_REF;
++ np->grant_tx_page[id] = NULL;
+ add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
+ dev_kfree_skb_irq(skb);
+ }
+@@ -451,6 +453,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+ mfn, GNTMAP_readonly);
+
++ np->grant_tx_page[id] = virt_to_page(data);
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = offset;
+ tx->size = len;
+@@ -496,6 +499,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+ np->xbdev->otherend_id,
+ mfn, GNTMAP_readonly);
+
++ np->grant_tx_page[id] = page;
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = offset;
+ tx->size = bytes;
+@@ -595,6 +599,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ mfn = virt_to_mfn(data);
+ gnttab_grant_foreign_access_ref(
+ ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
++ np->grant_tx_page[id] = virt_to_page(data);
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = offset;
+ tx->size = len;
+@@ -1119,10 +1124,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
+ continue;
+
+ skb = np->tx_skbs[i].skb;
+- gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
+- GNTMAP_readonly);
+- gnttab_release_grant_reference(&np->gref_tx_head,
+- np->grant_tx_ref[i]);
++ get_page(np->grant_tx_page[i]);
++ gnttab_end_foreign_access(np->grant_tx_ref[i],
++ GNTMAP_readonly,
++ (unsigned long)page_address(np->grant_tx_page[i]));
++ np->grant_tx_page[i] = NULL;
+ np->grant_tx_ref[i] = GRANT_INVALID_REF;
+ add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
+ dev_kfree_skb_irq(skb);
+@@ -1131,78 +1137,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
+
+ static void xennet_release_rx_bufs(struct netfront_info *np)
+ {
+- struct mmu_update *mmu = np->rx_mmu;
+- struct multicall_entry *mcl = np->rx_mcl;
+- struct sk_buff_head free_list;
+- struct sk_buff *skb;
+- unsigned long mfn;
+- int xfer = 0, noxfer = 0, unused = 0;
+ int id, ref;
+
+- dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
+- __func__);
+- return;
+-
+- skb_queue_head_init(&free_list);
+-
+ spin_lock_bh(&np->rx_lock);
+
+ for (id = 0; id < NET_RX_RING_SIZE; id++) {
+- ref = np->grant_rx_ref[id];
+- if (ref == GRANT_INVALID_REF) {
+- unused++;
+- continue;
+- }
++ struct sk_buff *skb;
++ struct page *page;
+
+ skb = np->rx_skbs[id];
+- mfn = gnttab_end_foreign_transfer_ref(ref);
+- gnttab_release_grant_reference(&np->gref_rx_head, ref);
+- np->grant_rx_ref[id] = GRANT_INVALID_REF;
+-
+- if (0 == mfn) {
+- skb_shinfo(skb)->nr_frags = 0;
+- dev_kfree_skb(skb);
+- noxfer++;
++ if (!skb)
+ continue;
+- }
+
+- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+- /* Remap the page. */
+- const struct page *page =
+- skb_frag_page(&skb_shinfo(skb)->frags[0]);
+- unsigned long pfn = page_to_pfn(page);
+- void *vaddr = page_address(page);
++ ref = np->grant_rx_ref[id];
++ if (ref == GRANT_INVALID_REF)
++ continue;
+
+- MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
+- mfn_pte(mfn, PAGE_KERNEL),
+- 0);
+- mcl++;
+- mmu->ptr = ((u64)mfn << PAGE_SHIFT)
+- | MMU_MACHPHYS_UPDATE;
+- mmu->val = pfn;
+- mmu++;
++ page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
+
+- set_phys_to_machine(pfn, mfn);
+- }
+- __skb_queue_tail(&free_list, skb);
+- xfer++;
+- }
+-
+- dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
+- __func__, xfer, noxfer, unused);
++ /* gnttab_end_foreign_access() needs a page ref until
++ * foreign access is ended (which may be deferred).
++ */
++ get_page(page);
++ gnttab_end_foreign_access(ref, 0,
++ (unsigned long)page_address(page));
++ np->grant_rx_ref[id] = GRANT_INVALID_REF;
+
+- if (xfer) {
+- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+- /* Do all the remapping work and M2P updates. */
+- MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
+- NULL, DOMID_SELF);
+- mcl++;
+- HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
+- }
++ kfree_skb(skb);
+ }
+
+- __skb_queue_purge(&free_list);
+-
+ spin_unlock_bh(&np->rx_lock);
+ }
+
+@@ -1349,6 +1312,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
+ for (i = 0; i < NET_RX_RING_SIZE; i++) {
+ np->rx_skbs[i] = NULL;
+ np->grant_rx_ref[i] = GRANT_INVALID_REF;
++ np->grant_tx_page[i] = NULL;
+ }
+
+ /* A grant for every tx ring slot */
+diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
+index 903e1285fda0..b0a0d5389f41 100644
+--- a/drivers/parport/parport_pc.c
++++ b/drivers/parport/parport_pc.c
+@@ -2596,8 +2596,6 @@ enum parport_pc_pci_cards {
+ syba_2p_epp,
+ syba_1p_ecp,
+ titan_010l,
+- titan_1284p1,
+- titan_1284p2,
+ avlab_1p,
+ avlab_2p,
+ oxsemi_952,
+@@ -2656,8 +2654,6 @@ static struct parport_pc_pci {
+ /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
+ /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
+ /* titan_010l */ { 1, { { 3, -1 }, } },
+- /* titan_1284p1 */ { 1, { { 0, 1 }, } },
+- /* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } },
+ /* avlab_1p */ { 1, { { 0, 1}, } },
+ /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} },
+ /* The Oxford Semi cards are unusual: 954 doesn't support ECP,
+@@ -2673,8 +2669,8 @@ static struct parport_pc_pci {
+ /* netmos_9705 */ { 1, { { 0, -1 }, } },
+ /* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} },
+ /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
+- /* netmos_9805 */ { 1, { { 0, -1 }, } },
+- /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
++ /* netmos_9805 */ { 1, { { 0, 1 }, } },
++ /* netmos_9815 */ { 2, { { 0, 1 }, { 2, 3 }, } },
+ /* netmos_9901 */ { 1, { { 0, -1 }, } },
+ /* netmos_9865 */ { 1, { { 0, -1 }, } },
+ /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
+@@ -2718,8 +2714,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l },
+- { 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 },
+- { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 },
+ /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
+ /* AFAVLAB_TK9902 */
+ { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p},
+diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
+index 119d2ddedfe7..6ebf3067bde4 100644
+--- a/drivers/pinctrl/pinctrl-sunxi.c
++++ b/drivers/pinctrl/pinctrl-sunxi.c
+@@ -469,12 +469,6 @@ static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
+ return val;
+ }
+
+-static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
+- unsigned offset, int value)
+-{
+- return pinctrl_gpio_direction_output(chip->base + offset);
+-}
+-
+ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+ {
+@@ -498,6 +492,13 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
+ spin_unlock_irqrestore(&pctl->lock, flags);
+ }
+
++static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
++ unsigned offset, int value)
++{
++ sunxi_pinctrl_gpio_set(chip, offset, value);
++ return pinctrl_gpio_direction_output(chip->base + offset);
++}
++
+ static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
+index a8e43cf70fac..0ed96df20162 100644
+--- a/drivers/platform/x86/hp_accel.c
++++ b/drivers/platform/x86/hp_accel.c
+@@ -77,6 +77,7 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
+ static struct acpi_device_id lis3lv02d_device_ids[] = {
+ {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
+ {"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */
++ {"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */
+ {"", 0},
+ };
+ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
+diff --git a/drivers/rtc/rtc-max8907.c b/drivers/rtc/rtc-max8907.c
+index 8e45b3c4aa2f..3032178bd9e6 100644
+--- a/drivers/rtc/rtc-max8907.c
++++ b/drivers/rtc/rtc-max8907.c
+@@ -51,7 +51,7 @@ static irqreturn_t max8907_irq_handler(int irq, void *data)
+ {
+ struct max8907_rtc *rtc = data;
+
+- regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0);
++ regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0);
+
+ rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+@@ -64,7 +64,7 @@ static void regs_to_tm(u8 *regs, struct rtc_time *tm)
+ bcd2bin(regs[RTC_YEAR1]) - 1900;
+ tm->tm_mon = bcd2bin(regs[RTC_MONTH] & 0x1f) - 1;
+ tm->tm_mday = bcd2bin(regs[RTC_DATE] & 0x3f);
+- tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07) - 1;
++ tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07);
+ if (regs[RTC_HOUR] & HOUR_12) {
+ tm->tm_hour = bcd2bin(regs[RTC_HOUR] & 0x01f);
+ if (tm->tm_hour == 12)
+@@ -88,7 +88,7 @@ static void tm_to_regs(struct rtc_time *tm, u8 *regs)
+ regs[RTC_YEAR1] = bin2bcd(low);
+ regs[RTC_MONTH] = bin2bcd(tm->tm_mon + 1);
+ regs[RTC_DATE] = bin2bcd(tm->tm_mday);
+- regs[RTC_WEEKDAY] = tm->tm_wday + 1;
++ regs[RTC_WEEKDAY] = tm->tm_wday;
+ regs[RTC_HOUR] = bin2bcd(tm->tm_hour);
+ regs[RTC_MIN] = bin2bcd(tm->tm_min);
+ regs[RTC_SEC] = bin2bcd(tm->tm_sec);
+@@ -153,7 +153,7 @@ static int max8907_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ tm_to_regs(&alrm->time, regs);
+
+ /* Disable alarm while we update the target time */
+- ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0);
++ ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0);
+ if (ret < 0)
+ return ret;
+
+@@ -163,8 +163,7 @@ static int max8907_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ return ret;
+
+ if (alrm->enabled)
+- ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL,
+- 0x7f, 0x7f);
++ ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x77);
+
+ return ret;
+ }
+diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
+index f8ca7becacca..8ebab9c64be4 100644
+--- a/drivers/scsi/bfa/bfad.c
++++ b/drivers/scsi/bfa/bfad.c
+@@ -1832,7 +1832,7 @@ out:
+ static u32 *
+ bfad_load_fwimg(struct pci_dev *pdev)
+ {
+- if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
++ if (bfa_asic_id_ct2(pdev->device)) {
+ if (bfi_image_ct2_size == 0)
+ bfad_read_firmware(pdev, &bfi_image_ct2,
+ &bfi_image_ct2_size, BFAD_FW_FILE_CT2);
+@@ -1842,12 +1842,14 @@ bfad_load_fwimg(struct pci_dev *pdev)
+ bfad_read_firmware(pdev, &bfi_image_ct,
+ &bfi_image_ct_size, BFAD_FW_FILE_CT);
+ return bfi_image_ct;
+- } else {
++ } else if (bfa_asic_id_cb(pdev->device)) {
+ if (bfi_image_cb_size == 0)
+ bfad_read_firmware(pdev, &bfi_image_cb,
+ &bfi_image_cb_size, BFAD_FW_FILE_CB);
+ return bfi_image_cb;
+ }
++
++ return NULL;
+ }
+
+ static void
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index 74b88efde6ad..b26f1a5cc0ec 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -957,6 +957,10 @@ static void virtscsi_remove(struct virtio_device *vdev)
+ #ifdef CONFIG_PM
+ static int virtscsi_freeze(struct virtio_device *vdev)
+ {
++ struct Scsi_Host *sh = virtio_scsi_host(vdev);
++ struct virtio_scsi *vscsi = shost_priv(sh);
++
++ unregister_hotcpu_notifier(&vscsi->nb);
+ virtscsi_remove_vqs(vdev);
+ return 0;
+ }
+@@ -965,8 +969,17 @@ static int virtscsi_restore(struct virtio_device *vdev)
+ {
+ struct Scsi_Host *sh = virtio_scsi_host(vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
++ int err;
++
++ err = virtscsi_init(vdev, vscsi);
++ if (err)
++ return err;
++
++ err = register_hotcpu_notifier(&vscsi->nb);
++ if (err)
++ vdev->config->del_vqs(vdev);
+
+- return virtscsi_init(vdev, vscsi);
++ return err;
+ }
+ #endif
+
+diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+index a0e009717a5a..d6047b9535ae 100644
+--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
++++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+@@ -179,7 +179,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
+ LNET_UNLINK, LNET_INS_AFTER, &me_h);
+ if (rc != 0) {
+ CERROR("%s: LNetMEAttach failed x"LPU64"/%d: rc = %d\n",
+- desc->bd_export->exp_obd->obd_name, xid,
++ desc->bd_import->imp_obd->obd_name, xid,
+ posted_md, rc);
+ break;
+ }
+@@ -189,7 +189,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
+ &desc->bd_mds[posted_md]);
+ if (rc != 0) {
+ CERROR("%s: LNetMDAttach failed x"LPU64"/%d: rc = %d\n",
+- desc->bd_export->exp_obd->obd_name, xid,
++ desc->bd_import->imp_obd->obd_name, xid,
+ posted_md, rc);
+ rc2 = LNetMEUnlink(me_h);
+ LASSERT(rc2 == 0);
+@@ -219,7 +219,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
+ /* Holler if peer manages to touch buffers before he knows the xid */
+ if (desc->bd_md_count != total_md)
+ CWARN("%s: Peer %s touched %d buffers while I registered\n",
+- desc->bd_export->exp_obd->obd_name, libcfs_id2str(peer),
++ desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
+ total_md - desc->bd_md_count);
+ spin_unlock(&desc->bd_lock);
+
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index c812d6c7dc31..e3a005da776b 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -358,6 +358,10 @@ static u8 key_2char2num(u8 hch, u8 lch)
+ return (hex_to_bin(hch) << 4) | hex_to_bin(lch);
+ }
+
++static const struct device_type wlan_type = {
++ .name = "wlan",
++};
++
+ /*
+ * drv_init() - a device potentially for us
+ *
+@@ -393,6 +397,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
+ padapter->pusb_intf = pusb_intf;
+ usb_set_intfdata(pusb_intf, pnetdev);
+ SET_NETDEV_DEV(pnetdev, &pusb_intf->dev);
++ pnetdev->dev.type = &wlan_type;
+ /* step 2. */
+ padapter->dvobj_init = &r8712_usb_dvobj_init;
+ padapter->dvobj_deinit = &r8712_usb_dvobj_deinit;
+diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
+index 4aa5ef54b683..22e117440f94 100644
+--- a/drivers/staging/vt6656/baseband.c
++++ b/drivers/staging/vt6656/baseband.c
+@@ -1464,7 +1464,6 @@ void BBvUpdatePreEDThreshold(struct vnt_private *pDevice, int bScanning)
+
+ if( bScanning )
+ { // need Max sensitivity //RSSI -69, -70,....
+- if(pDevice->byBBPreEDIndex == 0) break;
+ pDevice->byBBPreEDIndex = 0;
+ ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
+ ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x30); //CR206(0xCE)
+@@ -1607,7 +1606,6 @@ void BBvUpdatePreEDThreshold(struct vnt_private *pDevice, int bScanning)
+
+ if( bScanning )
+ { // need Max sensitivity //RSSI -69, -70, ...
+- if(pDevice->byBBPreEDIndex == 0) break;
+ pDevice->byBBPreEDIndex = 0;
+ ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
+ ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x24); //CR206(0xCE)
+@@ -1759,7 +1757,6 @@ void BBvUpdatePreEDThreshold(struct vnt_private *pDevice, int bScanning)
+ case RF_VT3342A0: //RobertYu:20060627, testing table
+ if( bScanning )
+ { // need Max sensitivity //RSSI -67, -68, ...
+- if(pDevice->byBBPreEDIndex == 0) break;
+ pDevice->byBBPreEDIndex = 0;
+ ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
+ ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x38); //CR206(0xCE)
+diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
+index 19d3cf451b88..34a24b78c1a7 100644
+--- a/drivers/staging/vt6656/card.c
++++ b/drivers/staging/vt6656/card.c
+@@ -731,7 +731,7 @@ u64 CARDqGetNextTBTT(u64 qwTSF, u16 wBeaconInterval)
+
+ uBeaconInterval = wBeaconInterval * 1024;
+ // Next TBTT = ((local_current_TSF / beacon_interval) + 1 ) * beacon_interval
+- uLowNextTBTT = ((qwTSF & 0xffffffffU) >> 10) << 10;
++ uLowNextTBTT = ((qwTSF & 0xffffffffULL) >> 10) << 10;
+ uLowRemain = (uLowNextTBTT) % uBeaconInterval;
+ uHighRemain = ((0x80000000 % uBeaconInterval) * 2 * (u32)(qwTSF >> 32))
+ % uBeaconInterval;
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index e06ec4393803..d7ac040e0dc1 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -552,14 +552,14 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
+ size_t index;
+ struct zram_meta *meta;
+
+- flush_work(&zram->free_work);
+-
+ down_write(&zram->init_lock);
+ if (!zram->init_done) {
+ up_write(&zram->init_lock);
+ return;
+ }
+
++ flush_work(&zram->free_work);
++
+ meta = zram->meta;
+ zram->init_done = 0;
+
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index d5c724b317aa..e12f2aab3c87 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -52,7 +52,7 @@
+ static LIST_HEAD(g_tiqn_list);
+ static LIST_HEAD(g_np_list);
+ static DEFINE_SPINLOCK(tiqn_lock);
+-static DEFINE_SPINLOCK(np_lock);
++static DEFINE_MUTEX(np_lock);
+
+ static struct idr tiqn_idr;
+ struct idr sess_idr;
+@@ -307,6 +307,9 @@ bool iscsit_check_np_match(
+ return false;
+ }
+
++/*
++ * Called with mutex np_lock held
++ */
+ static struct iscsi_np *iscsit_get_np(
+ struct __kernel_sockaddr_storage *sockaddr,
+ int network_transport)
+@@ -314,11 +317,10 @@ static struct iscsi_np *iscsit_get_np(
+ struct iscsi_np *np;
+ bool match;
+
+- spin_lock_bh(&np_lock);
+ list_for_each_entry(np, &g_np_list, np_list) {
+- spin_lock(&np->np_thread_lock);
++ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+- spin_unlock(&np->np_thread_lock);
++ spin_unlock_bh(&np->np_thread_lock);
+ continue;
+ }
+
+@@ -330,13 +332,11 @@ static struct iscsi_np *iscsit_get_np(
+ * while iscsi_tpg_add_network_portal() is called.
+ */
+ np->np_exports++;
+- spin_unlock(&np->np_thread_lock);
+- spin_unlock_bh(&np_lock);
++ spin_unlock_bh(&np->np_thread_lock);
+ return np;
+ }
+- spin_unlock(&np->np_thread_lock);
++ spin_unlock_bh(&np->np_thread_lock);
+ }
+- spin_unlock_bh(&np_lock);
+
+ return NULL;
+ }
+@@ -350,16 +350,22 @@ struct iscsi_np *iscsit_add_np(
+ struct sockaddr_in6 *sock_in6;
+ struct iscsi_np *np;
+ int ret;
++
++ mutex_lock(&np_lock);
++
+ /*
+ * Locate the existing struct iscsi_np if already active..
+ */
+ np = iscsit_get_np(sockaddr, network_transport);
+- if (np)
++ if (np) {
++ mutex_unlock(&np_lock);
+ return np;
++ }
+
+ np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
+ if (!np) {
+ pr_err("Unable to allocate memory for struct iscsi_np\n");
++ mutex_unlock(&np_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -382,6 +388,7 @@ struct iscsi_np *iscsit_add_np(
+ ret = iscsi_target_setup_login_socket(np, sockaddr);
+ if (ret != 0) {
+ kfree(np);
++ mutex_unlock(&np_lock);
+ return ERR_PTR(ret);
+ }
+
+@@ -390,6 +397,7 @@ struct iscsi_np *iscsit_add_np(
+ pr_err("Unable to create kthread: iscsi_np\n");
+ ret = PTR_ERR(np->np_thread);
+ kfree(np);
++ mutex_unlock(&np_lock);
+ return ERR_PTR(ret);
+ }
+ /*
+@@ -400,10 +408,10 @@ struct iscsi_np *iscsit_add_np(
+ * point because iscsi_np has not been added to g_np_list yet.
+ */
+ np->np_exports = 1;
++ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+
+- spin_lock_bh(&np_lock);
+ list_add_tail(&np->np_list, &g_np_list);
+- spin_unlock_bh(&np_lock);
++ mutex_unlock(&np_lock);
+
+ pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
+ np->np_ip, np->np_port, np->np_transport->name);
+@@ -470,9 +478,9 @@ int iscsit_del_np(struct iscsi_np *np)
+
+ np->np_transport->iscsit_free_np(np);
+
+- spin_lock_bh(&np_lock);
++ mutex_lock(&np_lock);
+ list_del(&np->np_list);
+- spin_unlock_bh(&np_lock);
++ mutex_unlock(&np_lock);
+
+ pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
+ np->np_ip, np->np_port, np->np_transport->name);
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index a972145dbaaa..76dc32fc5e1b 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -1192,7 +1192,7 @@ get_target:
+ */
+ alloc_tags:
+ tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
+- tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
++ tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
+ tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
+
+ ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index 570df9d2a5d2..4f6e01cf67f7 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -2670,6 +2670,10 @@ static void serial8250_config_port(struct uart_port *port, int flags)
+ if (port->type == PORT_16550A && port->iotype == UPIO_AU)
+ up->bugs |= UART_BUG_NOMSR;
+
++ /* HW bugs may trigger IRQ while IIR == NO_INT */
++ if (port->type == PORT_TEGRA)
++ up->bugs |= UART_BUG_NOMSR;
++
+ if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
+ autoconfig_irq(up);
+
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index c810da7c7a88..f5df8b7067ad 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1260,10 +1260,10 @@ static int pci_quatech_init(struct pci_dev *dev)
+ unsigned long base = pci_resource_start(dev, 0);
+ if (base) {
+ u32 tmp;
+- outl(inl(base + 0x38), base + 0x38);
++ outl(inl(base + 0x38) | 0x00002000, base + 0x38);
+ tmp = inl(base + 0x3c);
+ outl(tmp | 0x01000000, base + 0x3c);
+- outl(tmp, base + 0x3c);
++ outl(tmp &= ~0x01000000, base + 0x3c);
+ }
+ }
+ return 0;
+@@ -1545,6 +1545,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_TITAN_800E 0xA014
+ #define PCI_DEVICE_ID_TITAN_200EI 0xA016
+ #define PCI_DEVICE_ID_TITAN_200EISI 0xA017
++#define PCI_DEVICE_ID_TITAN_200V3 0xA306
+ #define PCI_DEVICE_ID_TITAN_400V3 0xA310
+ #define PCI_DEVICE_ID_TITAN_410V3 0xA312
+ #define PCI_DEVICE_ID_TITAN_800V3 0xA314
+@@ -4140,6 +4141,9 @@ static struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_4000000 },
++ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200V3,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_b0_bt_2_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 6b0f75eac8a2..41bb8387e80d 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -824,9 +824,6 @@ static void atmel_release_rx_dma(struct uart_port *port)
+ atmel_port->desc_rx = NULL;
+ atmel_port->chan_rx = NULL;
+ atmel_port->cookie_rx = -EINVAL;
+-
+- if (!atmel_port->is_usart)
+- del_timer_sync(&atmel_port->uart_timer);
+ }
+
+ static void atmel_rx_from_dma(struct uart_port *port)
+@@ -1228,9 +1225,6 @@ static void atmel_release_rx_pdc(struct uart_port *port)
+ DMA_FROM_DEVICE);
+ kfree(pdc->buf);
+ }
+-
+- if (!atmel_port->is_usart)
+- del_timer_sync(&atmel_port->uart_timer);
+ }
+
+ static void atmel_rx_from_pdc(struct uart_port *port)
+@@ -1587,12 +1581,13 @@ static int atmel_startup(struct uart_port *port)
+ /* enable xmit & rcvr */
+ UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
+
++ setup_timer(&atmel_port->uart_timer,
++ atmel_uart_timer_callback,
++ (unsigned long)port);
++
+ if (atmel_use_pdc_rx(port)) {
+ /* set UART timeout */
+ if (!atmel_port->is_usart) {
+- setup_timer(&atmel_port->uart_timer,
+- atmel_uart_timer_callback,
+- (unsigned long)port);
+ mod_timer(&atmel_port->uart_timer,
+ jiffies + uart_poll_timeout(port));
+ /* set USART timeout */
+@@ -1607,9 +1602,6 @@ static int atmel_startup(struct uart_port *port)
+ } else if (atmel_use_dma_rx(port)) {
+ /* set UART timeout */
+ if (!atmel_port->is_usart) {
+- setup_timer(&atmel_port->uart_timer,
+- atmel_uart_timer_callback,
+- (unsigned long)port);
+ mod_timer(&atmel_port->uart_timer,
+ jiffies + uart_poll_timeout(port));
+ /* set USART timeout */
+@@ -1633,12 +1625,30 @@ static int atmel_startup(struct uart_port *port)
+ static void atmel_shutdown(struct uart_port *port)
+ {
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
++
+ /*
+- * Ensure everything is stopped.
++ * Prevent any tasklets being scheduled during
++ * cleanup
++ */
++ del_timer_sync(&atmel_port->uart_timer);
++
++ /*
++ * Clear out any scheduled tasklets before
++ * we destroy the buffers
++ */
++ tasklet_kill(&atmel_port->tasklet);
++
++ /*
++ * Ensure everything is stopped and
++ * disable all interrupts, port and break condition.
+ */
+ atmel_stop_rx(port);
+ atmel_stop_tx(port);
+
++ UART_PUT_CR(port, ATMEL_US_RSTSTA);
++ UART_PUT_IDR(port, -1);
++
++
+ /*
+ * Shut-down the DMA.
+ */
+@@ -1648,10 +1658,10 @@ static void atmel_shutdown(struct uart_port *port)
+ atmel_port->release_tx(port);
+
+ /*
+- * Disable all interrupts, port and break condition.
++ * Reset ring buffer pointers
+ */
+- UART_PUT_CR(port, ATMEL_US_RSTSTA);
+- UART_PUT_IDR(port, -1);
++ atmel_port->rx_ring.head = 0;
++ atmel_port->rx_ring.tail = 0;
+
+ /*
+ * Free the interrupt
+@@ -2424,11 +2434,12 @@ static int atmel_serial_remove(struct platform_device *pdev)
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ int ret = 0;
+
++ tasklet_kill(&atmel_port->tasklet);
++
+ device_init_wakeup(&pdev->dev, 0);
+
+ ret = uart_remove_one_port(&atmel_uart, port);
+
+- tasklet_kill(&atmel_port->tasklet);
+ kfree(atmel_port->rx_ring.buf);
+
+ /* "port" is allocated statically, so we shouldn't free it */
+diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
+index 1c94fc5257f4..5ad448d0fb7d 100644
+--- a/drivers/usb/chipidea/ci.h
++++ b/drivers/usb/chipidea/ci.h
+@@ -135,6 +135,7 @@ struct hw_bank {
+ * @id_event: indicates there is an id event, and handled at ci_otg_work
+ * @b_sess_valid_event: indicates there is a vbus event, and handled
+ * at ci_otg_work
++ * @imx28_write_fix: Freescale imx28 needs swp instruction for writing
+ */
+ struct ci_hdrc {
+ struct device *dev;
+@@ -173,6 +174,7 @@ struct ci_hdrc {
+ struct dentry *debugfs;
+ bool id_event;
+ bool b_sess_valid_event;
++ bool imx28_write_fix;
+ };
+
+ static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
+@@ -253,6 +255,26 @@ static inline u32 hw_read(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask)
+ return ioread32(ci->hw_bank.regmap[reg]) & mask;
+ }
+
++#ifdef CONFIG_SOC_IMX28
++static inline void imx28_ci_writel(u32 val, volatile void __iomem *addr)
++{
++ __asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr));
++}
++#else
++static inline void imx28_ci_writel(u32 val, volatile void __iomem *addr)
++{
++}
++#endif
++
++static inline void __hw_write(struct ci_hdrc *ci, u32 val,
++ void __iomem *addr)
++{
++ if (ci->imx28_write_fix)
++ imx28_ci_writel(val, addr);
++ else
++ iowrite32(val, addr);
++}
++
+ /**
+ * hw_write: writes to a hw register
+ * @reg: register index
+@@ -266,7 +288,7 @@ static inline void hw_write(struct ci_hdrc *ci, enum ci_hw_regs reg,
+ data = (ioread32(ci->hw_bank.regmap[reg]) & ~mask)
+ | (data & mask);
+
+- iowrite32(data, ci->hw_bank.regmap[reg]);
++ __hw_write(ci, data, ci->hw_bank.regmap[reg]);
+ }
+
+ /**
+@@ -281,7 +303,7 @@ static inline u32 hw_test_and_clear(struct ci_hdrc *ci, enum ci_hw_regs reg,
+ {
+ u32 val = ioread32(ci->hw_bank.regmap[reg]) & mask;
+
+- iowrite32(val, ci->hw_bank.regmap[reg]);
++ __hw_write(ci, val, ci->hw_bank.regmap[reg]);
+ return val;
+ }
+
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index be822a2c1776..76a67fb6ea32 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -23,6 +23,26 @@
+ #include "ci.h"
+ #include "ci_hdrc_imx.h"
+
++#define CI_HDRC_IMX_IMX28_WRITE_FIX BIT(0)
++
++struct ci_hdrc_imx_platform_flag {
++ unsigned int flags;
++};
++
++static const struct ci_hdrc_imx_platform_flag imx27_usb_data = {
++};
++
++static const struct ci_hdrc_imx_platform_flag imx28_usb_data = {
++ .flags = CI_HDRC_IMX_IMX28_WRITE_FIX,
++};
++
++static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
++ { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
++ { .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
++
+ struct ci_hdrc_imx_data {
+ struct usb_phy *phy;
+ struct platform_device *ci_pdev;
+@@ -82,6 +102,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ CI_HDRC_DISABLE_STREAMING,
+ };
+ int ret;
++ const struct of_device_id *of_id =
++ of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev);
++ const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+@@ -121,6 +144,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+
+ pdata.phy = data->phy;
+
++ if (imx_platform_flag->flags & CI_HDRC_IMX_IMX28_WRITE_FIX)
++ pdata.flags |= CI_HDRC_IMX28_WRITE_FIX;
++
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+@@ -187,12 +213,6 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+- { .compatible = "fsl,imx27-usb", },
+- { /* sentinel */ }
+-};
+-MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
+-
+ static struct platform_driver ci_hdrc_imx_driver = {
+ .probe = ci_hdrc_imx_probe,
+ .remove = ci_hdrc_imx_remove,
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index d6a50b7bb4ca..7e8dceb4c634 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -497,6 +497,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ ci->transceiver = ci->platdata->phy;
+ else
+ ci->global_phy = true;
++ ci->imx28_write_fix = !!(ci->platdata->flags &
++ CI_HDRC_IMX28_WRITE_FIX);
+
+ ret = hw_device_init(ci, base);
+ if (ret < 0) {
+diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
+index 5d874d5cf500..e3b9f98dfcd6 100644
+--- a/drivers/usb/chipidea/host.c
++++ b/drivers/usb/chipidea/host.c
+@@ -65,6 +65,7 @@ static int host_start(struct ci_hdrc *ci)
+ ehci->caps = ci->hw_bank.cap;
+ ehci->has_hostpc = ci->hw_bank.lpm;
+ ehci->has_tdi_phy_lpm = ci->hw_bank.lpm;
++ ehci->imx28_write_fix = ci->imx28_write_fix;
+
+ if (ci->platdata->reg_vbus) {
+ ret = regulator_enable(ci->platdata->reg_vbus);
+diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
+index 2d9f090733bc..449bee07f4fe 100644
+--- a/drivers/usb/chipidea/otg.h
++++ b/drivers/usb/chipidea/otg.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc.
+ *
+ * Author: Peter Chen
+ *
+@@ -19,12 +19,12 @@ static inline void ci_clear_otg_interrupt(struct ci_hdrc *ci, u32 bits)
+
+ static inline void ci_enable_otg_interrupt(struct ci_hdrc *ci, u32 bits)
+ {
+- hw_write(ci, OP_OTGSC, bits, bits);
++ hw_write(ci, OP_OTGSC, bits | OTGSC_INT_STATUS_BITS, bits);
+ }
+
+ static inline void ci_disable_otg_interrupt(struct ci_hdrc *ci, u32 bits)
+ {
+- hw_write(ci, OP_OTGSC, bits, 0);
++ hw_write(ci, OP_OTGSC, bits | OTGSC_INT_STATUS_BITS, 0);
+ }
+
+ int ci_hdrc_otg_init(struct ci_hdrc *ci);
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index d98fa254eaaf..94c26acfd5a4 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -394,6 +394,14 @@ static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
+ node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
+ node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
+ node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
++ if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
++ u32 mul = hwreq->req.length / hwep->ep.maxpacket;
++
++ if (hwreq->req.length == 0
++ || hwreq->req.length % hwep->ep.maxpacket)
++ mul++;
++ node->ptr->token |= mul << __ffs(TD_MULTO);
++ }
+
+ temp = (u32) (hwreq->req.dma + hwreq->req.actual);
+ if (length) {
+@@ -516,10 +524,11 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
+ hwep->qh.ptr->td.token &=
+ cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
+
+- if (hwep->type == USB_ENDPOINT_XFER_ISOC) {
++ if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
+ u32 mul = hwreq->req.length / hwep->ep.maxpacket;
+
+- if (hwreq->req.length % hwep->ep.maxpacket)
++ if (hwreq->req.length == 0
++ || hwreq->req.length % hwep->ep.maxpacket)
+ mul++;
+ hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
+ }
+@@ -1172,6 +1181,12 @@ static int ep_enable(struct usb_ep *ep,
+ if (hwep->num)
+ cap |= QH_ZLT;
+ cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
++ /*
++ * For ISO-TX, we set mult at QH as the largest value, and use
++ * MultO at TD as real mult value.
++ */
++ if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
++ cap |= 3 << __ffs(QH_MULT);
+
+ hwep->qh.ptr->cap = cpu_to_le32(cap);
+
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index a6b2cabe7930..548d1996590f 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -651,10 +651,6 @@ void usb_destroy_configuration(struct usb_device *dev)
+ *
+ * hub-only!! ... and only in reset path, or usb_new_device()
+ * (used by real hubs and virtual root hubs)
+- *
+- * NOTE: if this is a WUSB device and is not authorized, we skip the
+- * whole thing. A non-authorized USB device has no
+- * configurations.
+ */
+ int usb_get_configuration(struct usb_device *dev)
+ {
+@@ -666,8 +662,6 @@ int usb_get_configuration(struct usb_device *dev)
+ struct usb_config_descriptor *desc;
+
+ cfgno = 0;
+- if (dev->authorized == 0) /* Not really an error */
+- goto out_not_authorized;
+ result = -ENOMEM;
+ if (ncfg > USB_MAXCONFIG) {
+ dev_warn(ddev, "too many configurations: %d, "
+@@ -751,7 +745,6 @@ int usb_get_configuration(struct usb_device *dev)
+
+ err:
+ kfree(desc);
+-out_not_authorized:
+ dev->descriptor.bNumConfigurations = cfgno;
+ err2:
+ if (result == -ENOMEM)
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index c5c366790e6a..e3f7e41818f8 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1610,7 +1610,7 @@ static void hub_disconnect(struct usb_interface *intf)
+ {
+ struct usb_hub *hub = usb_get_intfdata(intf);
+ struct usb_device *hdev = interface_to_usbdev(intf);
+- int i;
++ int port1;
+
+ /* Take the hub off the event list and don't let it be added again */
+ spin_lock_irq(&hub_event_lock);
+@@ -1625,11 +1625,15 @@ static void hub_disconnect(struct usb_interface *intf)
+ hub->error = 0;
+ hub_quiesce(hub, HUB_DISCONNECT);
+
+- usb_set_intfdata (intf, NULL);
++ /* Avoid races with recursively_mark_NOTATTACHED() */
++ spin_lock_irq(&device_state_lock);
++ port1 = hdev->maxchild;
++ hdev->maxchild = 0;
++ usb_set_intfdata(intf, NULL);
++ spin_unlock_irq(&device_state_lock);
+
+- for (i = 0; i < hdev->maxchild; i++)
+- usb_hub_remove_port_device(hub, i + 1);
+- hub->hdev->maxchild = 0;
++ for (; port1 > 0; --port1)
++ usb_hub_remove_port_device(hub, port1);
+
+ if (hub->hdev->speed == USB_SPEED_HIGH)
+ highspeed_hubs--;
+@@ -2238,18 +2242,13 @@ static int usb_enumerate_device(struct usb_device *udev)
+ return err;
+ }
+ }
+- if (udev->wusb == 1 && udev->authorized == 0) {
+- udev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
+- udev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
+- udev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
+- }
+- else {
+- /* read the standard strings and cache them if present */
+- udev->product = usb_cache_string(udev, udev->descriptor.iProduct);
+- udev->manufacturer = usb_cache_string(udev,
+- udev->descriptor.iManufacturer);
+- udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
+- }
++
++ /* read the standard strings and cache them if present */
++ udev->product = usb_cache_string(udev, udev->descriptor.iProduct);
++ udev->manufacturer = usb_cache_string(udev,
++ udev->descriptor.iManufacturer);
++ udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
++
+ err = usb_enumerate_device_otg(udev);
+ if (err < 0)
+ return err;
+@@ -2431,16 +2430,6 @@ int usb_deauthorize_device(struct usb_device *usb_dev)
+ usb_dev->authorized = 0;
+ usb_set_configuration(usb_dev, -1);
+
+- kfree(usb_dev->product);
+- usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
+- kfree(usb_dev->manufacturer);
+- usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
+- kfree(usb_dev->serial);
+- usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
+-
+- usb_destroy_configuration(usb_dev);
+- usb_dev->descriptor.bNumConfigurations = 0;
+-
+ out_unauthorized:
+ usb_unlock_device(usb_dev);
+ return 0;
+@@ -2468,17 +2457,7 @@ int usb_authorize_device(struct usb_device *usb_dev)
+ goto error_device_descriptor;
+ }
+
+- kfree(usb_dev->product);
+- usb_dev->product = NULL;
+- kfree(usb_dev->manufacturer);
+- usb_dev->manufacturer = NULL;
+- kfree(usb_dev->serial);
+- usb_dev->serial = NULL;
+-
+ usb_dev->authorized = 1;
+- result = usb_enumerate_device(usb_dev);
+- if (result < 0)
+- goto error_enumerate;
+ /* Choose and set the configuration. This registers the interfaces
+ * with the driver core and lets interface drivers bind to them.
+ */
+@@ -2494,7 +2473,6 @@ int usb_authorize_device(struct usb_device *usb_dev)
+ }
+ dev_info(&usb_dev->dev, "authorized to connect\n");
+
+-error_enumerate:
+ error_device_descriptor:
+ usb_autosuspend_device(usb_dev);
+ error_autoresume:
+diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
+index 291db7d09f22..2a1f38d209f0 100644
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -203,6 +203,7 @@ struct ehci_hcd { /* one per controller */
+ unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
+ unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
+ unsigned need_oc_pp_cycle:1; /* MPC834X port power */
++ unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
+
+ /* required for usb32 quirk */
+ #define OHCI_CTRL_HCFS (3 << 6)
+@@ -679,6 +680,18 @@ static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
+ #endif
+ }
+
++#ifdef CONFIG_SOC_IMX28
++static inline void imx28_ehci_writel(const unsigned int val,
++ volatile __u32 __iomem *addr)
++{
++ __asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr));
++}
++#else
++static inline void imx28_ehci_writel(const unsigned int val,
++ volatile __u32 __iomem *addr)
++{
++}
++#endif
+ static inline void ehci_writel(const struct ehci_hcd *ehci,
+ const unsigned int val, __u32 __iomem *regs)
+ {
+@@ -687,7 +700,10 @@ static inline void ehci_writel(const struct ehci_hcd *ehci,
+ writel_be(val, regs) :
+ writel(val, regs);
+ #else
+- writel(val, regs);
++ if (ehci->imx28_write_fix)
++ imx28_ehci_writel(val, regs);
++ else
++ writel(val, regs);
+ #endif
+ }
+
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 55fc0c39b7e1..fec3c8654ccd 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2964,7 +2964,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+ if (num_trbs >= TRBS_PER_SEGMENT) {
+ xhci_err(xhci, "Too many fragments %d, max %d\n",
+ num_trbs, TRBS_PER_SEGMENT - 1);
+- return -ENOMEM;
++ return -EINVAL;
+ }
+
+ nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index ed6c186a5393..1dceec25223e 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -321,6 +321,9 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+
++ if (xhci->quirks & XHCI_PLAT)
++ return;
++
+ xhci_free_irq(xhci);
+
+ if (xhci->msix_entries) {
+@@ -4724,8 +4727,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ struct device *dev = hcd->self.controller;
+ int retval;
+
+- /* Accept arbitrarily long scatter-gather lists */
+- hcd->self.sg_tablesize = ~0;
++ /* Limit the block layer scatter-gather lists to half a segment. */
++ hcd->self.sg_tablesize = TRBS_PER_SEGMENT / 2;
+
+ /* support to build packet from discontinuous buffers */
+ hcd->self.no_sg_constraint = 1;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index ed3a425de8ce..6b3164c75c98 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1262,7 +1262,7 @@ union xhci_trb {
+ * since the command ring is 64-byte aligned.
+ * It must also be greater than 16.
+ */
+-#define TRBS_PER_SEGMENT 64
++#define TRBS_PER_SEGMENT 256
+ /* Allow two commands + a link TRB, along with any reserved command TRBs */
+ #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
+ #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
+diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
+index b461311a2ae7..ce13e61b7d55 100644
+--- a/drivers/usb/serial/cypress_m8.h
++++ b/drivers/usb/serial/cypress_m8.h
+@@ -63,7 +63,7 @@
+ #define UART_DSR 0x20 /* data set ready - flow control - device to host */
+ #define CONTROL_RTS 0x10 /* request to send - flow control - host to device */
+ #define UART_CTS 0x10 /* clear to send - flow control - device to host */
+-#define UART_RI 0x10 /* ring indicator - modem - device to host */
++#define UART_RI 0x80 /* ring indicator - modem - device to host */
+ #define UART_CD 0x40 /* carrier detect - modem - device to host */
+ #define CYP_ERROR 0x08 /* received from input report - device to host */
+ /* Note - the below has nothing to do with the "feature report" reset */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index dccb4db98ea1..6f1cbbf51ae8 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -2116,10 +2116,20 @@ static void ftdi_set_termios(struct tty_struct *tty,
+ }
+
+ /*
+- * All FTDI UART chips are limited to CS7/8. We won't pretend to
++ * All FTDI UART chips are limited to CS7/8. We shouldn't pretend to
+ * support CS5/6 and revert the CSIZE setting instead.
++ *
++ * CS5 however is used to control some smartcard readers which abuse
++ * this limitation to switch modes. Original FTDI chips fall back to
++ * eight data bits.
++ *
++ * TODO: Implement a quirk to only allow this with mentioned
++ * readers. One I know of (Argolis Smartreader V1)
++ * returns "USB smartcard server" as iInterface string.
++ * The vendor didn't bother with a custom VID/PID of
++ * course.
+ */
+- if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) {
++ if (C_CSIZE(tty) == CS6) {
+ dev_warn(ddev, "requested CSIZE setting not supported\n");
+
+ termios->c_cflag &= ~CSIZE;
+@@ -2166,6 +2176,9 @@ no_skip:
+ urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
+ }
+ switch (cflag & CSIZE) {
++ case CS5:
++ dev_dbg(ddev, "Setting CS5 quirk\n");
++ break;
+ case CS7:
+ urb_value |= 7;
+ dev_dbg(ddev, "Setting CS7\n");
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index cc7a24154490..5c86f57e4afa 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -320,6 +320,9 @@ static void option_instat_callback(struct urb *urb);
+ * It seems to contain a Qualcomm QSC6240/6290 chipset */
+ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
+
++/* iBall 3.5G connect wireless modem */
++#define IBALL_3_5G_CONNECT 0x9605
++
+ /* Zoom */
+ #define ZOOM_PRODUCT_4597 0x9607
+
+@@ -1447,6 +1450,17 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8e, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8f, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff90, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff91, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
+
+ /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
+@@ -1489,6 +1503,7 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
+ },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
++ { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+ /* Pirelli */
+ { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 1e3318dfa1cb..beb8edce4ef2 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -142,6 +142,8 @@ struct pl2303_private {
+ spinlock_t lock;
+ u8 line_control;
+ u8 line_status;
++
++ u8 line_settings[7];
+ };
+
+ static int pl2303_vendor_read(__u16 value, __u16 index,
+@@ -339,11 +341,6 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ int i;
+ u8 control;
+
+- /*
+- * The PL2303 is reported to lose bytes if you change serial settings
+- * even to the same values as before. Thus we actually need to filter
+- * in this specific case.
+- */
+ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
+ return;
+
+@@ -428,10 +425,29 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ dev_dbg(&port->dev, "parity = none\n");
+ }
+
+- i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+- SET_LINE_REQUEST, SET_LINE_REQUEST_TYPE,
+- 0, 0, buf, 7, 100);
+- dev_dbg(&port->dev, "0x21:0x20:0:0 %d\n", i);
++ /*
++ * Some PL2303 are known to lose bytes if you change serial settings
++ * even to the same values as before. Thus we actually need to filter
++ * in this specific case.
++ *
++ * Note that the tty_termios_hw_change check above is not sufficient
++ * as a previously requested baud rate may differ from the one
++ * actually used (and stored in old_termios).
++ *
++ * NOTE: No additional locking needed for line_settings as it is
++ * only used in set_termios, which is serialised against itself.
++ */
++ if (!old_termios || memcmp(buf, priv->line_settings, 7)) {
++ i = usb_control_msg(serial->dev,
++ usb_sndctrlpipe(serial->dev, 0),
++ SET_LINE_REQUEST, SET_LINE_REQUEST_TYPE,
++ 0, 0, buf, 7, 100);
++
++ dev_dbg(&port->dev, "0x21:0x20:0:0 %d\n", i);
++
++ if (i == 7)
++ memcpy(priv->line_settings, buf, 7);
++ }
+
+ /* change control lines if we are switching to or from B0 */
+ spin_lock_irqsave(&priv->lock, flags);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index de32cfa5bfa6..ad06255c2ade 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -234,6 +234,13 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
++/* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
++UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
++ "Nokia",
++ "502",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64 ),
++
+ #ifdef NO_SDDR09
+ UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
+ "Microtech",
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index d58bef130a41..b256ddc1cb53 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -7718,7 +7718,7 @@ out:
+ */
+ if (!for_reloc && root_dropped == false)
+ btrfs_add_dead_root(root);
+- if (err)
++ if (err && err != -EAGAIN)
+ btrfs_std_error(root->fs_info, err);
+ return err;
+ }
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 8747feb77ec9..669eb53273c0 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1548,6 +1548,12 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
+ printk(KERN_INFO "btrfs: Snapshot src from "
+ "another FS\n");
+ ret = -EINVAL;
++ } else if (!inode_owner_or_capable(src_inode)) {
++ /*
++ * Subvolume creation is not restricted, but snapshots
++ * are limited to own subvolumes only
++ */
++ ret = -EPERM;
+ } else {
+ ret = btrfs_mksubvol(&file->f_path, name, namelen,
+ BTRFS_I(src_inode)->root,
+diff --git a/fs/dcache.c b/fs/dcache.c
+index f27c1d12a1fa..4021e0172602 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -3140,7 +3140,6 @@ restart:
+ read_seqbegin_or_lock(&rename_lock, &seq);
+ while (!IS_ROOT(dentry)) {
+ struct dentry *parent = dentry->d_parent;
+- int error;
+
+ prefetch(parent);
+ error = prepend_name(&end, &len, &dentry->d_name);
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index d9ecbf1113a7..46b366897553 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1925,9 +1925,11 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
+ }
+
+ /* Clear the content within i_blocks. */
+- if (i_size < EXT4_MIN_INLINE_DATA_SIZE)
+- memset(ext4_raw_inode(&is.iloc)->i_block + i_size, 0,
+- EXT4_MIN_INLINE_DATA_SIZE - i_size);
++ if (i_size < EXT4_MIN_INLINE_DATA_SIZE) {
++ void *p = (void *) ext4_raw_inode(&is.iloc)->i_block;
++ memset(p + i_size, 0,
++ EXT4_MIN_INLINE_DATA_SIZE - i_size);
++ }
+
+ EXT4_I(inode)->i_inline_size = i_size <
+ EXT4_MIN_INLINE_DATA_SIZE ?
+diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c
+index cdb84a838068..58b5106186d0 100644
+--- a/fs/hpfs/alloc.c
++++ b/fs/hpfs/alloc.c
+@@ -8,6 +8,58 @@
+
+ #include "hpfs_fn.h"
+
++static void hpfs_claim_alloc(struct super_block *s, secno sec)
++{
++ struct hpfs_sb_info *sbi = hpfs_sb(s);
++ if (sbi->sb_n_free != (unsigned)-1) {
++ if (unlikely(!sbi->sb_n_free)) {
++ hpfs_error(s, "free count underflow, allocating sector %08x", sec);
++ sbi->sb_n_free = -1;
++ return;
++ }
++ sbi->sb_n_free--;
++ }
++}
++
++static void hpfs_claim_free(struct super_block *s, secno sec)
++{
++ struct hpfs_sb_info *sbi = hpfs_sb(s);
++ if (sbi->sb_n_free != (unsigned)-1) {
++ if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) {
++ hpfs_error(s, "free count overflow, freeing sector %08x", sec);
++ sbi->sb_n_free = -1;
++ return;
++ }
++ sbi->sb_n_free++;
++ }
++}
++
++static void hpfs_claim_dirband_alloc(struct super_block *s, secno sec)
++{
++ struct hpfs_sb_info *sbi = hpfs_sb(s);
++ if (sbi->sb_n_free_dnodes != (unsigned)-1) {
++ if (unlikely(!sbi->sb_n_free_dnodes)) {
++ hpfs_error(s, "dirband free count underflow, allocating sector %08x", sec);
++ sbi->sb_n_free_dnodes = -1;
++ return;
++ }
++ sbi->sb_n_free_dnodes--;
++ }
++}
++
++static void hpfs_claim_dirband_free(struct super_block *s, secno sec)
++{
++ struct hpfs_sb_info *sbi = hpfs_sb(s);
++ if (sbi->sb_n_free_dnodes != (unsigned)-1) {
++ if (unlikely(sbi->sb_n_free_dnodes >= sbi->sb_dirband_size / 4)) {
++ hpfs_error(s, "dirband free count overflow, freeing sector %08x", sec);
++ sbi->sb_n_free_dnodes = -1;
++ return;
++ }
++ sbi->sb_n_free_dnodes++;
++ }
++}
++
+ /*
+ * Check if a sector is allocated in bitmap
+ * This is really slow. Turned on only if chk==2
+@@ -203,9 +255,15 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa
+ }
+ sec = 0;
+ ret:
++ if (sec) {
++ i = 0;
++ do
++ hpfs_claim_alloc(s, sec + i);
++ while (unlikely(++i < n));
++ }
+ if (sec && f_p) {
+ for (i = 0; i < forward; i++) {
+- if (!hpfs_alloc_if_possible(s, sec + i + 1)) {
++ if (!hpfs_alloc_if_possible(s, sec + n + i)) {
+ hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
+ sec = 0;
+ break;
+@@ -228,6 +286,7 @@ static secno alloc_in_dirband(struct super_block *s, secno near)
+ nr >>= 2;
+ sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
+ if (!sec) return 0;
++ hpfs_claim_dirband_alloc(s, sec);
+ return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
+ }
+
+@@ -242,6 +301,7 @@ int hpfs_alloc_if_possible(struct super_block *s, secno sec)
+ bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
+ hpfs_mark_4buffers_dirty(&qbh);
+ hpfs_brelse4(&qbh);
++ hpfs_claim_alloc(s, sec);
+ return 1;
+ }
+ hpfs_brelse4(&qbh);
+@@ -275,6 +335,7 @@ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
+ return;
+ }
+ bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
++ hpfs_claim_free(s, sec);
+ if (!--n) {
+ hpfs_mark_4buffers_dirty(&qbh);
+ hpfs_brelse4(&qbh);
+@@ -359,6 +420,7 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
+ bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
+ hpfs_mark_4buffers_dirty(&qbh);
+ hpfs_brelse4(&qbh);
++ hpfs_claim_dirband_free(s, dno);
+ }
+ }
+
+@@ -366,7 +428,7 @@ struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
+ dnode_secno *dno, struct quad_buffer_head *qbh)
+ {
+ struct dnode *d;
+- if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) {
++ if (hpfs_get_free_dnodes(s) > FREE_DNODES_ADD) {
+ if (!(*dno = alloc_in_dirband(s, near)))
+ if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL;
+ } else {
+diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
+index 1b398636e990..c65f0bf326a1 100644
+--- a/fs/hpfs/hpfs_fn.h
++++ b/fs/hpfs/hpfs_fn.h
+@@ -311,7 +311,7 @@ static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb)
+ __printf(2, 3)
+ void hpfs_error(struct super_block *, const char *, ...);
+ int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
+-unsigned hpfs_count_one_bitmap(struct super_block *, secno);
++unsigned hpfs_get_free_dnodes(struct super_block *);
+
+ /*
+ * local time (HPFS) to GMT (Unix)
+diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
+index 4334cda8dba1..3d6f8972d06e 100644
+--- a/fs/hpfs/super.c
++++ b/fs/hpfs/super.c
+@@ -115,7 +115,7 @@ static void hpfs_put_super(struct super_block *s)
+ kfree(sbi);
+ }
+
+-unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
++static unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
+ {
+ struct quad_buffer_head qbh;
+ unsigned long *bits;
+@@ -123,7 +123,7 @@ unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
+
+ bits = hpfs_map_4sectors(s, secno, &qbh, 0);
+ if (!bits)
+- return 0;
++ return (unsigned)-1;
+ count = bitmap_weight(bits, 2048 * BITS_PER_BYTE);
+ hpfs_brelse4(&qbh);
+ return count;
+@@ -138,30 +138,45 @@ static unsigned count_bitmaps(struct super_block *s)
+ hpfs_prefetch_bitmap(s, n);
+ }
+ for (n = 0; n < n_bands; n++) {
++ unsigned c;
+ hpfs_prefetch_bitmap(s, n + COUNT_RD_AHEAD);
+- count += hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
++ c = hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
++ if (c != (unsigned)-1)
++ count += c;
+ }
+ return count;
+ }
+
++unsigned hpfs_get_free_dnodes(struct super_block *s)
++{
++ struct hpfs_sb_info *sbi = hpfs_sb(s);
++ if (sbi->sb_n_free_dnodes == (unsigned)-1) {
++ unsigned c = hpfs_count_one_bitmap(s, sbi->sb_dmap);
++ if (c == (unsigned)-1)
++ return 0;
++ sbi->sb_n_free_dnodes = c;
++ }
++ return sbi->sb_n_free_dnodes;
++}
++
+ static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ {
+ struct super_block *s = dentry->d_sb;
+ struct hpfs_sb_info *sbi = hpfs_sb(s);
+ u64 id = huge_encode_dev(s->s_bdev->bd_dev);
++
+ hpfs_lock(s);
+
+- /*if (sbi->sb_n_free == -1) {*/
++ if (sbi->sb_n_free == (unsigned)-1)
+ sbi->sb_n_free = count_bitmaps(s);
+- sbi->sb_n_free_dnodes = hpfs_count_one_bitmap(s, sbi->sb_dmap);
+- /*}*/
++
+ buf->f_type = s->s_magic;
+ buf->f_bsize = 512;
+ buf->f_blocks = sbi->sb_fs_size;
+ buf->f_bfree = sbi->sb_n_free;
+ buf->f_bavail = sbi->sb_n_free;
+ buf->f_files = sbi->sb_dirband_size / 4;
+- buf->f_ffree = sbi->sb_n_free_dnodes;
++ buf->f_ffree = hpfs_get_free_dnodes(s);
+ buf->f_fsid.val[0] = (u32)id;
+ buf->f_fsid.val[1] = (u32)(id >> 32);
+ buf->f_namelen = 254;
+diff --git a/fs/mount.h b/fs/mount.h
+index 64a858143ff9..68d80bdcd081 100644
+--- a/fs/mount.h
++++ b/fs/mount.h
+@@ -73,7 +73,7 @@ static inline int mnt_has_parent(struct mount *mnt)
+ static inline int is_mounted(struct vfsmount *mnt)
+ {
+ /* neither detached nor internal? */
+- return !IS_ERR_OR_NULL(real_mount(mnt));
++ return !IS_ERR_OR_NULL(real_mount(mnt)->mnt_ns);
+ }
+
+ extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 0393270466c3..6125579b5207 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
+ void hugepage_put_subpool(struct hugepage_subpool *spool);
+
+ int PageHuge(struct page *page);
++int PageHeadHuge(struct page *page_head);
+
+ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
+ int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+@@ -104,6 +105,11 @@ static inline int PageHuge(struct page *page)
+ return 0;
+ }
+
++static inline int PageHeadHuge(struct page *page_head)
++{
++ return 0;
++}
++
+ static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
+ {
+ }
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 9b503376738f..bec6dbe939a0 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -419,6 +419,8 @@ enum {
+ ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
+ ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
+ ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
++ ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
++ ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
+
+ /* DMA mask for user DMA control: User visible values; DO NOT
+ renumber */
+diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
+index 7d399671a566..708bd119627f 100644
+--- a/include/linux/usb/chipidea.h
++++ b/include/linux/usb/chipidea.h
+@@ -24,6 +24,7 @@ struct ci_hdrc_platform_data {
+ * but otg is not supported (no register otgsc).
+ */
+ #define CI_HDRC_DUAL_ROLE_NOT_OTG BIT(4)
++#define CI_HDRC_IMX28_WRITE_FIX BIT(5)
+ enum usb_dr_mode dr_mode;
+ #define CI_HDRC_CONTROLLER_RESET_EVENT 0
+ #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 0b7656e804d1..f0a4ca4cc219 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -736,6 +736,23 @@ int PageHuge(struct page *page)
+ }
+ EXPORT_SYMBOL_GPL(PageHuge);
+
++/*
++ * PageHeadHuge() only returns true for hugetlbfs head page, but not for
++ * normal or transparent huge pages.
++ */
++int PageHeadHuge(struct page *page_head)
++{
++ compound_page_dtor *dtor;
++
++ if (!PageHead(page_head))
++ return 0;
++
++ dtor = get_compound_page_dtor(page_head);
++
++ return dtor == free_huge_page;
++}
++EXPORT_SYMBOL_GPL(PageHeadHuge);
++
+ pgoff_t __basepage_index(struct page *page)
+ {
+ struct page *page_head = compound_head(page);
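PageHeadHuge() above identifies a hugetlbfs head page purely by comparing the compound page's recorded destructor against free_huge_page, so it never has to inspect tail-page state. A miniature of that identify-by-destructor idea, with a toy struct standing in for struct page internals (hypothetical, not the kernel layout):

#include <stdio.h>

struct page { void (*dtor)(struct page *); };	/* toy stand-in */

static void free_huge_page(struct page *p) { (void)p; }
static void free_compound_page(struct page *p) { (void)p; }

static int page_head_huge(const struct page *head)
{
	return head->dtor == free_huge_page;
}

int main(void)
{
	struct page thp = { free_compound_page };
	struct page huge = { free_huge_page };

	printf("%d %d\n", page_head_huge(&thp), page_head_huge(&huge));
	return 0;	/* prints: 0 1 */
}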
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 6b22d8f9bfb8..927a69cf354a 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2857,7 +2857,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+ */
+ VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
+
+- if (!pol || pol == &default_policy)
++ if (!pol || pol == &default_policy || (pol->flags & MPOL_F_MORON))
+ mode = MPOL_DEFAULT;
+ else
+ mode = pol->mode;
+diff --git a/mm/swap.c b/mm/swap.c
+index 759c3caf44bd..0c8f7a471925 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -82,19 +82,6 @@ static void __put_compound_page(struct page *page)
+
+ static void put_compound_page(struct page *page)
+ {
+- /*
+- * hugetlbfs pages cannot be split from under us. If this is a
+- * hugetlbfs page, check refcount on head page and release the page if
+- * the refcount becomes zero.
+- */
+- if (PageHuge(page)) {
+- page = compound_head(page);
+- if (put_page_testzero(page))
+- __put_compound_page(page);
+-
+- return;
+- }
+-
+ if (unlikely(PageTail(page))) {
+ /* __split_huge_page_refcount can run under us */
+ struct page *page_head = compound_trans_head(page);
+@@ -111,14 +98,31 @@ static void put_compound_page(struct page *page)
+ * still hot on arches that do not support
+ * this_cpu_cmpxchg_double().
+ */
+- if (PageSlab(page_head)) {
+- if (PageTail(page)) {
++ if (PageSlab(page_head) || PageHeadHuge(page_head)) {
++ if (likely(PageTail(page))) {
++ /*
++ * __split_huge_page_refcount
++ * cannot race here.
++ */
++ VM_BUG_ON(!PageHead(page_head));
++ atomic_dec(&page->_mapcount);
+ if (put_page_testzero(page_head))
+ VM_BUG_ON(1);
+-
+- atomic_dec(&page->_mapcount);
+- goto skip_lock_tail;
++ if (put_page_testzero(page_head))
++ __put_compound_page(page_head);
++ return;
+ } else
++ /*
++ * __split_huge_page_refcount
++ * run before us, "page" was a
++ * THP tail. The split
++ * page_head has been freed
++ * and reallocated as slab or
++ * hugetlbfs page of smaller
++ * order (only possible if
++ * reallocated as slab on
++ * x86).
++ */
+ goto skip_lock;
+ }
+ /*
+@@ -132,8 +136,27 @@ static void put_compound_page(struct page *page)
+ /* __split_huge_page_refcount run before us */
+ compound_unlock_irqrestore(page_head, flags);
+ skip_lock:
+- if (put_page_testzero(page_head))
+- __put_single_page(page_head);
++ if (put_page_testzero(page_head)) {
++ /*
++ * The head page may have been
++ * freed and reallocated as a
++ * compound page of smaller
++ * order and then freed again.
++ * All we know is that it
++ * cannot have become: a THP
++ * page, a compound page of
++ * higher order, a tail page.
++ * That is because we still
++ * hold the refcount of the
++ * split THP tail and
++ * page_head was the THP head
++ * before the split.
++ */
++ if (PageHead(page_head))
++ __put_compound_page(page_head);
++ else
++ __put_single_page(page_head);
++ }
+ out_put_single:
+ if (put_page_testzero(page))
+ __put_single_page(page);
+@@ -155,7 +178,6 @@ out_put_single:
+ VM_BUG_ON(atomic_read(&page->_count) != 0);
+ compound_unlock_irqrestore(page_head, flags);
+
+-skip_lock_tail:
+ if (put_page_testzero(page_head)) {
+ if (PageHead(page_head))
+ __put_compound_page(page_head);
+@@ -198,51 +220,52 @@ bool __get_page_tail(struct page *page)
+ * proper PT lock that already serializes against
+ * split_huge_page().
+ */
++ unsigned long flags;
+ bool got = false;
+- struct page *page_head;
+-
+- /*
+- * If this is a hugetlbfs page it cannot be split under us. Simply
+- * increment refcount for the head page.
+- */
+- if (PageHuge(page)) {
+- page_head = compound_head(page);
+- atomic_inc(&page_head->_count);
+- got = true;
+- } else {
+- unsigned long flags;
++ struct page *page_head = compound_trans_head(page);
+
+- page_head = compound_trans_head(page);
+- if (likely(page != page_head &&
+- get_page_unless_zero(page_head))) {
+-
+- /* Ref to put_compound_page() comment. */
+- if (PageSlab(page_head)) {
+- if (likely(PageTail(page))) {
+- __get_page_tail_foll(page, false);
+- return true;
+- } else {
+- put_page(page_head);
+- return false;
+- }
+- }
+-
+- /*
+- * page_head wasn't a dangling pointer but it
+- * may not be a head page anymore by the time
+- * we obtain the lock. That is ok as long as it
+- * can't be freed from under us.
+- */
+- flags = compound_lock_irqsave(page_head);
+- /* here __split_huge_page_refcount won't run anymore */
++ if (likely(page != page_head && get_page_unless_zero(page_head))) {
++ /* Ref to put_compound_page() comment. */
++ if (PageSlab(page_head) || PageHeadHuge(page_head)) {
+ if (likely(PageTail(page))) {
++ /*
++ * This is a hugetlbfs page or a slab
++ * page. __split_huge_page_refcount
++ * cannot race here.
++ */
++ VM_BUG_ON(!PageHead(page_head));
+ __get_page_tail_foll(page, false);
+- got = true;
+- }
+- compound_unlock_irqrestore(page_head, flags);
+- if (unlikely(!got))
++ return true;
++ } else {
++ /*
++ * __split_huge_page_refcount run
++ * before us, "page" was a THP
++ * tail. The split page_head has been
++ * freed and reallocated as slab or
++ * hugetlbfs page of smaller order
++ * (only possible if reallocated as
++ * slab on x86).
++ */
+ put_page(page_head);
++ return false;
++ }
++ }
++
++ /*
++ * page_head wasn't a dangling pointer but it
++ * may not be a head page anymore by the time
++ * we obtain the lock. That is ok as long as it
++ * can't be freed from under us.
++ */
++ flags = compound_lock_irqsave(page_head);
++ /* here __split_huge_page_refcount won't run anymore */
++ if (likely(PageTail(page))) {
++ __get_page_tail_foll(page, false);
++ got = true;
+ }
++ compound_unlock_irqrestore(page_head, flags);
++ if (unlikely(!got))
++ put_page(page_head);
+ }
+ return got;
+ }
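The reworked __get_page_tail() leans on get_page_unless_zero(): take a reference only if the head's refcount is still nonzero, otherwise treat the pointer as dangling. A self-contained C11 sketch of that speculative-reference idiom (simplified; the kernel version carries extra memory-ordering and per-arch detail):

#include <stdatomic.h>
#include <stdio.h>

static int get_unless_zero(_Atomic int *count)
{
	int v = atomic_load(count);

	while (v != 0)
		if (atomic_compare_exchange_weak(count, &v, v + 1))
			return 1;	/* reference taken */
	return 0;			/* object already dying */
}

int main(void)
{
	_Atomic int refcount = 1;

	printf("%d\n", get_unless_zero(&refcount));	/* 1: got a ref */
	atomic_store(&refcount, 0);
	printf("%d\n", get_unless_zero(&refcount));	/* 0: too late */
	return 0;
}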
+diff --git a/net/compat.c b/net/compat.c
+index dd32e34c1e2c..f50161fb812e 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -780,21 +780,16 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+
+- if (COMPAT_USE_64BIT_TIME)
+- return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+- flags | MSG_CMSG_COMPAT,
+- (struct timespec *) timeout);
+-
+ if (timeout == NULL)
+ return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT, NULL);
+
+- if (get_compat_timespec(&ktspec, timeout))
++ if (compat_get_timespec(&ktspec, timeout))
+ return -EFAULT;
+
+ datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT, &ktspec);
+- if (datagrams > 0 && put_compat_timespec(&ktspec, timeout))
++ if (datagrams > 0 && compat_put_timespec(&ktspec, timeout))
+ datagrams = -EFAULT;
+
+ return datagrams;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 01b780856db2..ad30d626a5bd 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -36,7 +36,6 @@
+ #include <asm/uaccess.h>
+ #include <asm/unaligned.h>
+ #include <linux/filter.h>
+-#include <linux/reciprocal_div.h>
+ #include <linux/ratelimit.h>
+ #include <linux/seccomp.h>
+ #include <linux/if_vlan.h>
+@@ -166,7 +165,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
+ A /= X;
+ continue;
+ case BPF_S_ALU_DIV_K:
+- A = reciprocal_divide(A, K);
++ A /= K;
+ continue;
+ case BPF_S_ALU_MOD_X:
+ if (X == 0)
+@@ -553,11 +552,6 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
+ /* Some instructions need special checks */
+ switch (code) {
+ case BPF_S_ALU_DIV_K:
+- /* check for division by zero */
+- if (ftest->k == 0)
+- return -EINVAL;
+- ftest->k = reciprocal_value(ftest->k);
+- break;
+ case BPF_S_ALU_MOD_K:
+ /* check for division by zero */
+ if (ftest->k == 0)
+@@ -853,27 +847,7 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
+ to->code = decodes[code];
+ to->jt = filt->jt;
+ to->jf = filt->jf;
+-
+- if (code == BPF_S_ALU_DIV_K) {
+- /*
+- * When loaded this rule user gave us X, which was
+- * translated into R = r(X). Now we calculate the
+- * RR = r(R) and report it back. If next time this
+- * value is loaded and RRR = r(RR) is calculated
+- * then the R == RRR will be true.
+- *
+- * One exception. X == 1 translates into R == 0 and
+- * we can't calculate RR out of it with r().
+- */
+-
+- if (filt->k == 0)
+- to->k = 1;
+- else
+- to->k = reciprocal_value(filt->k);
+-
+- BUG_ON(reciprocal_value(to->k) != filt->k);
+- } else
+- to->k = filt->k;
++ to->k = filt->k;
+ }
+
+ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
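The filter.c hunks drop reciprocal_divide() for BPF_S_ALU_DIV_K in favor of a plain division. The removed sk_decode_filter() comment already hints at the worst case: reciprocal_value(1) computes ceil(2^32/1), which truncates to 0 in 32 bits, so every "division" by 1 returned 0. A userspace re-creation demonstrating it (the two helpers mirror the pre-patch lib/reciprocal_div code as I understand it):

#include <stdio.h>
#include <stdint.h>

static uint32_t reciprocal_value(uint32_t d)
{
	return (uint32_t)(((1ULL << 32) + d - 1) / d);	/* truncates for d == 1 */
}

static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t a = 12345;

	printf("a/1: exact=%u reciprocal=%u\n",
	       a / 1, reciprocal_divide(a, reciprocal_value(1)));
	return 0;	/* exact=12345 reciprocal=0 */
}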
+diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
+index 22b1a7058fd3..4efd2375d7e1 100644
+--- a/net/ieee802154/nl-phy.c
++++ b/net/ieee802154/nl-phy.c
+@@ -224,8 +224,10 @@ static int ieee802154_add_iface(struct sk_buff *skb,
+
+ if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
+ type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
+- if (type >= __IEEE802154_DEV_MAX)
+- return -EINVAL;
++ if (type >= __IEEE802154_DEV_MAX) {
++ rc = -EINVAL;
++ goto nla_put_failure;
++ }
+ }
+
+ dev = phy->add_iface(phy, devname, type);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index b3f627ac4ed8..f7f8cff67344 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1049,6 +1049,8 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
+ }
+
+ in_dev = __in_dev_get_rtnl(dev);
++ if (!in_dev)
++ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index 31cf54d18221..45dbdab915e2 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -961,7 +961,7 @@ next_normal:
+ ++num;
+ }
+
+- if (r->idiag_states & TCPF_TIME_WAIT) {
++ if (r->idiag_states & (TCPF_TIME_WAIT | TCPF_FIN_WAIT2)) {
+ struct inet_timewait_sock *tw;
+
+ inet_twsk_for_each(tw, node,
+@@ -971,6 +971,8 @@ next_normal:
+
+ if (num < s_num)
+ goto next_dying;
++ if (!(r->idiag_states & (1 << tw->tw_substate)))
++ goto next_dying;
+ if (r->sdiag_family != AF_UNSPEC &&
+ tw->tw_family != r->sdiag_family)
+ goto next_dying;
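The inet_diag change walks the timewait list for FIN_WAIT2 as well, then filters each entry by its real substate with a one-bit membership test. The shape of that test, with the TCP state numbers as I recall the kernel defines them (taken on trust here):

#include <stdio.h>

enum { TCP_FIN_WAIT2 = 5, TCP_TIME_WAIT = 6 };	/* assumed kernel numbering */

int main(void)
{
	unsigned int requested = 1U << TCP_TIME_WAIT;	/* dump filter bits */
	unsigned int substate  = TCP_FIN_WAIT2;		/* this tw socket */

	if (!(requested & (1U << substate)))
		printf("skip: substate not requested\n");
	return 0;
}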
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index e560ef34cf4b..d30636080a11 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -178,7 +178,7 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
+ else
+ itn = net_generic(net, ipgre_net_id);
+
+- iph = (const struct iphdr *)skb->data;
++ iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
+ t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
+ iph->daddr, iph->saddr, tpi->key);
+
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index 054a3e97d822..3d4da2c16b6a 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -314,7 +314,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
+ const struct iphdr *iph = ip_hdr(skb);
+ struct rtable *rt;
+
+- if (sysctl_ip_early_demux && !skb_dst(skb)) {
++ if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
+ const struct net_protocol *ipprot;
+ int protocol = iph->protocol;
+
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 254f11c24aa5..995a0bb33a65 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -618,6 +618,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
+ tunnel->err_count--;
+
++ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ dst_link_failure(skb);
+ } else
+ tunnel->err_count = 0;
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 62212c772a4b..1672409f5ba5 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -157,9 +157,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+ static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
+ struct mr_table **mrt)
+ {
+- struct ipmr_result res;
+- struct fib_lookup_arg arg = { .result = &res, };
+ int err;
++ struct ipmr_result res;
++ struct fib_lookup_arg arg = {
++ .result = &res,
++ .flags = FIB_LOOKUP_NOREF,
++ };
+
+ err = fib_rules_lookup(net->ipv4.mr_rules_ops,
+ flowi4_to_flowi(flp4), 0, &arg);
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index 310711433358..75c7f5391fb1 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -22,6 +22,9 @@
+
+ int sysctl_tcp_nometrics_save __read_mostly;
+
++static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
++ struct net *net, unsigned int hash);
++
+ struct tcp_fastopen_metrics {
+ u16 mss;
+ u16 syn_loss:10; /* Recurring Fast Open SYN losses */
+@@ -130,16 +133,41 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
+ }
+ }
+
++#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
++
++static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
++{
++ if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
++ tcpm_suck_dst(tm, dst, false);
++}
++
++#define TCP_METRICS_RECLAIM_DEPTH 5
++#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
++
+ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
+ struct inetpeer_addr *addr,
+- unsigned int hash,
+- bool reclaim)
++ unsigned int hash)
+ {
+ struct tcp_metrics_block *tm;
+ struct net *net;
++ bool reclaim = false;
+
+ spin_lock_bh(&tcp_metrics_lock);
+ net = dev_net(dst->dev);
++
++ /* While waiting for the spin-lock the cache might have been populated
++ * with this entry and so we have to check again.
++ */
++ tm = __tcp_get_metrics(addr, net, hash);
++ if (tm == TCP_METRICS_RECLAIM_PTR) {
++ reclaim = true;
++ tm = NULL;
++ }
++ if (tm) {
++ tcpm_check_stamp(tm, dst);
++ goto out_unlock;
++ }
++
+ if (unlikely(reclaim)) {
+ struct tcp_metrics_block *oldest;
+
+@@ -169,17 +197,6 @@ out_unlock:
+ return tm;
+ }
+
+-#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
+-
+-static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
+-{
+- if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
+- tcpm_suck_dst(tm, dst, false);
+-}
+-
+-#define TCP_METRICS_RECLAIM_DEPTH 5
+-#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
+-
+ static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
+ {
+ if (tm)
+@@ -280,7 +297,6 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
+ struct inetpeer_addr addr;
+ unsigned int hash;
+ struct net *net;
+- bool reclaim;
+
+ addr.family = sk->sk_family;
+ switch (addr.family) {
+@@ -300,13 +316,10 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
+ hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+
+ tm = __tcp_get_metrics(&addr, net, hash);
+- reclaim = false;
+- if (tm == TCP_METRICS_RECLAIM_PTR) {
+- reclaim = true;
++ if (tm == TCP_METRICS_RECLAIM_PTR)
+ tm = NULL;
+- }
+ if (!tm && create)
+- tm = tcpm_new(dst, &addr, hash, reclaim);
++ tm = tcpm_new(dst, &addr, hash);
+ else
+ tcpm_check_stamp(tm, dst);
+
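The tcp_metrics fix is a classic lost-race repair: tcpm_new() now repeats the hash lookup after taking tcp_metrics_lock, because another CPU may have inserted the same entry between the unlocked lookup and the lock acquisition. A minimal pthread sketch of that check-then-recheck pattern (the kernel side uses RCU for the unlocked read; this sketch glosses over that):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *cached;			/* shared cache slot */

static char *get_or_create(const char *key)
{
	char *e = cached;		/* optimistic, unlocked lookup */

	if (e)
		return e;
	pthread_mutex_lock(&lock);
	e = cached;			/* re-check under the lock */
	if (!e)
		cached = e = strdup(key); /* only the race winner creates */
	pthread_mutex_unlock(&lock);
	return e;
}

int main(void)
{
	return get_or_create("192.0.2.1") ? 0 : 1;
}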
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index 302d6fb1ff2b..51d54dc376f3 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -49,7 +49,7 @@
+
+ int ip6_rcv_finish(struct sk_buff *skb)
+ {
+- if (sysctl_ip_early_demux && !skb_dst(skb)) {
++ if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
+ const struct inet6_protocol *ipprot;
+
+ ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index f365310bfcca..0eb4038a4d63 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -141,9 +141,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
+ static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
+ struct mr6_table **mrt)
+ {
+- struct ip6mr_result res;
+- struct fib_lookup_arg arg = { .result = &res, };
+ int err;
++ struct ip6mr_result res;
++ struct fib_lookup_arg arg = {
++ .result = &res,
++ .flags = FIB_LOOKUP_NOREF,
++ };
+
+ err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
+ flowi6_to_flowi(flp6), 0, &arg);
+diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
+index 8eb9501e3d60..b7ebe23cdedf 100644
+--- a/net/rds/ib_recv.c
++++ b/net/rds/ib_recv.c
+@@ -421,8 +421,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
+ struct rds_ib_refill_cache *cache)
+ {
+ unsigned long flags;
+- struct list_head *old;
+- struct list_head __percpu *chpfirst;
++ struct list_head *old, *chpfirst;
+
+ local_irq_save(flags);
+
+@@ -432,7 +431,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
+ else /* put on front */
+ list_add_tail(new_item, chpfirst);
+
+- __this_cpu_write(chpfirst, new_item);
++ __this_cpu_write(cache->percpu->first, new_item);
+ __this_cpu_inc(cache->percpu->count);
+
+ if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
+@@ -452,7 +451,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
+ } while (old);
+
+
+- __this_cpu_write(chpfirst, NULL);
++ __this_cpu_write(cache->percpu->first, NULL);
+ __this_cpu_write(cache->percpu->count, 0);
+ end:
+ local_irq_restore(flags);
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 6b9087115da2..d04785144601 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -170,7 +170,7 @@ int snd_card_create(int idx, const char *xid,
+ if (idx < 0) {
+ for (idx2 = 0; idx2 < SNDRV_CARDS; idx2++) {
+ /* idx == -1 == 0xffff means: take any free slot */
+- if (idx2 < sizeof(int) && !(idx & (1U << idx2)))
++ if (idx2 < 32 && !(idx & (1U << idx2)))
+ continue;
+ if (!test_bit(idx2, snd_cards_lock)) {
+ if (module_slot_match(module, idx2)) {
+@@ -183,7 +183,7 @@ int snd_card_create(int idx, const char *xid,
+ if (idx < 0) {
+ for (idx2 = 0; idx2 < SNDRV_CARDS; idx2++) {
+ /* idx == -1 == 0xffff means: take any free slot */
+- if (idx2 < sizeof(int) && !(idx & (1U << idx2)))
++ if (idx2 < 32 && !(idx & (1U << idx2)))
+ continue;
+ if (!test_bit(idx2, snd_cards_lock)) {
+ if (!slots[idx2] || !*slots[idx2]) {
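Both hunks in snd_card_create() fix the same slip: sizeof(int) is a byte count, so "idx2 < sizeof(int)" examined only bits 0-3 of the slot mask instead of all 32. Two lines of C make the unit mismatch obvious (assuming the usual 4-byte int):

#include <stdio.h>

int main(void)
{
	printf("sizeof(int)=%zu bytes, %zu bits\n",
	       sizeof(int), sizeof(int) * 8);	/* typically 4 bytes, 32 bits */
	return 0;
}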
+diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
+index 46ed9e8ae0fd..cc9fd67a50df 100644
+--- a/sound/pci/Kconfig
++++ b/sound/pci/Kconfig
+@@ -25,6 +25,7 @@ config SND_ALS300
+ select SND_PCM
+ select SND_AC97_CODEC
+ select SND_OPL3_LIB
++ select ZONE_DMA
+ help
+ Say 'Y' or 'M' to include support for Avance Logic ALS300/ALS300+
+
+@@ -49,6 +50,7 @@ config SND_ALI5451
+ tristate "ALi M5451 PCI Audio Controller"
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for the integrated AC97 sound
+ device on motherboards using the ALi M5451 Audio Controller
+@@ -153,6 +155,7 @@ config SND_AZT3328
+ select SND_PCM
+ select SND_RAWMIDI
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for Aztech AZF3328 (PCI168)
+ soundcards.
+@@ -458,6 +461,7 @@ config SND_EMU10K1
+ select SND_HWDEP
+ select SND_RAWMIDI
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y to include support for Sound Blaster PCI 512, Live!,
+ Audigy and E-mu APS (partially supported) soundcards.
+@@ -473,6 +477,7 @@ config SND_EMU10K1X
+ tristate "Emu10k1X (Dell OEM Version)"
+ select SND_AC97_CODEC
+ select SND_RAWMIDI
++ select ZONE_DMA
+ help
+ Say Y here to include support for the Dell OEM version of the
+ Sound Blaster Live!.
+@@ -506,6 +511,7 @@ config SND_ES1938
+ select SND_OPL3_LIB
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on ESS Solo-1
+ (ES1938, ES1946, ES1969) chips.
+@@ -517,6 +523,7 @@ config SND_ES1968
+ tristate "ESS ES1968/1978 (Maestro-1/2/2E)"
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on ESS Maestro
+ 1/2/2E chips.
+@@ -605,6 +612,7 @@ config SND_ICE1712
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
+ select BITREVERSE
++ select ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on the
+ ICE1712 (Envy24) chip.
+@@ -692,6 +700,7 @@ config SND_LX6464ES
+ config SND_MAESTRO3
+ tristate "ESS Allegro/Maestro3"
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on ESS Maestro 3
+ (Allegro) chips.
+@@ -788,6 +797,7 @@ config SND_SIS7019
+ tristate "SiS 7019 Audio Accelerator"
+ depends on X86 && !X86_64
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for the SiS 7019 Audio Accelerator.
+
+@@ -799,6 +809,7 @@ config SND_SONICVIBES
+ select SND_OPL3_LIB
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on the S3
+ SonicVibes chip.
+@@ -810,6 +821,7 @@ config SND_TRIDENT
+ tristate "Trident 4D-Wave DX/NX; SiS 7018"
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on Trident
+ 4D-Wave DX/NX or SiS 7018 chips.
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index c7f6d1cab606..7d4ccfa48008 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -2857,9 +2857,11 @@ static bool look_for_mix_leaf_ctls(struct hda_codec *codec, hda_nid_t mix_nid,
+ if (num_conns < idx)
+ return false;
+ nid = list[idx];
+- if (!*mix_val && nid_has_volume(codec, nid, HDA_OUTPUT))
++ if (!*mix_val && nid_has_volume(codec, nid, HDA_OUTPUT) &&
++ !is_ctl_associated(codec, nid, HDA_OUTPUT, 0, NID_PATH_VOL_CTL))
+ *mix_val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
+- if (!*mute_val && nid_has_mute(codec, nid, HDA_OUTPUT))
++ if (!*mute_val && nid_has_mute(codec, nid, HDA_OUTPUT) &&
++ !is_ctl_associated(codec, nid, HDA_OUTPUT, 0, NID_PATH_MUTE_CTL))
+ *mute_val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
+
+ return *mix_val || *mute_val;
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index 38aa080681a3..31230c68b603 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -171,7 +171,7 @@ static const struct hda_codec_ops ad198x_auto_patch_ops = {
+ };
+
+
+-static int ad198x_parse_auto_config(struct hda_codec *codec)
++static int ad198x_parse_auto_config(struct hda_codec *codec, bool indep_hp)
+ {
+ struct ad198x_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->gen.autocfg;
+@@ -181,7 +181,7 @@ static int ad198x_parse_auto_config(struct hda_codec *codec)
+ codec->no_trigger_sense = 1;
+ codec->no_sticky_stream = 1;
+
+- spec->gen.indep_hp = 1;
++ spec->gen.indep_hp = indep_hp;
+
+ err = snd_hda_parse_pin_defcfg(codec, cfg, NULL, 0);
+ if (err < 0)
+@@ -264,11 +264,11 @@ static const struct hda_fixup ad1986a_fixups[] = {
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x02214021 }, /* headphone */
+ { 0x1b, 0x01014011 }, /* front */
+- { 0x1c, 0x01013012 }, /* surround */
+- { 0x1d, 0x01019015 }, /* clfe */
++ { 0x1c, 0x01813030 }, /* line-in */
++ { 0x1d, 0x01a19020 }, /* rear mic */
+ { 0x1e, 0x411111f0 }, /* N/A */
+ { 0x1f, 0x02a190f0 }, /* mic */
+- { 0x20, 0x018130f0 }, /* line-in */
++ { 0x20, 0x411111f0 }, /* N/A */
+ {}
+ },
+ },
+@@ -362,7 +362,7 @@ static int patch_ad1986a(struct hda_codec *codec)
+ ad1986a_fixups);
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+
+- err = ad198x_parse_auto_config(codec);
++ err = ad198x_parse_auto_config(codec, false);
+ if (err < 0) {
+ snd_hda_gen_free(codec);
+ return err;
+@@ -464,7 +464,7 @@ static int patch_ad1983(struct hda_codec *codec)
+
+ spec->gen.beep_nid = 0x10;
+ set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
+- err = ad198x_parse_auto_config(codec);
++ err = ad198x_parse_auto_config(codec, false);
+ if (err < 0)
+ goto error;
+ err = ad1983_add_spdif_mux_ctl(codec);
+@@ -564,7 +564,7 @@ static int patch_ad1981(struct hda_codec *codec)
+ snd_hda_pick_fixup(codec, NULL, ad1981_fixup_tbl, ad1981_fixups);
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+
+- err = ad198x_parse_auto_config(codec);
++ err = ad198x_parse_auto_config(codec, false);
+ if (err < 0)
+ goto error;
+ err = ad1983_add_spdif_mux_ctl(codec);
+@@ -890,7 +890,7 @@ static int patch_ad1988(struct hda_codec *codec)
+ snd_hda_pick_fixup(codec, ad1988_fixup_models, NULL, ad1988_fixups);
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+
+- err = ad198x_parse_auto_config(codec);
++ err = ad198x_parse_auto_config(codec, true);
+ if (err < 0)
+ goto error;
+ err = ad1988_add_spdif_mux_ctl(codec);
+@@ -1064,7 +1064,7 @@ static int patch_ad1884(struct hda_codec *codec)
+ snd_hda_pick_fixup(codec, NULL, ad1884_fixup_tbl, ad1884_fixups);
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+
+- err = ad198x_parse_auto_config(codec);
++ err = ad198x_parse_auto_config(codec, true);
+ if (err < 0)
+ goto error;
+ err = ad1983_add_spdif_mux_ctl(codec);
+@@ -1106,7 +1106,7 @@ static int patch_ad1882(struct hda_codec *codec)
+ spec->gen.mixer_merge_nid = 0x21;
+ spec->gen.beep_nid = 0x10;
+ set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
+- err = ad198x_parse_auto_config(codec);
++ err = ad198x_parse_auto_config(codec, true);
+ if (err < 0)
+ goto error;
+ err = ad1988_add_spdif_mux_ctl(codec);
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index f26c42c92db7..417e0fc2d119 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -88,6 +88,9 @@ struct hdmi_spec {
+ unsigned int channels_max; /* max over all cvts */
+
+ struct hdmi_eld temp_eld;
++
++ bool dyn_pin_out;
++
+ /*
+ * Non-generic ATI/NVIDIA specific
+ */
+@@ -452,15 +455,25 @@ static void hdmi_write_dip_byte(struct hda_codec *codec, hda_nid_t pin_nid,
+
+ static void hdmi_init_pin(struct hda_codec *codec, hda_nid_t pin_nid)
+ {
++ struct hdmi_spec *spec = codec->spec;
++ int pin_out;
++
+ /* Unmute */
+ if (get_wcaps(codec, pin_nid) & AC_WCAP_OUT_AMP)
+ snd_hda_codec_write(codec, pin_nid, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+- /* Enable pin out: some machines with GM965 gets broken output when
+- * the pin is disabled or changed while using with HDMI
+- */
++
++ if (spec->dyn_pin_out)
++ /* Disable pin out until stream is active */
++ pin_out = 0;
++ else
++ /* Enable pin out: some machines with GM965 gets broken output
++ * when the pin is disabled or changed while using with HDMI
++ */
++ pin_out = PIN_OUT;
++
+ snd_hda_codec_write(codec, pin_nid, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++ AC_VERB_SET_PIN_WIDGET_CONTROL, pin_out);
+ }
+
+ static int hdmi_get_channel_count(struct hda_codec *codec, hda_nid_t cvt_nid)
+@@ -1535,6 +1548,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+ hda_nid_t pin_nid = per_pin->pin_nid;
+ bool non_pcm;
++ int pinctl;
+
+ non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
+ per_pin->channels = substream->runtime->channels;
+@@ -1544,6 +1558,14 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+
+ hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
+
++ if (spec->dyn_pin_out) {
++ pinctl = snd_hda_codec_read(codec, pin_nid, 0,
++ AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
++ snd_hda_codec_write(codec, pin_nid, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL,
++ pinctl | PIN_OUT);
++ }
++
+ return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+ }
+
+@@ -1563,6 +1585,7 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+ int cvt_idx, pin_idx;
+ struct hdmi_spec_per_cvt *per_cvt;
+ struct hdmi_spec_per_pin *per_pin;
++ int pinctl;
+
+ if (hinfo->nid) {
+ cvt_idx = cvt_nid_to_cvt_index(spec, hinfo->nid);
+@@ -1579,6 +1602,14 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+ return -EINVAL;
+ per_pin = get_pin(spec, pin_idx);
+
++ if (spec->dyn_pin_out) {
++ pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
++ AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
++ snd_hda_codec_write(codec, per_pin->pin_nid, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL,
++ pinctl & ~PIN_OUT);
++ }
++
+ snd_hda_spdif_ctls_unassign(codec, pin_idx);
+ per_pin->chmap_set = false;
+ memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
+@@ -2560,6 +2591,21 @@ static int patch_nvhdmi_8ch_7x(struct hda_codec *codec)
+ return 0;
+ }
+
++static int patch_nvhdmi(struct hda_codec *codec)
++{
++ struct hdmi_spec *spec;
++ int err;
++
++ err = patch_generic_hdmi(codec);
++ if (err)
++ return err;
++
++ spec = codec->spec;
++ spec->dyn_pin_out = true;
++
++ return 0;
++}
++
+ /*
+ * ATI-specific implementations
+ *
+@@ -2632,30 +2678,30 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x10de0005, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
+ { .id = 0x10de0006, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
+ { .id = 0x10de0007, .name = "MCP79/7A HDMI", .patch = patch_nvhdmi_8ch_7x },
+-{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_generic_hdmi },
+-{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_generic_hdmi },
++{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_nvhdmi },
++{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi },
+ /* 17 is known to be absent */
+-{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_generic_hdmi },
+-{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_generic_hdmi },
++{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi },
++{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi },
+ { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
+ { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
+ { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index dce47c414ea7..56b62555eef4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1769,6 +1769,7 @@ enum {
+ ALC889_FIXUP_DAC_ROUTE,
+ ALC889_FIXUP_MBP_VREF,
+ ALC889_FIXUP_IMAC91_VREF,
++ ALC889_FIXUP_MBA11_VREF,
+ ALC889_FIXUP_MBA21_VREF,
+ ALC882_FIXUP_INV_DMIC,
+ ALC882_FIXUP_NO_PRIMARY_HP,
+@@ -1898,6 +1899,16 @@ static void alc889_fixup_imac91_vref(struct hda_codec *codec,
+ alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
+ }
+
++/* Set VREF on speaker pins on mba11 */
++static void alc889_fixup_mba11_vref(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ static hda_nid_t nids[1] = { 0x18 };
++
++ if (action == HDA_FIXUP_ACT_INIT)
++ alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
++}
++
+ /* Set VREF on speaker pins on mba21 */
+ static void alc889_fixup_mba21_vref(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+@@ -2104,6 +2115,12 @@ static const struct hda_fixup alc882_fixups[] = {
+ .chained = true,
+ .chain_id = ALC882_FIXUP_GPIO1,
+ },
++ [ALC889_FIXUP_MBA11_VREF] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc889_fixup_mba11_vref,
++ .chained = true,
++ .chain_id = ALC889_FIXUP_MBP_VREF,
++ },
+ [ALC889_FIXUP_MBA21_VREF] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc889_fixup_mba21_vref,
+@@ -2173,7 +2190,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3200, "iMac 7,1 Aluminum", ALC882_FIXUP_EAPD),
+- SND_PCI_QUIRK(0x106b, 0x3400, "MacBookAir 1,1", ALC889_FIXUP_MBP_VREF),
++ SND_PCI_QUIRK(0x106b, 0x3400, "MacBookAir 1,1", ALC889_FIXUP_MBA11_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3500, "MacBookAir 2,1", ALC889_FIXUP_MBA21_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
+diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
+index b96d9e1adf6d..3717f2dd30be 100644
+--- a/sound/pci/rme9652/rme9652.c
++++ b/sound/pci/rme9652/rme9652.c
+@@ -285,7 +285,7 @@ static char channel_map_9636_ds[26] = {
+ /* ADAT channels are remapped */
+ 1, 3, 5, 7, 9, 11, 13, 15,
+ /* channels 8 and 9 are S/PDIF */
+- 24, 25
++ 24, 25,
+ /* others don't exist */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+ };
+diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
+index ebff1128be59..adee866f463f 100644
+--- a/sound/soc/codecs/adau1701.c
++++ b/sound/soc/codecs/adau1701.c
+@@ -71,7 +71,7 @@
+
+ #define ADAU1701_SEROCTL_WORD_LEN_24 0x0000
+ #define ADAU1701_SEROCTL_WORD_LEN_20 0x0001
+-#define ADAU1701_SEROCTL_WORD_LEN_16 0x0010
++#define ADAU1701_SEROCTL_WORD_LEN_16 0x0002
+ #define ADAU1701_SEROCTL_WORD_LEN_MASK 0x0003
+
+ #define ADAU1701_AUXNPOW_VBPD 0x40
+diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
+index 831a34d7cc72..c09a5305d601 100644
+--- a/sound/soc/codecs/wm5110.c
++++ b/sound/soc/codecs/wm5110.c
+@@ -43,6 +43,54 @@ static const struct reg_default wm5110_sysclk_revd_patch[] = {
+ { 0x3133, 0x1201 },
+ { 0x3183, 0x1501 },
+ { 0x31D3, 0x1401 },
++ { 0x0049, 0x01ea },
++ { 0x004a, 0x01f2 },
++ { 0x0057, 0x01e7 },
++ { 0x0058, 0x01fb },
++ { 0x33ce, 0xc4f5 },
++ { 0x33cf, 0x1361 },
++ { 0x33d0, 0x0402 },
++ { 0x33d1, 0x4700 },
++ { 0x33d2, 0x026d },
++ { 0x33d3, 0xff00 },
++ { 0x33d4, 0x026d },
++ { 0x33d5, 0x0101 },
++ { 0x33d6, 0xc4f5 },
++ { 0x33d7, 0x0361 },
++ { 0x33d8, 0x0402 },
++ { 0x33d9, 0x6701 },
++ { 0x33da, 0xc4f5 },
++ { 0x33db, 0x136f },
++ { 0x33dc, 0xc4f5 },
++ { 0x33dd, 0x134f },
++ { 0x33de, 0xc4f5 },
++ { 0x33df, 0x131f },
++ { 0x33e0, 0x026d },
++ { 0x33e1, 0x4f01 },
++ { 0x33e2, 0x026d },
++ { 0x33e3, 0xf100 },
++ { 0x33e4, 0x026d },
++ { 0x33e5, 0x0001 },
++ { 0x33e6, 0xc4f5 },
++ { 0x33e7, 0x0361 },
++ { 0x33e8, 0x0402 },
++ { 0x33e9, 0x6601 },
++ { 0x33ea, 0xc4f5 },
++ { 0x33eb, 0x136f },
++ { 0x33ec, 0xc4f5 },
++ { 0x33ed, 0x134f },
++ { 0x33ee, 0xc4f5 },
++ { 0x33ef, 0x131f },
++ { 0x33f0, 0x026d },
++ { 0x33f1, 0x4e01 },
++ { 0x33f2, 0x026d },
++ { 0x33f3, 0xf000 },
++ { 0x33f6, 0xc4f5 },
++ { 0x33f7, 0x1361 },
++ { 0x33f8, 0x0402 },
++ { 0x33f9, 0x4600 },
++ { 0x33fa, 0x026d },
++ { 0x33fb, 0xfe00 },
+ };
+
+ static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 568b750c01f6..9d78c70be71e 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -870,6 +870,7 @@ static struct machine *
+ struct perf_sample *sample)
+ {
+ const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
++ struct machine *machine;
+
+ if (perf_guest &&
+ ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
+@@ -882,7 +883,11 @@ static struct machine *
+ else
+ pid = sample->pid;
+
+- return perf_session__findnew_machine(session, pid);
++ machine = perf_session__find_machine(session, pid);
++ if (!machine)
++ machine = perf_session__findnew_machine(session,
++ DEFAULT_GUEST_KERNEL_ID);
++ return machine;
+ }
+
+ return &session->machines.host;
diff --git a/1010_linux-3.12.11.patch b/1010_linux-3.12.11.patch
new file mode 100644
index 00000000..bf6dfc80
--- /dev/null
+++ b/1010_linux-3.12.11.patch
@@ -0,0 +1,4640 @@
+diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
+index 823c95faebd2..c4564e1b42e6 100644
+--- a/Documentation/filesystems/proc.txt
++++ b/Documentation/filesystems/proc.txt
+@@ -1376,8 +1376,8 @@ may allocate from based on an estimation of its current memory and swap use.
+ For example, if a task is using all allowed memory, its badness score will be
+ 1000. If it is using half of its allowed memory, its score will be 500.
+
+-There is an additional factor included in the badness score: root
+-processes are given 3% extra memory over other tasks.
++There is an additional factor included in the badness score: the current memory
++and swap usage is discounted by 3% for root processes.
+
+ The amount of "allowed" memory depends on the context in which the oom killer
+ was called. If it is due to the memory assigned to the allocating task's cpuset
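The reworded proc.txt paragraph describes the new root bonus: rather than a flat allowance, the task's current usage is discounted by 3% before scoring. Illustrative arithmetic matching that description (the exact kernel formula is more involved; this only mirrors the documented proportions):

#include <stdio.h>

int main(void)
{
	long allowed = 1000000;			/* pages the task may use */
	long usage   = 500000;			/* current memory + swap */
	long points  = usage * 1000 / allowed;	/* badness: 500 */
	long root    = points - points * 3 / 100;	/* root discount: 485 */

	printf("points=%ld root=%ld\n", points, root);
	return 0;
}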
+diff --git a/Makefile b/Makefile
+index 49b64402f947..b9e092666bf9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.c b/arch/arm/mach-mvebu/mvebu-soc-id.c
+index fe4fc1cbdfaf..f3b325f6cbd4 100644
+--- a/arch/arm/mach-mvebu/mvebu-soc-id.c
++++ b/arch/arm/mach-mvebu/mvebu-soc-id.c
+@@ -88,7 +88,7 @@ static int __init mvebu_soc_id_init(void)
+ }
+
+ pci_base = of_iomap(child, 0);
+- if (IS_ERR(pci_base)) {
++ if (pci_base == NULL) {
+ pr_err("cannot map registers\n");
+ ret = -ENOMEM;
+ goto res_ioremap;
+diff --git a/arch/arm/plat-orion/irq.c b/arch/arm/plat-orion/irq.c
+index c492e1b3dfdb..807df142444b 100644
+--- a/arch/arm/plat-orion/irq.c
++++ b/arch/arm/plat-orion/irq.c
+@@ -15,8 +15,51 @@
+ #include <linux/io.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
++#include <asm/exception.h>
+ #include <plat/irq.h>
+ #include <plat/orion-gpio.h>
++#include <mach/bridge-regs.h>
++
++#ifdef CONFIG_MULTI_IRQ_HANDLER
++/*
++ * Compiling with both non-DT and DT support enabled, will
++ * break asm irq handler used by non-DT boards. Therefore,
++ * we provide a C-style irq handler even for non-DT boards,
++ * if MULTI_IRQ_HANDLER is set.
++ *
++ * Notes:
++ * - this is prepared for Kirkwood and Dove only, update
++ * accordingly if you add Orion5x or MV78x00.
++ * - Orion5x uses different macro names and has only one
++ * set of CAUSE/MASK registers.
++ * - MV78x00 uses the same macro names but has a third
++ * set of CAUSE/MASK registers.
++ *
++ */
++
++static void __iomem *orion_irq_base = IRQ_VIRT_BASE;
++
++asmlinkage void
++__exception_irq_entry orion_legacy_handle_irq(struct pt_regs *regs)
++{
++ u32 stat;
++
++ stat = readl_relaxed(orion_irq_base + IRQ_CAUSE_LOW_OFF);
++ stat &= readl_relaxed(orion_irq_base + IRQ_MASK_LOW_OFF);
++ if (stat) {
++ unsigned int hwirq = __fls(stat);
++ handle_IRQ(hwirq, regs);
++ return;
++ }
++ stat = readl_relaxed(orion_irq_base + IRQ_CAUSE_HIGH_OFF);
++ stat &= readl_relaxed(orion_irq_base + IRQ_MASK_HIGH_OFF);
++ if (stat) {
++ unsigned int hwirq = 32 + __fls(stat);
++ handle_IRQ(hwirq, regs);
++ return;
++ }
++}
++#endif
+
+ void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr)
+ {
+@@ -35,6 +78,10 @@ void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr)
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
++
++#ifdef CONFIG_MULTI_IRQ_HANDLER
++ set_handle_irq(orion_legacy_handle_irq);
++#endif
+ }
+
+ #ifdef CONFIG_OF
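orion_legacy_handle_irq() above dispatches the highest-numbered pending source first: it ANDs CAUSE with MASK and feeds the result to __fls(). A userspace equivalent using the GCC builtin (for nonzero x, __fls(x) is 31 minus the count of leading zeros):

#include <stdio.h>

static unsigned int fls_index(unsigned int x)	/* x must be nonzero */
{
	return 31 - (unsigned int)__builtin_clz(x);
}

int main(void)
{
	unsigned int cause = 0x00000810;	/* sources 4 and 11 pending */
	unsigned int mask  = 0x00000fff;	/* sources 0..11 enabled */
	unsigned int stat  = cause & mask;

	if (stat)
		printf("handle hwirq %u first\n", fls_index(stat));	/* 11 */
	return 0;
}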
+diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
+index 38b313909ac9..adad46e41a1d 100644
+--- a/arch/sh/kernel/kgdb.c
++++ b/arch/sh/kernel/kgdb.c
+@@ -13,6 +13,7 @@
+ #include <linux/kdebug.h>
+ #include <linux/irq.h>
+ #include <linux/io.h>
++#include <linux/sched.h>
+ #include <asm/cacheflush.h>
+ #include <asm/traps.h>
+
+diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
+index 78f1f2ded86c..ffd4493efc78 100644
+--- a/arch/tile/include/asm/compat.h
++++ b/arch/tile/include/asm/compat.h
+@@ -281,7 +281,6 @@ long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
+ u32 dummy, u32 low, u32 high);
+ long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
+ u32 dummy, u32 low, u32 high);
+-long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len);
+ long compat_sys_sync_file_range2(int fd, unsigned int flags,
+ u32 offset_lo, u32 offset_hi,
+ u32 nbytes_lo, u32 nbytes_hi);
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index 0ecac257fb26..840c127a938e 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -121,7 +121,8 @@
+
+ /* Set of bits not changed in pte_modify */
+ #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
+- _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
++ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
++ _PAGE_SOFT_DIRTY)
+ #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
+
+ #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
+diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
+index 0a7852483ffe..ab84ac198a9a 100644
+--- a/arch/x86/xen/platform-pci-unplug.c
++++ b/arch/x86/xen/platform-pci-unplug.c
+@@ -69,6 +69,80 @@ static int check_platform_magic(void)
+ return 0;
+ }
+
++bool xen_has_pv_devices()
++{
++ if (!xen_domain())
++ return false;
++
++ /* PV domains always have them. */
++ if (xen_pv_domain())
++ return true;
++
++ /* And user has xen_platform_pci=0 set in guest config as
++ * driver did not modify the value. */
++ if (xen_platform_pci_unplug == 0)
++ return false;
++
++ if (xen_platform_pci_unplug & XEN_UNPLUG_NEVER)
++ return false;
++
++ if (xen_platform_pci_unplug & XEN_UNPLUG_ALL)
++ return true;
++
++ /* This is an odd one - we are going to run legacy
++ * and PV drivers at the same time. */
++ if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY)
++ return true;
++
++ /* And the caller has to follow with xen_pv_{disk,nic}_devices
++ * to be certain which driver can load. */
++ return false;
++}
++EXPORT_SYMBOL_GPL(xen_has_pv_devices);
++
++static bool __xen_has_pv_device(int state)
++{
++ /* HVM domains might or might not */
++ if (xen_hvm_domain() && (xen_platform_pci_unplug & state))
++ return true;
++
++ return xen_has_pv_devices();
++}
++
++bool xen_has_pv_nic_devices(void)
++{
++ return __xen_has_pv_device(XEN_UNPLUG_ALL_NICS | XEN_UNPLUG_ALL);
++}
++EXPORT_SYMBOL_GPL(xen_has_pv_nic_devices);
++
++bool xen_has_pv_disk_devices(void)
++{
++ return __xen_has_pv_device(XEN_UNPLUG_ALL_IDE_DISKS |
++ XEN_UNPLUG_AUX_IDE_DISKS | XEN_UNPLUG_ALL);
++}
++EXPORT_SYMBOL_GPL(xen_has_pv_disk_devices);
++
++/*
++ * This one is odd - it determines whether you want to run PV _and_
++ * legacy (IDE) drivers together. This combination is only possible
++ * under HVM.
++ */
++bool xen_has_pv_and_legacy_disk_devices(void)
++{
++ if (!xen_domain())
++ return false;
++
++ /* N.B. This is only ever used in HVM mode */
++ if (xen_pv_domain())
++ return false;
++
++ if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY)
++ return true;
++
++ return false;
++}
++EXPORT_SYMBOL_GPL(xen_has_pv_and_legacy_disk_devices);
++
+ void xen_unplug_emulated_devices(void)
+ {
+ int r;
+diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
+index 74bb74fa3f87..ea2d39dd912a 100644
+--- a/arch/xtensa/platforms/xtfpga/setup.c
++++ b/arch/xtensa/platforms/xtfpga/setup.c
+@@ -194,7 +194,7 @@ void __init platform_calibrate_ccount(void)
+ * Ethernet -- OpenCores Ethernet MAC (ethoc driver)
+ */
+
+-static struct resource ethoc_res[] __initdata = {
++static struct resource ethoc_res[] = {
+ [0] = { /* register space */
+ .start = OETH_REGS_PADDR,
+ .end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
+@@ -212,7 +212,7 @@ static struct resource ethoc_res[] __initdata = {
+ },
+ };
+
+-static struct ethoc_platform_data ethoc_pdata __initdata = {
++static struct ethoc_platform_data ethoc_pdata = {
+ /*
+ * The MAC address for these boards is 00:50:c2:13:6f:xx.
+ * The last byte (here as zero) is read from the DIP switches on the
+@@ -222,7 +222,7 @@ static struct ethoc_platform_data ethoc_pdata __initdata = {
+ .phy_id = -1,
+ };
+
+-static struct platform_device ethoc_device __initdata = {
++static struct platform_device ethoc_device = {
+ .name = "ethoc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ethoc_res),
+@@ -236,13 +236,13 @@ static struct platform_device ethoc_device __initdata = {
+ * UART
+ */
+
+-static struct resource serial_resource __initdata = {
++static struct resource serial_resource = {
+ .start = DUART16552_PADDR,
+ .end = DUART16552_PADDR + 0x1f,
+ .flags = IORESOURCE_MEM,
+ };
+
+-static struct plat_serial8250_port serial_platform_data[] __initdata = {
++static struct plat_serial8250_port serial_platform_data[] = {
+ [0] = {
+ .mapbase = DUART16552_PADDR,
+ .irq = DUART16552_INTNUM,
+@@ -255,7 +255,7 @@ static struct plat_serial8250_port serial_platform_data[] __initdata = {
+ { },
+ };
+
+-static struct platform_device xtavnet_uart __initdata = {
++static struct platform_device xtavnet_uart = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 661a5b7f5104..7d83ef13186f 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -33,6 +33,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/acpi.h>
+ #include <linux/slab.h>
++#include <linux/regulator/machine.h>
+ #ifdef CONFIG_X86
+ #include <asm/mpspec.h>
+ #endif
+@@ -575,6 +576,14 @@ void __init acpi_early_init(void)
+ goto error0;
+ }
+
++ /*
++ * If the system is using ACPI then we can be reasonably
++ * confident that any regulators are managed by the firmware
++ * so tell the regulator core it has everything it needs to
++ * know.
++ */
++ regulator_has_full_constraints();
++
+ return;
+
+ error0:
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index a4660bbee8a6..ed88b3c2e8ea 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1278,7 +1278,7 @@ static int blkfront_probe(struct xenbus_device *dev,
+ char *type;
+ int len;
+ /* no unplug has been done: do not hook devices != xen vbds */
+- if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
++ if (xen_has_pv_and_legacy_disk_devices()) {
+ int major;
+
+ if (!VDEV_IS_EXTENDED(vdevice))
+@@ -2022,7 +2022,7 @@ static int __init xlblk_init(void)
+ if (!xen_domain())
+ return -ENODEV;
+
+- if (xen_hvm_domain() && !xen_platform_pci_unplug)
++ if (!xen_has_pv_disk_devices())
+ return -ENODEV;
+
+ if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
+diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
+index 94c280d36e8b..afa9362f4f4d 100644
+--- a/drivers/char/tpm/xen-tpmfront.c
++++ b/drivers/char/tpm/xen-tpmfront.c
+@@ -17,6 +17,7 @@
+ #include <xen/xenbus.h>
+ #include <xen/page.h>
+ #include "tpm.h"
++#include <xen/platform_pci.h>
+
+ struct tpm_private {
+ struct tpm_chip *chip;
+@@ -423,6 +424,9 @@ static int __init xen_tpmfront_init(void)
+ if (!xen_domain())
+ return -ENODEV;
+
++ if (!xen_has_pv_devices())
++ return -ENODEV;
++
+ return xenbus_register_frontend(&tpmfront_driver);
+ }
+ module_init(xen_tpmfront_init);
+diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
+index 272a3ec35957..0314dde18a5d 100644
+--- a/drivers/eisa/eisa-bus.c
++++ b/drivers/eisa/eisa-bus.c
+@@ -275,11 +275,13 @@ static int __init eisa_request_resources(struct eisa_root_device *root,
+ }
+
+ if (slot) {
++ edev->res[i].name = NULL;
+ edev->res[i].start = SLOT_ADDRESS(root, slot)
+ + (i * 0x400);
+ edev->res[i].end = edev->res[i].start + 0xff;
+ edev->res[i].flags = IORESOURCE_IO;
+ } else {
++ edev->res[i].name = NULL;
+ edev->res[i].start = SLOT_ADDRESS(root, slot)
+ + EISA_VENDOR_ID_OFFSET;
+ edev->res[i].end = edev->res[i].start + 3;
+@@ -326,19 +328,20 @@ static int __init eisa_probe(struct eisa_root_device *root)
+ return -ENOMEM;
+ }
+
+- if (eisa_init_device(root, edev, 0)) {
++ if (eisa_request_resources(root, edev, 0)) {
++ dev_warn(root->dev,
++ "EISA: Cannot allocate resource for mainboard\n");
+ kfree(edev);
+ if (!root->force_probe)
+- return -ENODEV;
++ return -EBUSY;
+ goto force_probe;
+ }
+
+- if (eisa_request_resources(root, edev, 0)) {
+- dev_warn(root->dev,
+- "EISA: Cannot allocate resource for mainboard\n");
++ if (eisa_init_device(root, edev, 0)) {
++ eisa_release_resources(edev);
+ kfree(edev);
+ if (!root->force_probe)
+- return -EBUSY;
++ return -ENODEV;
+ goto force_probe;
+ }
+
+@@ -361,11 +364,6 @@ static int __init eisa_probe(struct eisa_root_device *root)
+ continue;
+ }
+
+- if (eisa_init_device(root, edev, i)) {
+- kfree(edev);
+- continue;
+- }
+-
+ if (eisa_request_resources(root, edev, i)) {
+ dev_warn(root->dev,
+ "Cannot allocate resource for EISA slot %d\n",
+@@ -374,6 +372,12 @@ static int __init eisa_probe(struct eisa_root_device *root)
+ continue;
+ }
+
++ if (eisa_init_device(root, edev, i)) {
++ eisa_release_resources(edev);
++ kfree(edev);
++ continue;
++ }
++
+ if (edev->state == (EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED))
+ enabled_str = " (forced enabled)";
+ else if (edev->state == EISA_CONFIG_FORCED)
+diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
+index 7b33e14e44aa..a28640f47c27 100644
+--- a/drivers/gpu/drm/ast/ast_fb.c
++++ b/drivers/gpu/drm/ast/ast_fb.c
+@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
+- if (!in_interrupt())
++ if (drm_can_sleep())
+ ret = ast_bo_reserve(bo, true);
+ if (ret) {
+ if (ret != -EBUSY)
+diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+index b27e95666fab..86d779a9c245 100644
+--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
++++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
+- if (!in_interrupt())
++ if (drm_can_sleep())
+ ret = cirrus_bo_reserve(bo, true);
+ if (ret) {
+ if (ret != -EBUSY)
+diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
+index 60685b21cc36..379a47ea99f6 100644
+--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
++++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
+@@ -273,8 +273,8 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
+ sr07 |= 0x11;
+ break;
+ case 16:
+- sr07 |= 0xc1;
+- hdr = 0xc0;
++ sr07 |= 0x17;
++ hdr = 0xc1;
+ break;
+ case 24:
+ sr07 |= 0x15;
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 49293bdc972a..da0c0080ac17 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -129,11 +129,12 @@ int drm_gem_object_init(struct drm_device *dev,
+ {
+ struct file *filp;
+
++ drm_gem_private_object_init(dev, obj, size);
++
+ filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
+
+- drm_gem_private_object_init(dev, obj, size);
+ obj->filp = filp;
+
+ return 0;
+diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
+index 24e8af3d22bf..386de2c9dc86 100644
+--- a/drivers/gpu/drm/gma500/gma_display.c
++++ b/drivers/gpu/drm/gma500/gma_display.c
+@@ -349,6 +349,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
+ /* If we didn't get a handle then turn the cursor off */
+ if (!handle) {
+ temp = CURSOR_MODE_DISABLE;
++ mutex_lock(&dev->struct_mutex);
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+@@ -365,6 +366,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
+ gma_crtc->cursor_obj = NULL;
+ }
+
++ mutex_unlock(&dev->struct_mutex);
+ return 0;
+ }
+
+@@ -374,9 +376,12 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
+ return -EINVAL;
+ }
+
++ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+- if (!obj)
+- return -ENOENT;
++ if (!obj) {
++ ret = -ENOENT;
++ goto unlock;
++ }
+
+ if (obj->size < width * height * 4) {
+ dev_dbg(dev->dev, "Buffer is too small\n");
+@@ -440,10 +445,13 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
+ }
+
+ gma_crtc->cursor_obj = obj;
++unlock:
++ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+ unref_cursor:
+ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 5a25f2476c3b..50d42daae15f 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1683,6 +1683,7 @@ out_gem_unload:
+
+ intel_teardown_gmbus(dev);
+ intel_teardown_mchbar(dev);
++ pm_qos_remove_request(&dev_priv->pm_qos);
+ destroy_workqueue(dev_priv->wq);
+ out_mtrrfree:
+ arch_phys_wc_del(dev_priv->gtt.mtrr);
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index ab0f2c0a440c..881c9af0971d 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -296,6 +296,7 @@ struct drm_i915_error_state {
+ u64 fence[I915_MAX_NUM_FENCES];
+ struct timeval time;
+ struct drm_i915_error_ring {
++ bool valid;
+ struct drm_i915_error_object {
+ int page_count;
+ u32 gtt_offset;
+diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
+index e15a1d90037d..fe4a7d16e261 100644
+--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
+@@ -250,7 +250,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
+ }
+
+ sg = st->sgl;
+- sg->offset = offset;
++ sg->offset = 0;
+ sg->length = size;
+
+ sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
+diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
+index dae364f0028c..354e3e32b30e 100644
+--- a/drivers/gpu/drm/i915/i915_gpu_error.c
++++ b/drivers/gpu/drm/i915/i915_gpu_error.c
+@@ -221,6 +221,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
+ unsigned ring)
+ {
+ BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
++ if (!error->ring[ring].valid)
++ return;
++
+ err_printf(m, "%s command stream:\n", ring_str(ring));
+ err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
+ err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+@@ -272,7 +275,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev = error_priv->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error = error_priv->error;
+- struct intel_ring_buffer *ring;
+ int i, j, page, offset, elt;
+
+ if (!error) {
+@@ -306,7 +308,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
+ if (INTEL_INFO(dev)->gen == 7)
+ err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
+- for_each_ring(ring, dev_priv, i)
++ for (i = 0; i < ARRAY_SIZE(error->ring); i++)
+ i915_ring_error_state(m, dev, error, i);
+
+ if (error->active_bo)
+@@ -363,8 +365,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
+ }
+ }
+
+- obj = error->ring[i].ctx;
+- if (obj) {
++ if ((obj = error->ring[i].ctx)) {
+ err_printf(m, "%s --- HW Context = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+@@ -644,7 +645,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+ return NULL;
+
+ obj = ring->scratch.obj;
+- if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
++ if (obj != NULL &&
++ acthd >= i915_gem_obj_ggtt_offset(obj) &&
+ acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
+ return i915_error_object_create(dev_priv, obj);
+ }
+@@ -747,11 +749,17 @@ static void i915_gem_record_rings(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct intel_ring_buffer *ring;
+ struct drm_i915_gem_request *request;
+ int i, count;
+
+- for_each_ring(ring, dev_priv, i) {
++ for (i = 0; i < I915_NUM_RINGS; i++) {
++ struct intel_ring_buffer *ring = &dev_priv->ring[i];
++
++ if (ring->dev == NULL)
++ continue;
++
++ error->ring[i].valid = true;
++
+ i915_record_ring_state(dev, error, ring);
+
+ error->ring[i].batchbuffer =
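The i915 error-capture hunks replace for_each_ring() — which skips rings that were never initialized — with a plain walk over the whole array, and gate both capture and printing on a per-slot valid flag, so the dump side never touches state that was never recorded. A runnable sketch of that capture-then-dump pattern (ring count and register values are made up):

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_RINGS 3

	struct ring_error { bool valid; unsigned int head, tail; };

	/* Capture: only rings that exist get recorded and marked valid. */
	static void record_rings(struct ring_error err[NUM_RINGS],
				 const bool present[NUM_RINGS])
	{
		for (int i = 0; i < NUM_RINGS; i++) {
			if (!present[i])
				continue;	/* slot stays zeroed, valid == false */
			err[i].valid = true;
			err[i].head = 0x100 * i;	/* fake register snapshot */
			err[i].tail = 0x100 * i + 8;
		}
	}

	/* Dump: walk the whole array, skipping slots never captured. */
	static void print_rings(const struct ring_error err[NUM_RINGS])
	{
		for (int i = 0; i < NUM_RINGS; i++) {
			if (!err[i].valid)
				continue;
			printf("ring %d: HEAD 0x%08x TAIL 0x%08x\n",
			       i, err[i].head, err[i].tail);
		}
	}

	int main(void)
	{
		struct ring_error err[NUM_RINGS] = { 0 };
		bool present[NUM_RINGS] = { true, false, true };

		record_rings(err, present);
		print_rings(err);
		return 0;
	}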
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index ef9b35479f01..375abe708268 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1955,9 +1955,13 @@
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+-#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
+-#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
+-#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
++#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
++#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
++#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
++/* VLV DP/HDMI bits again match Bspec */
++#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
++#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
++#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
+ #define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+ #define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+ #define PORTB_HOTPLUG_INT_STATUS (3 << 17)
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 1a431377d83b..5a97f7356843 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -2803,18 +2803,34 @@ g4x_dp_detect(struct intel_dp *intel_dp)
+ return status;
+ }
+
+- switch (intel_dig_port->port) {
+- case PORT_B:
+- bit = PORTB_HOTPLUG_LIVE_STATUS;
+- break;
+- case PORT_C:
+- bit = PORTC_HOTPLUG_LIVE_STATUS;
+- break;
+- case PORT_D:
+- bit = PORTD_HOTPLUG_LIVE_STATUS;
+- break;
+- default:
+- return connector_status_unknown;
++ if (IS_VALLEYVIEW(dev)) {
++ switch (intel_dig_port->port) {
++ case PORT_B:
++ bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
++ break;
++ case PORT_C:
++ bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
++ break;
++ case PORT_D:
++ bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
++ break;
++ default:
++ return connector_status_unknown;
++ }
++ } else {
++ switch (intel_dig_port->port) {
++ case PORT_B:
++ bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
++ break;
++ case PORT_C:
++ bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
++ break;
++ case PORT_D:
++ bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
++ break;
++ default:
++ return connector_status_unknown;
++ }
+ }
+
+ if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
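The two switch statements exist because, per the i915_reg.h hunk above, g4x puts the port B/C/D live-status bits at 27/28/29 while Valleyview wires them in the opposite order (B at 29, D at 27, C unchanged at 28). A runnable sketch of the per-platform bit selection using exactly those shift values:

	#include <stdint.h>
	#include <stdio.h>

	enum port { PORT_B, PORT_C, PORT_D };

	/* Bit positions copied from the i915_reg.h hunk above. */
	static uint32_t live_status_bit(enum port port, int is_vlv)
	{
		static const int g4x_shift[] = { 27, 28, 29 };	/* B, C, D */
		static const int vlv_shift[] = { 29, 28, 27 };	/* B, C, D: reversed */

		return 1u << (is_vlv ? vlv_shift[port] : g4x_shift[port]);
	}

	int main(void)
	{
		printf("PORT_B: g4x 0x%08x, vlv 0x%08x\n",
		       live_status_bit(PORT_B, 0), live_status_bit(PORT_B, 1));
		return 0;
	}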
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 460ee1026fca..43719bbb2595 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -1501,8 +1501,8 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+ }
+
+-static int __intel_ring_begin(struct intel_ring_buffer *ring,
+- int bytes)
++static int __intel_ring_prepare(struct intel_ring_buffer *ring,
++ int bytes)
+ {
+ int ret;
+
+@@ -1518,7 +1518,6 @@ static int __intel_ring_begin(struct intel_ring_buffer *ring,
+ return ret;
+ }
+
+- ring->space -= bytes;
+ return 0;
+ }
+
+@@ -1533,12 +1532,17 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
+ if (ret)
+ return ret;
+
++ ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
++ if (ret)
++ return ret;
++
+ /* Preallocate the olr before touching the ring */
+ ret = intel_ring_alloc_seqno(ring);
+ if (ret)
+ return ret;
+
+- return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
++ ring->space -= num_dwords * sizeof(uint32_t);
++ return 0;
+ }
+
+ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
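The ringbuffer hunk reorders intel_ring_begin() so that the space check (__intel_ring_prepare, which may wait or wrap) runs first, the lazy seqno is allocated second, and ring->space is only decremented once both steps have succeeded — presumably so a failure in either step leaves the ring accounting untouched. A toy sketch of that commit-accounting-last discipline (the helpers are simplified stand-ins):

	#include <errno.h>
	#include <stdio.h>

	struct ring { int space; int lazy_seqno; };

	static int prepare(struct ring *r, int bytes)
	{
		/* stands in for wrap-or-wait-for-space; may fail */
		return r->space >= bytes ? 0 : -EAGAIN;
	}

	static int alloc_seqno(struct ring *r)
	{
		if (!r->lazy_seqno)
			r->lazy_seqno = 42;	/* pretend allocation; may fail */
		return 0;
	}

	static int ring_begin(struct ring *r, int dwords)
	{
		int bytes = dwords * 4, ret;

		ret = prepare(r, bytes);	/* 1. make room first */
		if (ret)
			return ret;
		ret = alloc_seqno(r);		/* 2. then allocate the seqno */
		if (ret)
			return ret;
		r->space -= bytes;		/* 3. commit accounting only on success */
		return 0;
	}

	int main(void)
	{
		struct ring r = { 64, 0 };
		int ret = ring_begin(&r, 4);

		printf("begin(4) -> %d, space now %d\n", ret, r.space);
		return 0;
	}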
+diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
+index 801731aeab61..9f9780b7ddf0 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
++++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
+@@ -22,8 +22,10 @@ static void mga_hide_cursor(struct mga_device *mdev)
+ {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+- mgag200_bo_unpin(mdev->cursor.pixels_1);
+- mgag200_bo_unpin(mdev->cursor.pixels_2);
++ if (mdev->cursor.pixels_1->pin_count)
++ mgag200_bo_unpin(mdev->cursor.pixels_1);
++ if (mdev->cursor.pixels_2->pin_count)
++ mgag200_bo_unpin(mdev->cursor.pixels_2);
+ }
+
+ int mga_crtc_cursor_set(struct drm_crtc *crtc,
+@@ -32,7 +34,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
+ uint32_t width,
+ uint32_t height)
+ {
+- struct drm_device *dev = (struct drm_device *)file_priv->minor->dev;
++ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = (struct mga_device *)dev->dev_private;
+ struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1;
+ struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2;
+diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
+index 964f58cee5ea..d29bb335cccc 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
++++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
+@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
+- if (!in_interrupt())
++ if (drm_can_sleep())
+ ret = mgag200_bo_reserve(bo, true);
+ if (ret) {
+ if (ret != -EBUSY)
+diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
+index 503a414cbdad..1288cd9f67d1 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -1521,11 +1521,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
+ (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (32700 * 1024))) {
+ return MODE_BANDWIDTH;
+- } else if (mode->type == G200_EH &&
++ } else if (mdev->type == G200_EH &&
+ (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (37500 * 1024))) {
+ return MODE_BANDWIDTH;
+- } else if (mode->type == G200_ER &&
++ } else if (mdev->type == G200_ER &&
+ (mga_vga_calculate_mode_bandwidth(mode,
+ bpp) > (55000 * 1024))) {
+ return MODE_BANDWIDTH;
+diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
+index e03fc8e4dc1d..5e077e4ed7f6 100644
+--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
++++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
+@@ -56,6 +56,16 @@ _nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
+ nv_wr32(falcon, falcon->addr + addr, data);
+ }
+
++static void *
++vmemdup(const void *src, size_t len)
++{
++ void *p = vmalloc(len);
++
++ if (p)
++ memcpy(p, src, len);
++ return p;
++}
++
+ int
+ _nouveau_falcon_init(struct nouveau_object *object)
+ {
+@@ -111,7 +121,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret == 0) {
+- falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
++ falcon->code.data = vmemdup(fw->data, fw->size);
+ falcon->code.size = fw->size;
+ falcon->data.data = NULL;
+ falcon->data.size = 0;
+@@ -134,7 +144,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
+ return ret;
+ }
+
+- falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
++ falcon->data.data = vmemdup(fw->data, fw->size);
+ falcon->data.size = fw->size;
+ release_firmware(fw);
+ if (!falcon->data.data)
+@@ -149,7 +159,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
+ return ret;
+ }
+
+- falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
++ falcon->code.data = vmemdup(fw->data, fw->size);
+ falcon->code.size = fw->size;
+ release_firmware(fw);
+ if (!falcon->code.data)
+@@ -235,8 +245,8 @@ _nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
+ if (!suspend) {
+ nouveau_gpuobj_ref(NULL, &falcon->core);
+ if (falcon->external) {
+- kfree(falcon->data.data);
+- kfree(falcon->code.data);
++ vfree(falcon->data.data);
++ vfree(falcon->code.data);
+ falcon->code.data = NULL;
+ }
+ }
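kmemdup() needs physically contiguous pages from kmalloc, which can fail for firmware blobs of this size, so the hunk copies them into vmalloc memory instead; note the frees in _nouveau_falcon_fini() switch to vfree() in the same patch, since allocator and free routine must always pair up. A userspace analog of the helper, with malloc standing in for vmalloc:

	#include <stdlib.h>
	#include <string.h>

	/* Analog of vmemdup() above: duplicate len bytes or return NULL;
	 * the caller owns the copy and must free() it (vfree() in-kernel). */
	static void *memdup(const void *src, size_t len)
	{
		void *p = malloc(len);

		if (p)
			memcpy(p, src, len);
		return p;
	}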
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 755c38d06271..60a97b6b908c 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -802,25 +802,25 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+ {
+ struct nouveau_mem *node = old_mem->mm_node;
+- struct nouveau_bo *nvbo = nouveau_bo(bo);
+ u64 length = (new_mem->num_pages << PAGE_SHIFT);
+ u64 src_offset = node->vma[0].offset;
+ u64 dst_offset = node->vma[1].offset;
++ int src_tiled = !!node->memtype;
++ int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
+ int ret;
+
+ while (length) {
+ u32 amount, stride, height;
+
++ ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
++ if (ret)
++ return ret;
++
+ amount = min(length, (u64)(4 * 1024 * 1024));
+ stride = 16 * 4;
+ height = amount / stride;
+
+- if (old_mem->mem_type == TTM_PL_VRAM &&
+- nouveau_bo_tile_layout(nvbo)) {
+- ret = RING_SPACE(chan, 8);
+- if (ret)
+- return ret;
+-
++ if (src_tiled) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+@@ -830,19 +830,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ } else {
+- ret = RING_SPACE(chan, 2);
+- if (ret)
+- return ret;
+-
+ BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
+ OUT_RING (chan, 1);
+ }
+- if (new_mem->mem_type == TTM_PL_VRAM &&
+- nouveau_bo_tile_layout(nvbo)) {
+- ret = RING_SPACE(chan, 8);
+- if (ret)
+- return ret;
+-
++ if (dst_tiled) {
+ BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+@@ -852,18 +843,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ } else {
+- ret = RING_SPACE(chan, 2);
+- if (ret)
+- return ret;
+-
+ BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
+ OUT_RING (chan, 1);
+ }
+
+- ret = RING_SPACE(chan, 14);
+- if (ret)
+- return ret;
+-
+ BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
+ OUT_RING (chan, upper_32_bits(src_offset));
+ OUT_RING (chan, upper_32_bits(dst_offset));
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 368e1b84f429..0ee2cf5cf76e 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -209,6 +209,16 @@ static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+
++static const u32 vga_control_regs[6] =
++{
++ AVIVO_D1VGA_CONTROL,
++ AVIVO_D2VGA_CONTROL,
++ EVERGREEN_D3VGA_CONTROL,
++ EVERGREEN_D4VGA_CONTROL,
++ EVERGREEN_D5VGA_CONTROL,
++ EVERGREEN_D6VGA_CONTROL,
++};
++
+ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
+ {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+@@ -216,13 +226,23 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
+ struct radeon_device *rdev = dev->dev_private;
+ int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
+ BLANK_CRTC_PS_ALLOCATION args;
++ u32 vga_control = 0;
+
+ memset(&args, 0, sizeof(args));
+
++ if (ASIC_IS_DCE8(rdev)) {
++ vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]);
++ WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1);
++ }
++
+ args.ucCRTC = radeon_crtc->crtc_id;
+ args.ucBlanking = state;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
++
++ if (ASIC_IS_DCE8(rdev)) {
++ WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
++ }
+ }
+
+ static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
+@@ -938,11 +958,14 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
+ radeon_atombios_get_ppll_ss_info(rdev,
+ &radeon_crtc->ss,
+ ATOM_DP_SS_ID1);
+- } else
++ } else {
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev,
+ &radeon_crtc->ss,
+ ATOM_DP_SS_ID1);
++ }
++ /* disable spread spectrum on DCE3 DP */
++ radeon_crtc->ss_enabled = false;
+ }
+ break;
+ case ATOM_ENCODER_MODE_LVDS:
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index b5c67a99dda9..ffb36c1ee005 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4249,8 +4249,8 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
+
+- /* only one DAC on DCE6 */
+- if (!ASIC_IS_DCE6(rdev))
++ /* only one DAC on DCE5 */
++ if (!ASIC_IS_DCE5(rdev))
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
+index eb8ac315f92f..c7cac07f139b 100644
+--- a/drivers/gpu/drm/radeon/evergreen_cs.c
++++ b/drivers/gpu/drm/radeon/evergreen_cs.c
+@@ -967,7 +967,10 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
+ if (track->cb_dirty) {
+ tmp = track->cb_target_mask;
+ for (i = 0; i < 8; i++) {
+- if ((tmp >> (i * 4)) & 0xF) {
++ u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
++
++ if (format != V_028C70_COLOR_INVALID &&
++ (tmp >> (i * 4)) & 0xF) {
+ /* at least one component is enabled */
+ if (track->cb_color_bo[i] == NULL) {
+ dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 954eb9afbe71..b2dbd48f7f28 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1335,13 +1335,12 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
+ {
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
++ u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
++ PACKET3_SH_ACTION_ENA;
+
+ /* flush read cache over gart for this vmid */
+- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+- radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
++ radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+@@ -1357,6 +1356,8 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
+ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+ {
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
++ u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
++ PACKET3_SH_ACTION_ENA;
+
+ /* set to DX10/11 mode */
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+@@ -1381,14 +1382,11 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+ (ib->vm ? (ib->vm->id << 24) : 0));
+
+ /* flush read cache over gart for this vmid */
+- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+- radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
++ radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+- radeon_ring_write(ring, 10); /* poll interval */
++ radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
+ }
+
+ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
+diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
+index 22421bc80c0d..d996033c243e 100644
+--- a/drivers/gpu/drm/radeon/nid.h
++++ b/drivers/gpu/drm/radeon/nid.h
+@@ -1154,6 +1154,7 @@
+ # define PACKET3_DB_ACTION_ENA (1 << 26)
+ # define PACKET3_SH_ACTION_ENA (1 << 27)
+ # define PACKET3_SX_ACTION_ENA (1 << 28)
++# define PACKET3_ENGINE_ME (1 << 31)
+ #define PACKET3_ME_INITIALIZE 0x44
+ #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+ #define PACKET3_COND_WRITE 0x45
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index f9be22062df1..2acbf89cdfd3 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -2554,14 +2554,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+ {
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
++ u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
++ PACKET3_SH_ACTION_ENA;
++
++ if (rdev->family >= CHIP_RV770)
++ cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
+
+ if (rdev->wb.use_event) {
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ /* flush read cache over gart */
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
+- PACKET3_VC_ACTION_ENA |
+- PACKET3_SH_ACTION_ENA);
++ radeon_ring_write(ring, cp_coher_cntl);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+@@ -2575,9 +2578,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
+ } else {
+ /* flush read cache over gart */
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
+- PACKET3_VC_ACTION_ENA |
+- PACKET3_SH_ACTION_ENA);
++ radeon_ring_write(ring, cp_coher_cntl);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
+index 01a3ec83f284..745e66eacd47 100644
+--- a/drivers/gpu/drm/radeon/r600_cs.c
++++ b/drivers/gpu/drm/radeon/r600_cs.c
+@@ -749,7 +749,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
+ }
+
+ for (i = 0; i < 8; i++) {
+- if ((tmp >> (i * 4)) & 0xF) {
++ u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
++
++ if (format != V_0280A0_COLOR_INVALID &&
++ (tmp >> (i * 4)) & 0xF) {
+ /* at least one component is enabled */
+ if (track->cb_color_bo[i] == NULL) {
+ dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
+index 7b3c7b5932c5..72484b4b679e 100644
+--- a/drivers/gpu/drm/radeon/r600d.h
++++ b/drivers/gpu/drm/radeon/r600d.h
+@@ -1547,6 +1547,7 @@
+ # define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+ #define PACKET3_SURFACE_SYNC 0x43
+ # define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
++# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
+ # define PACKET3_TC_ACTION_ENA (1 << 23)
+ # define PACKET3_VC_ACTION_ENA (1 << 24)
+ # define PACKET3_CB_ACTION_ENA (1 << 25)
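Across ni.c and r600.c the pattern is the same: build the cache-action flags into one cp_coher_cntl word up front — with PACKET3_FULL_CACHE_ENA ORed in only where the hardware defines it (r7xx and later, per the r600d.h comment above; Cayman additionally sets PACKET3_ENGINE_ME) — then emit a single SURFACE_SYNC packet instead of the old SET_CONFIG_REG write pair. A runnable sketch assembling that dword from the bit definitions shown in the nid.h and r600d.h hunks:

	#include <stdint.h>
	#include <stdio.h>

	#define PACKET3_FULL_CACHE_ENA	(1u << 20)	/* r7xx+ only */
	#define PACKET3_TC_ACTION_ENA	(1u << 23)
	#define PACKET3_VC_ACTION_ENA	(1u << 24)
	#define PACKET3_SH_ACTION_ENA	(1u << 27)
	#define PACKET3_ENGINE_ME	(1u << 31)

	static uint32_t r600_coher_cntl(int is_rv770_or_later)
	{
		uint32_t cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
				PACKET3_SH_ACTION_ENA;

		if (is_rv770_or_later)
			cntl |= PACKET3_FULL_CACHE_ENA;	/* undefined before r7xx */
		return cntl;
	}

	int main(void)
	{
		printf("r600:  0x%08x\n", r600_coher_cntl(0));
		printf("rv770: 0x%08x\n", r600_coher_cntl(1));
		return 0;
	}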
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 5c39bf7c3d88..dfa641277175 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -3944,6 +3944,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
+ /* tell the bios not to handle mode switching */
+ bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
+
++ /* clear the vbios dpms state */
++ if (ASIC_IS_DCE4(rdev))
++ bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
++
+ if (rdev->family >= CHIP_R600) {
+ WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+ WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
+index fc60b74ee304..e24ca6ab96de 100644
+--- a/drivers/gpu/drm/radeon/radeon_i2c.c
++++ b/drivers/gpu/drm/radeon/radeon_i2c.c
+@@ -1020,6 +1020,9 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
+ /* Add the default buses */
+ void radeon_i2c_init(struct radeon_device *rdev)
+ {
++ if (radeon_hw_i2c)
++ DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
++
+ if (rdev->is_atom_bios)
+ radeon_atombios_i2c_init(rdev);
+ else
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 4f6b7fc7ad3c..a0ec4bb9d896 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -1024,8 +1024,10 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
+ rdev->pm.current_clock_mode_index = 0;
+ rdev->pm.current_sclk = rdev->pm.default_sclk;
+ rdev->pm.current_mclk = rdev->pm.default_mclk;
+- rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+- rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
++ if (rdev->pm.power_state) {
++ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
++ rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
++ }
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM
+ && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index 1d029ccf428b..6d916fc93116 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -91,6 +91,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
+ case CHIP_VERDE:
+ case CHIP_PITCAIRN:
+ case CHIP_ARUBA:
++ case CHIP_OLAND:
+ fw_name = FIRMWARE_TAHITI;
+ break;
+
+diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
+index 374499db20c7..a239b30aaf9d 100644
+--- a/drivers/gpu/drm/radeon/rv770_dpm.c
++++ b/drivers/gpu/drm/radeon/rv770_dpm.c
+@@ -2531,6 +2531,12 @@ bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
+ (rdev->pdev->subsystem_device == 0x1c42))
+ switch_limit = 200;
+
++ /* RV770 */
++ /* mclk switching doesn't seem to work reliably on desktop RV770s */
++ if ((rdev->family == CHIP_RV770) &&
++ !(rdev->flags & RADEON_IS_MOBILITY))
++ switch_limit = 0xffffffff; /* disable mclk switching */
++
+ if (vblank_time < switch_limit)
+ return true;
+ else
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 37acf938b779..3f39f15d48a6 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -5625,7 +5625,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
+ }
+
+ if (!ASIC_IS_NODCE(rdev)) {
+- WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
++ WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
+
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
+index 6e23019cd110..205a96177f95 100644
+--- a/drivers/gpu/drm/radeon/sid.h
++++ b/drivers/gpu/drm/radeon/sid.h
+@@ -770,7 +770,7 @@
+ # define GRPH_PFLIP_INT_MASK (1 << 0)
+ # define GRPH_PFLIP_INT_TYPE (1 << 8)
+
+-#define DACA_AUTODETECT_INT_CONTROL 0x66c8
++#define DAC_AUTODETECT_INT_CONTROL 0x67c8
+
+ #define DC_HPD1_INT_STATUS 0x601c
+ #define DC_HPD2_INT_STATUS 0x6028
+diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
+index b19ef4951085..824550db3fed 100644
+--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
++++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
+@@ -153,6 +153,7 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
+ chip_id = 0x01000015;
+ break;
+ case CHIP_PITCAIRN:
++ case CHIP_OLAND:
+ chip_id = 0x01000016;
+ break;
+ case CHIP_ARUBA:
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index a9d24e4bf792..c9511fd2f501 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -371,7 +371,6 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
+ goto error;
+
+ rcrtc->plane->format = format;
+- rcrtc->plane->pitch = crtc->fb->pitches[0];
+
+ rcrtc->plane->src_x = x;
+ rcrtc->plane->src_y = y;
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+index 53000644733f..3fb69d9ae61b 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+@@ -104,6 +104,15 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane)
+ {
+ struct rcar_du_group *rgrp = plane->group;
+ unsigned int index = plane->hwindex;
++ u32 mwr;
++
++ /* Memory pitch (expressed in pixels) */
++ if (plane->format->planes == 2)
++ mwr = plane->pitch;
++ else
++ mwr = plane->pitch * 8 / plane->format->bpp;
++
++ rcar_du_plane_write(rgrp, index, PnMWR, mwr);
+
+ /* The Y position is expressed in raster line units and must be doubled
+ * for 32bpp formats, according to the R8A7790 datasheet. No mention of
+@@ -133,6 +142,8 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
+ {
+ struct drm_gem_cma_object *gem;
+
++ plane->pitch = fb->pitches[0];
++
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ plane->dma[0] = gem->paddr + fb->offsets[0];
+
+@@ -209,7 +220,6 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
+ struct rcar_du_group *rgrp = plane->group;
+ u32 ddcr2 = PnDDCR2_CODE;
+ u32 ddcr4;
+- u32 mwr;
+
+ /* Data format
+ *
+@@ -240,14 +250,6 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
+ rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
+ rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
+
+- /* Memory pitch (expressed in pixels) */
+- if (plane->format->planes == 2)
+- mwr = plane->pitch;
+- else
+- mwr = plane->pitch * 8 / plane->format->bpp;
+-
+- rcar_du_plane_write(rgrp, index, PnMWR, mwr);
+-
+ /* Destination position and size */
+ rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
+ rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
+@@ -309,7 +311,6 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+
+ rplane->crtc = crtc;
+ rplane->format = format;
+- rplane->pitch = fb->pitches[0];
+
+ rplane->src_x = src_x >> 16;
+ rplane->src_y = src_y >> 16;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 599f6469a1eb..8b059eb09d9b 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1483,11 +1483,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
+ command_size);
+ if (unlikely(ret != 0))
+- goto out_err;
++ goto out_err_nores;
+
+ ret = vmw_resources_reserve(sw_context);
+ if (unlikely(ret != 0))
+- goto out_err;
++ goto out_err_nores;
+
+ ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
+ if (unlikely(ret != 0))
+@@ -1569,10 +1569,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ return 0;
+
+ out_err:
+- vmw_resource_relocations_free(&sw_context->res_relocations);
+- vmw_free_relocations(sw_context);
+ ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
++out_err_nores:
+ vmw_resource_list_unreserve(&sw_context->resource_list, true);
++ vmw_resource_relocations_free(&sw_context->res_relocations);
++ vmw_free_relocations(sw_context);
+ vmw_clear_validations(sw_context);
+ if (unlikely(dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid))
+diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
+index d6c7fe7f88d5..3ad651c3356c 100644
+--- a/drivers/infiniband/hw/qib/qib_ud.c
++++ b/drivers/infiniband/hw/qib/qib_ud.c
+@@ -57,13 +57,20 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
+ struct qib_sge *sge;
+ struct ib_wc wc;
+ u32 length;
++ enum ib_qp_type sqptype, dqptype;
+
+ qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
+ if (!qp) {
+ ibp->n_pkt_drops++;
+ return;
+ }
+- if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
++
++ sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
++ IB_QPT_UD : sqp->ibqp.qp_type;
++ dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
++ IB_QPT_UD : qp->ibqp.qp_type;
++
++ if (dqptype != sqptype ||
+ !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ ibp->n_pkt_drops++;
+ goto drop;
+diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
+index e21c1816a8f9..fbfdc10573be 100644
+--- a/drivers/input/misc/xen-kbdfront.c
++++ b/drivers/input/misc/xen-kbdfront.c
+@@ -29,6 +29,7 @@
+ #include <xen/interface/io/fbif.h>
+ #include <xen/interface/io/kbdif.h>
+ #include <xen/xenbus.h>
++#include <xen/platform_pci.h>
+
+ struct xenkbd_info {
+ struct input_dev *kbd;
+@@ -380,6 +381,9 @@ static int __init xenkbd_init(void)
+ if (xen_initial_domain())
+ return -ENODEV;
+
++ if (!xen_has_pv_devices())
++ return -ENODEV;
++
+ return xenbus_register_frontend(&xenkbd_driver);
+ }
+
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 40203ada635e..cae5a0866046 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -917,7 +917,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
+
+ /* If range covers entire pagetable, free it */
+ if (!(start_pfn > level_pfn ||
+- last_pfn < level_pfn + level_size(level))) {
++ last_pfn < level_pfn + level_size(level) - 1)) {
+ dma_clear_pte(pte);
+ domain_flush_cache(domain, pte, sizeof(*pte));
+ free_pgtable_page(level_pte);
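The intel-iommu change is a classic inclusive/exclusive bound fix: a level covers pfns level_pfn through level_pfn + level_size(level) - 1, but the old test demanded last_pfn reach one past that inclusive end, so a range that exactly covered the table never freed it. A runnable check with a 512-entry level:

	#include <stdio.h>

	/* Does [start, last] (inclusive) cover the whole table
	 * [level_pfn, level_pfn + size - 1]? */
	static int covers(unsigned long start, unsigned long last,
			  unsigned long level_pfn, unsigned long size, int fixed)
	{
		unsigned long end = fixed ? level_pfn + size - 1 : level_pfn + size;

		return !(start > level_pfn || last < end);
	}

	int main(void)
	{
		/* Freeing exactly pfns 0..511 of a 512-entry level: */
		printf("old test: %d (table leaked)\n", covers(0, 511, 0, 512, 0));
		printf("new test: %d (table freed)\n",  covers(0, 511, 0, 512, 1));
		return 0;
	}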
+diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
+index 30b426ed744b..34d009728d81 100644
+--- a/drivers/md/Kconfig
++++ b/drivers/md/Kconfig
+@@ -176,8 +176,12 @@ config MD_FAULTY
+
+ source "drivers/md/bcache/Kconfig"
+
++config BLK_DEV_DM_BUILTIN
++ boolean
++
+ config BLK_DEV_DM
+ tristate "Device mapper support"
++ select BLK_DEV_DM_BUILTIN
+ ---help---
+ Device-mapper is a low level volume manager. It works by allowing
+ people to specify mappings for ranges of logical sectors. Various
+diff --git a/drivers/md/Makefile b/drivers/md/Makefile
+index 2acc43fe0229..f26d83292579 100644
+--- a/drivers/md/Makefile
++++ b/drivers/md/Makefile
+@@ -32,6 +32,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o
+ obj-$(CONFIG_BCACHE) += bcache/
+ obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
+ obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
++obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
+ obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
+ obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
+ obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
+diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c
+new file mode 100644
+index 000000000000..6c9049c51b2b
+--- /dev/null
++++ b/drivers/md/dm-builtin.c
+@@ -0,0 +1,48 @@
++#include "dm.h"
++
++/*
++ * The kobject release method must not be placed in the module itself,
++ * otherwise we are subject to module unload races.
++ *
++ * The release method is called when the last reference to the kobject is
++ * dropped. It may be called by any other kernel code that drops the last
++ * reference.
++ *
++ * The release method suffers from module unload race. We may prevent the
++ * module from being unloaded at the start of the release method (using
++ * increased module reference count or synchronizing against the release
++ * method), however there is no way to prevent the module from being
++ * unloaded at the end of the release method.
++ *
++ * If this code were placed in the dm module, the following race may
++ * happen:
++ * 1. Some other process takes a reference to dm kobject
++ * 2. The user issues ioctl function to unload the dm device
++ * 3. dm_sysfs_exit calls kobject_put, however the object is not released
++ * because of the other reference taken at step 1
++ * 4. dm_sysfs_exit waits on the completion
++ * 5. The other process that took the reference in step 1 drops it,
++ * dm_kobject_release is called from this process
++ * 6. dm_kobject_release calls complete()
++ * 7. a reschedule happens before dm_kobject_release returns
++ * 8. dm_sysfs_exit continues, the dm device is unloaded, module reference
++ * count is decremented
++ * 9. The user unloads the dm module
++ * 10. The other process that was rescheduled in step 7 continues to run,
++ * it is now executing code in unloaded module, so it crashes
++ *
++ * Note that if the process that takes the foreign reference to dm kobject
++ * has a low priority and the system is sufficiently loaded with
++ * higher-priority processes that prevent the low-priority process from
++ * being scheduled long enough, this bug may really happen.
++ *
++ * In order to fix this module unload race, we place the release method
++ * into a helper code that is compiled directly into the kernel.
++ */
++
++void dm_kobject_release(struct kobject *kobj)
++{
++ complete(dm_get_completion_from_kobject(kobj));
++}
++
++EXPORT_SYMBOL(dm_kobject_release);
+diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
+index 84d2b91e4efb..c62c5ab6aed5 100644
+--- a/drivers/md/dm-sysfs.c
++++ b/drivers/md/dm-sysfs.c
+@@ -86,6 +86,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
+ static struct kobj_type dm_ktype = {
+ .sysfs_ops = &dm_sysfs_ops,
+ .default_attrs = dm_attrs,
++ .release = dm_kobject_release,
+ };
+
+ /*
+@@ -104,5 +105,7 @@ int dm_sysfs_init(struct mapped_device *md)
+ */
+ void dm_sysfs_exit(struct mapped_device *md)
+ {
+- kobject_put(dm_kobject(md));
++ struct kobject *kobj = dm_kobject(md);
++ kobject_put(kobj);
++ wait_for_completion(dm_get_completion_from_kobject(kobj));
+ }
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 8a30ad54bd46..7da347665552 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1349,6 +1349,12 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
+ return td->id;
+ }
+
++/*
++ * Check whether @time (of block creation) is older than @td's last snapshot.
++ * If so then the associated block is shared with the last snapshot device.
++ * Any block on a device created *after* the device last got snapshotted is
++ * necessarily not shared.
++ */
+ static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
+ {
+ return td->snapshotted_time > time;
+@@ -1458,6 +1464,20 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
+ return r;
+ }
+
++int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
++{
++ int r;
++ uint32_t ref_count;
++
++ down_read(&pmd->root_lock);
++ r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
++ if (!r)
++ *result = (ref_count != 0);
++ up_read(&pmd->root_lock);
++
++ return r;
++}
++
+ bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
+ {
+ int r;
+diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
+index 7bcc0e1d6238..2edf5dbac76a 100644
+--- a/drivers/md/dm-thin-metadata.h
++++ b/drivers/md/dm-thin-metadata.h
+@@ -181,6 +181,8 @@ int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result);
+
+ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
+
++int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
++
+ /*
+ * Returns -ENOSPC if the new size is too small and already allocated
+ * blocks would be lost.
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index ee29037ffc2e..bc0c97d7921e 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -512,6 +512,7 @@ struct dm_thin_new_mapping {
+ unsigned quiesced:1;
+ unsigned prepared:1;
+ unsigned pass_discard:1;
++ unsigned definitely_not_shared:1;
+
+ struct thin_c *tc;
+ dm_block_t virt_block;
+@@ -683,7 +684,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
+ cell_defer_no_holder(tc, m->cell2);
+
+ if (m->pass_discard)
+- remap_and_issue(tc, m->bio, m->data_block);
++ if (m->definitely_not_shared)
++ remap_and_issue(tc, m->bio, m->data_block);
++ else {
++ bool used = false;
++ if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
++ bio_endio(m->bio, 0);
++ else
++ remap_and_issue(tc, m->bio, m->data_block);
++ }
+ else
+ bio_endio(m->bio, 0);
+
+@@ -751,13 +760,17 @@ static int ensure_next_mapping(struct pool *pool)
+
+ static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
+ {
+- struct dm_thin_new_mapping *r = pool->next_mapping;
++ struct dm_thin_new_mapping *m = pool->next_mapping;
+
+ BUG_ON(!pool->next_mapping);
+
++ memset(m, 0, sizeof(struct dm_thin_new_mapping));
++ INIT_LIST_HEAD(&m->list);
++ m->bio = NULL;
++
+ pool->next_mapping = NULL;
+
+- return r;
++ return m;
+ }
+
+ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
+@@ -769,15 +782,10 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
+ struct pool *pool = tc->pool;
+ struct dm_thin_new_mapping *m = get_next_mapping(pool);
+
+- INIT_LIST_HEAD(&m->list);
+- m->quiesced = 0;
+- m->prepared = 0;
+ m->tc = tc;
+ m->virt_block = virt_block;
+ m->data_block = data_dest;
+ m->cell = cell;
+- m->err = 0;
+- m->bio = NULL;
+
+ if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
+ m->quiesced = 1;
+@@ -840,15 +848,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
+ struct pool *pool = tc->pool;
+ struct dm_thin_new_mapping *m = get_next_mapping(pool);
+
+- INIT_LIST_HEAD(&m->list);
+ m->quiesced = 1;
+ m->prepared = 0;
+ m->tc = tc;
+ m->virt_block = virt_block;
+ m->data_block = data_block;
+ m->cell = cell;
+- m->err = 0;
+- m->bio = NULL;
+
+ /*
+ * If the whole block of data is being overwritten or we are not
+@@ -1040,12 +1045,12 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
+ */
+ m = get_next_mapping(pool);
+ m->tc = tc;
+- m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
++ m->pass_discard = pool->pf.discard_passdown;
++ m->definitely_not_shared = !lookup_result.shared;
+ m->virt_block = block;
+ m->data_block = lookup_result.block;
+ m->cell = cell;
+ m->cell2 = cell2;
+- m->err = 0;
+ m->bio = bio;
+
+ if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
+@@ -1390,16 +1395,16 @@ static enum pool_mode get_pool_mode(struct pool *pool)
+ return pool->pf.mode;
+ }
+
+-static void set_pool_mode(struct pool *pool, enum pool_mode mode)
++static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ {
+ int r;
++ enum pool_mode old_mode = pool->pf.mode;
+
+- pool->pf.mode = mode;
+-
+- switch (mode) {
++ switch (new_mode) {
+ case PM_FAIL:
+- DMERR("%s: switching pool to failure mode",
+- dm_device_name(pool->pool_md));
++ if (old_mode != new_mode)
++ DMERR("%s: switching pool to failure mode",
++ dm_device_name(pool->pool_md));
+ dm_pool_metadata_read_only(pool->pmd);
+ pool->process_bio = process_bio_fail;
+ pool->process_discard = process_bio_fail;
+@@ -1408,13 +1413,15 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
+ break;
+
+ case PM_READ_ONLY:
+- DMERR("%s: switching pool to read-only mode",
+- dm_device_name(pool->pool_md));
++ if (old_mode != new_mode)
++ DMERR("%s: switching pool to read-only mode",
++ dm_device_name(pool->pool_md));
+ r = dm_pool_abort_metadata(pool->pmd);
+ if (r) {
+ DMERR("%s: aborting transaction failed",
+ dm_device_name(pool->pool_md));
+- set_pool_mode(pool, PM_FAIL);
++ new_mode = PM_FAIL;
++ set_pool_mode(pool, new_mode);
+ } else {
+ dm_pool_metadata_read_only(pool->pmd);
+ pool->process_bio = process_bio_read_only;
+@@ -1425,6 +1432,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
+ break;
+
+ case PM_WRITE:
++ if (old_mode != new_mode)
++ DMINFO("%s: switching pool to write mode",
++ dm_device_name(pool->pool_md));
+ dm_pool_metadata_read_write(pool->pmd);
+ pool->process_bio = process_bio;
+ pool->process_discard = process_discard;
+@@ -1432,6 +1442,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
+ pool->process_prepared_discard = process_prepared_discard;
+ break;
+ }
++
++ pool->pf.mode = new_mode;
+ }
+
+ /*----------------------------------------------------------------*/
+@@ -1648,6 +1660,17 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
+ enum pool_mode new_mode = pt->adjusted_pf.mode;
+
+ /*
++ * Don't change the pool's mode until set_pool_mode() below.
++ * Otherwise the pool's process_* function pointers may
++ * not match the desired pool mode.
++ */
++ pt->adjusted_pf.mode = old_mode;
++
++ pool->ti = ti;
++ pool->pf = pt->adjusted_pf;
++ pool->low_water_blocks = pt->low_water_blocks;
++
++ /*
+ * If we were in PM_FAIL mode, rollback of metadata failed. We're
+ * not going to recover without a thin_repair. So we never let the
+ * pool move out of the old mode. On the other hand a PM_READ_ONLY
+@@ -1657,10 +1680,6 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
+ if (old_mode == PM_FAIL)
+ new_mode = old_mode;
+
+- pool->ti = ti;
+- pool->low_water_blocks = pt->low_water_blocks;
+- pool->pf = pt->adjusted_pf;
+-
+ set_pool_mode(pool, new_mode);
+
+ return 0;
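get_next_mapping() now zeroes the recycled mapping (and re-inits its list head) at hand-out time instead of relying on every caller to reset fields individually — which is exactly how the new definitely_not_shared bit could otherwise inherit a stale value; the surrounding hunks delete the per-caller m->quiesced / m->prepared / m->err / m->bio resets accordingly. The discipline in miniature:

	#include <string.h>

	struct mapping {
		unsigned quiesced:1, prepared:1, pass_discard:1;
		unsigned definitely_not_shared:1;	/* new field: must start at 0 */
		void *bio;
	};

	/* Hand the recycled object out in a known-zero state; callers then
	 * set only the fields they care about and never inherit stale bits. */
	static struct mapping *get_next(struct mapping *cached)
	{
		memset(cached, 0, sizeof(*cached));
		return cached;
	}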
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index b3e26c7d1417..a562d5a4fa9d 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -194,8 +194,8 @@ struct mapped_device {
+ /* forced geometry settings */
+ struct hd_geometry geometry;
+
+- /* sysfs handle */
+- struct kobject kobj;
++ /* kobject and completion */
++ struct dm_kobject_holder kobj_holder;
+
+ /* zero-length flush that will be cloned and submitted to targets */
+ struct bio flush_bio;
+@@ -2005,6 +2005,7 @@ static struct mapped_device *alloc_dev(int minor)
+ init_waitqueue_head(&md->wait);
+ INIT_WORK(&md->work, dm_wq_work);
+ init_waitqueue_head(&md->eventq);
++ init_completion(&md->kobj_holder.completion);
+
+ md->disk->major = _major;
+ md->disk->first_minor = minor;
+@@ -2866,20 +2867,14 @@ struct gendisk *dm_disk(struct mapped_device *md)
+
+ struct kobject *dm_kobject(struct mapped_device *md)
+ {
+- return &md->kobj;
++ return &md->kobj_holder.kobj;
+ }
+
+-/*
+- * struct mapped_device should not be exported outside of dm.c
+- * so use this check to verify that kobj is part of md structure
+- */
+ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
+ {
+ struct mapped_device *md;
+
+- md = container_of(kobj, struct mapped_device, kobj);
+- if (&md->kobj != kobj)
+- return NULL;
++ md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
+
+ if (test_bit(DMF_FREEING, &md->flags) ||
+ dm_deleting_md(md))
+diff --git a/drivers/md/dm.h b/drivers/md/dm.h
+index 1d1ad7b7e527..a8db73cc708f 100644
+--- a/drivers/md/dm.h
++++ b/drivers/md/dm.h
+@@ -15,6 +15,8 @@
+ #include <linux/list.h>
+ #include <linux/blkdev.h>
+ #include <linux/hdreg.h>
++#include <linux/completion.h>
++#include <linux/kobject.h>
+
+ #include "dm-stats.h"
+
+@@ -138,12 +140,27 @@ void dm_interface_exit(void);
+ /*
+ * sysfs interface
+ */
++struct dm_kobject_holder {
++ struct kobject kobj;
++ struct completion completion;
++};
++
++static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
++{
++ return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
++}
++
+ int dm_sysfs_init(struct mapped_device *md);
+ void dm_sysfs_exit(struct mapped_device *md);
+ struct kobject *dm_kobject(struct mapped_device *md);
+ struct mapped_device *dm_get_from_kobject(struct kobject *kobj);
+
+ /*
++ * The kobject helper
++ */
++void dm_kobject_release(struct kobject *kobj);
++
++/*
+ * Targets for linear and striped mappings
+ */
+ int dm_linear_init(void);
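Taken together, the dm hunks implement a three-part teardown: the kobject and a completion sit side by side in dm_kobject_holder, the release method (compiled built-in so its text can never be unloaded) completes the completion, and dm_sysfs_exit() does kobject_put() followed by wait_for_completion(). A rough userspace analog of that drop-then-wait shape, with a refcount plus condition variable standing in for kobject and completion:

	#include <pthread.h>
	#include <stdio.h>

	struct holder {
		int refs;
		int released;
		pthread_mutex_t lock;
		pthread_cond_t cond;
	};

	static void holder_put(struct holder *h)
	{
		pthread_mutex_lock(&h->lock);
		if (--h->refs == 0) {		/* whoever drops the last ref... */
			h->released = 1;	/* ...runs the "release method"... */
			pthread_cond_signal(&h->cond);	/* ...and completes */
		}
		pthread_mutex_unlock(&h->lock);
	}

	static void *foreign_user(void *arg)
	{
		holder_put(arg);		/* foreign reference dropped late */
		return NULL;
	}

	int main(void)
	{
		struct holder h = { 2, 0, PTHREAD_MUTEX_INITIALIZER,
				    PTHREAD_COND_INITIALIZER };
		pthread_t t;

		pthread_create(&t, NULL, foreign_user, &h);
		holder_put(&h);			/* our put: may or may not be last */
		pthread_mutex_lock(&h.lock);	/* wait_for_completion() analog */
		while (!h.released)
			pthread_cond_wait(&h.cond, &h.lock);
		pthread_mutex_unlock(&h.lock);
		pthread_join(t, NULL);
		puts("safe to unload");
		return 0;
	}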
+diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
+index 466a60bbd716..aacbe70c2c2e 100644
+--- a/drivers/md/persistent-data/dm-space-map-common.c
++++ b/drivers/md/persistent-data/dm-space-map-common.c
+@@ -245,6 +245,10 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
+ return -EINVAL;
+ }
+
++ /*
++ * We need to set this before the dm_tm_new_block() call below.
++ */
++ ll->nr_blocks = nr_blocks;
+ for (i = old_blocks; i < blocks; i++) {
+ struct dm_block *b;
+ struct disk_index_entry idx;
+@@ -252,6 +256,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
+ r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
+ if (r < 0)
+ return r;
++
+ idx.blocknr = cpu_to_le64(dm_block_location(b));
+
+ r = dm_tm_unlock(ll->tm, b);
+@@ -266,7 +271,6 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
+ return r;
+ }
+
+- ll->nr_blocks = nr_blocks;
+ return 0;
+ }
+
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 58fc1eef7499..afb419e514bf 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -608,20 +608,38 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+ * Flick into a mode where all blocks get allocated in the new area.
+ */
+ smm->begin = old_len;
+- memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
++ memcpy(sm, &bootstrap_ops, sizeof(*sm));
+
+ /*
+ * Extend.
+ */
+ r = sm_ll_extend(&smm->ll, extra_blocks);
++ if (r)
++ goto out;
+
+ /*
+- * Switch back to normal behaviour.
++ * We repeatedly increment then commit until the commit doesn't
++ * allocate any new blocks.
+ */
+- memcpy(&smm->sm, &ops, sizeof(smm->sm));
+- for (i = old_len; !r && i < smm->begin; i++)
+- r = sm_ll_inc(&smm->ll, i, &ev);
++ do {
++ for (i = old_len; !r && i < smm->begin; i++) {
++ r = sm_ll_inc(&smm->ll, i, &ev);
++ if (r)
++ goto out;
++ }
++ old_len = smm->begin;
++
++ r = sm_ll_commit(&smm->ll);
++ if (r)
++ goto out;
++
++ } while (old_len != smm->begin);
+
++out:
++ /*
++ * Switch back to normal behaviour.
++ */
++ memcpy(sm, &ops, sizeof(*sm));
+ return r;
+ }
+
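The metadata space map stores its own reference counts inside the pool it is mapping, so incrementing counts for freshly added blocks can itself allocate more metadata blocks, which then also need counting. The rewritten loop therefore repeats increment-then-commit until a commit allocates nothing new. A toy model of that fixpoint iteration (one bookkeeping block per four increments is an invented ratio, just to make the loop converge):

	#include <stdio.h>

	/* Toy model: committing n pending increments allocates roughly
	 * n/4 extra bookkeeping blocks, which themselves need their
	 * counts incremented on the next pass. */
	static unsigned long commit(unsigned long pending)
	{
		return pending / 4;	/* newly allocated blocks */
	}

	int main(void)
	{
		unsigned long old_len = 0, begin = 100;

		do {
			unsigned long pending = begin - old_len;

			old_len = begin;
			begin += commit(pending);	/* commit may grow the map */
		} while (old_len != begin);		/* stop at the fixpoint */

		printf("stable at %lu blocks\n", begin);
		return 0;
	}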
+diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
+index 419a2d6b4349..7e0f61930a12 100644
+--- a/drivers/media/dvb-core/dvb-usb-ids.h
++++ b/drivers/media/dvb-core/dvb-usb-ids.h
+@@ -239,6 +239,7 @@
+ #define USB_PID_AVERMEDIA_A835B_4835 0x4835
+ #define USB_PID_AVERMEDIA_1867 0x1867
+ #define USB_PID_AVERMEDIA_A867 0xa867
++#define USB_PID_AVERMEDIA_H335 0x0335
+ #define USB_PID_AVERMEDIA_TWINSTAR 0x0825
+ #define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
+ #define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
+diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
+index 90536147bf04..ccac8467a28b 100644
+--- a/drivers/media/dvb-frontends/dib8000.c
++++ b/drivers/media/dvb-frontends/dib8000.c
+@@ -157,15 +157,10 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
+ return ret;
+ }
+
+-static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
++static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg)
+ {
+ u16 ret;
+
+- if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+- dprintk("could not acquire lock");
+- return 0;
+- }
+-
+ state->i2c_write_buffer[0] = reg >> 8;
+ state->i2c_write_buffer[1] = reg & 0xff;
+
+@@ -183,6 +178,21 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+ dprintk("i2c read error on %d", reg);
+
+ ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
++
++ return ret;
++}
++
++static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
++{
++ u16 ret;
++
++ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
++ dprintk("could not acquire lock");
++ return 0;
++ }
++
++ ret = __dib8000_read_word(state, reg);
++
+ mutex_unlock(&state->i2c_buffer_lock);
+
+ return ret;
+@@ -192,8 +202,15 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
+ {
+ u16 rw[2];
+
+- rw[0] = dib8000_read_word(state, reg + 0);
+- rw[1] = dib8000_read_word(state, reg + 1);
++ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
++ dprintk("could not acquire lock");
++ return 0;
++ }
++
++ rw[0] = __dib8000_read_word(state, reg + 0);
++ rw[1] = __dib8000_read_word(state, reg + 1);
++
++ mutex_unlock(&state->i2c_buffer_lock);
+
+ return ((rw[0] << 16) | (rw[1]));
+ }
+@@ -2445,7 +2462,8 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
+ if (state->revision == 0x8090)
+ internal = dib8000_read32(state, 23) / 1000;
+
+- if (state->autosearch_state == AS_SEARCHING_FFT) {
++ if ((state->revision >= 0x8002) &&
++ (state->autosearch_state == AS_SEARCHING_FFT)) {
+ dib8000_write_word(state, 37, 0x0065); /* P_ctrl_pha_off_max default values */
+ dib8000_write_word(state, 116, 0x0000); /* P_ana_gain to 0 */
+
+@@ -2481,7 +2499,8 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
+ dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (1 << 13)); /* P_restart_ccg = 1 */
+ dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (0 << 13)); /* P_restart_ccg = 0 */
+ dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x7ff) | (0 << 15) | (1 << 13)); /* P_restart_search = 0; */
+- } else if (state->autosearch_state == AS_SEARCHING_GUARD) {
++ } else if ((state->revision >= 0x8002) &&
++ (state->autosearch_state == AS_SEARCHING_GUARD)) {
+ c->transmission_mode = TRANSMISSION_MODE_8K;
+ c->guard_interval = GUARD_INTERVAL_1_8;
+ c->inversion = 0;
+@@ -2583,7 +2602,8 @@ static int dib8000_autosearch_irq(struct dvb_frontend *fe)
+ struct dib8000_state *state = fe->demodulator_priv;
+ u16 irq_pending = dib8000_read_word(state, 1284);
+
+- if (state->autosearch_state == AS_SEARCHING_FFT) {
++ if ((state->revision >= 0x8002) &&
++ (state->autosearch_state == AS_SEARCHING_FFT)) {
+ if (irq_pending & 0x1) {
+ dprintk("dib8000_autosearch_irq: max correlation result available");
+ return 3;
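dib8000_read32() previously took and released i2c_buffer_lock twice, once per 16-bit half, so another user of the shared I2C buffer could slip in between the two transfers. The fix is the usual kernel split into a locked wrapper and a double-underscore helper whose caller must already hold the lock, letting the 32-bit read keep one critical section across both halves. The same shape in runnable pthread form:

	#include <pthread.h>
	#include <stdint.h>

	static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Caller must hold buf_lock (the kernel's __helper convention);
	 * the body stands in for the real I2C transfer. */
	static uint16_t __read_word(uint16_t reg)
	{
		return reg ^ 0xabcd;
	}

	static uint16_t read_word(uint16_t reg)
	{
		uint16_t v;

		pthread_mutex_lock(&buf_lock);
		v = __read_word(reg);
		pthread_mutex_unlock(&buf_lock);
		return v;
	}

	/* Both halves now run under one critical section, so no other
	 * user of the shared buffer can interleave between them. */
	static uint32_t read32(uint16_t reg)
	{
		uint16_t hi, lo;

		pthread_mutex_lock(&buf_lock);
		hi = __read_word(reg);
		lo = __read_word(reg + 1);
		pthread_mutex_unlock(&buf_lock);
		return (uint32_t)hi << 16 | lo;
	}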
+diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
+index 4da5272075cb..02699c111019 100644
+--- a/drivers/media/dvb-frontends/m88rs2000.c
++++ b/drivers/media/dvb-frontends/m88rs2000.c
+@@ -110,28 +110,94 @@ static u8 m88rs2000_readreg(struct m88rs2000_state *state, u8 reg)
+ return b1[0];
+ }
+
++static u32 m88rs2000_get_mclk(struct dvb_frontend *fe)
++{
++ struct m88rs2000_state *state = fe->demodulator_priv;
++ u32 mclk;
++ u8 reg;
++ /* Must not be 0x00 or 0xff */
++ reg = m88rs2000_readreg(state, 0x86);
++ if (!reg || reg == 0xff)
++ return 0;
++
++ reg /= 2;
++ reg += 1;
++
++ mclk = (u32)(reg * RS2000_FE_CRYSTAL_KHZ + 28 / 2) / 28;
++
++ return mclk;
++}
++
++static int m88rs2000_set_carrieroffset(struct dvb_frontend *fe, s16 offset)
++{
++ struct m88rs2000_state *state = fe->demodulator_priv;
++ u32 mclk;
++ s32 tmp;
++ u8 reg;
++ int ret;
++
++ mclk = m88rs2000_get_mclk(fe);
++ if (!mclk)
++ return -EINVAL;
++
++ tmp = (offset * 4096 + (s32)mclk / 2) / (s32)mclk;
++ if (tmp < 0)
++ tmp += 4096;
++
++ /* Carrier Offset */
++ ret = m88rs2000_writereg(state, 0x9c, (u8)(tmp >> 4));
++
++ reg = m88rs2000_readreg(state, 0x9d);
++ reg &= 0xf;
++ reg |= (u8)(tmp & 0xf) << 4;
++
++ ret |= m88rs2000_writereg(state, 0x9d, reg);
++
++ return ret;
++}
++
+ static int m88rs2000_set_symbolrate(struct dvb_frontend *fe, u32 srate)
+ {
+ struct m88rs2000_state *state = fe->demodulator_priv;
+ int ret;
+- u32 temp;
++ u64 temp;
++ u32 mclk;
+ u8 b[3];
+
+ if ((srate < 1000000) || (srate > 45000000))
+ return -EINVAL;
+
++ mclk = m88rs2000_get_mclk(fe);
++ if (!mclk)
++ return -EINVAL;
++
+ temp = srate / 1000;
+- temp *= 11831;
+- temp /= 68;
+- temp -= 3;
++ temp *= 1 << 24;
++
++ do_div(temp, mclk);
+
+ b[0] = (u8) (temp >> 16) & 0xff;
+ b[1] = (u8) (temp >> 8) & 0xff;
+ b[2] = (u8) temp & 0xff;
++
+ ret = m88rs2000_writereg(state, 0x93, b[2]);
+ ret |= m88rs2000_writereg(state, 0x94, b[1]);
+ ret |= m88rs2000_writereg(state, 0x95, b[0]);
+
++ if (srate > 10000000)
++ ret |= m88rs2000_writereg(state, 0xa0, 0x20);
++ else
++ ret |= m88rs2000_writereg(state, 0xa0, 0x60);
++
++ ret |= m88rs2000_writereg(state, 0xa1, 0xe0);
++
++ if (srate > 12000000)
++ ret |= m88rs2000_writereg(state, 0xa3, 0x20);
++ else if (srate > 2800000)
++ ret |= m88rs2000_writereg(state, 0xa3, 0x98);
++ else
++ ret |= m88rs2000_writereg(state, 0xa3, 0x90);
++
+ deb_info("m88rs2000: m88rs2000_set_symbolrate\n");
+ return ret;
+ }
+@@ -261,8 +327,6 @@ struct inittab m88rs2000_shutdown[] = {
+
+ struct inittab fe_reset[] = {
+ {DEMOD_WRITE, 0x00, 0x01},
+- {DEMOD_WRITE, 0xf1, 0xbf},
+- {DEMOD_WRITE, 0x00, 0x01},
+ {DEMOD_WRITE, 0x20, 0x81},
+ {DEMOD_WRITE, 0x21, 0x80},
+ {DEMOD_WRITE, 0x10, 0x33},
+@@ -305,9 +369,6 @@ struct inittab fe_trigger[] = {
+ {DEMOD_WRITE, 0x9b, 0x64},
+ {DEMOD_WRITE, 0x9e, 0x00},
+ {DEMOD_WRITE, 0x9f, 0xf8},
+- {DEMOD_WRITE, 0xa0, 0x20},
+- {DEMOD_WRITE, 0xa1, 0xe0},
+- {DEMOD_WRITE, 0xa3, 0x38},
+ {DEMOD_WRITE, 0x98, 0xff},
+ {DEMOD_WRITE, 0xc0, 0x0f},
+ {DEMOD_WRITE, 0x89, 0x01},
+@@ -540,9 +601,8 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ fe_status_t status;
+ int i, ret = 0;
+- s32 tmp;
+ u32 tuner_freq;
+- u16 offset = 0;
++ s16 offset = 0;
+ u8 reg;
+
+ state->no_lock_count = 0;
+@@ -567,29 +627,26 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
+ if (ret < 0)
+ return -ENODEV;
+
+- offset = tuner_freq - c->frequency;
+-
+- /* calculate offset assuming 96000kHz*/
+- tmp = offset;
+- tmp *= 65536;
+-
+- tmp = (2 * tmp + 96000) / (2 * 96000);
+- if (tmp < 0)
+- tmp += 65536;
++ offset = (s16)((s32)tuner_freq - c->frequency);
+
+- offset = tmp & 0xffff;
++ /* default mclk value 96.4285 * 2 * 1000 = 192857 */
++ if (((c->frequency % 192857) >= (192857 - 3000)) ||
++ (c->frequency % 192857) <= 3000)
++ ret = m88rs2000_writereg(state, 0x86, 0xc2);
++ else
++ ret = m88rs2000_writereg(state, 0x86, 0xc6);
+
+- ret = m88rs2000_writereg(state, 0x9a, 0x30);
+- /* Unknown usually 0xc6 sometimes 0xc1 */
+- reg = m88rs2000_readreg(state, 0x86);
+- ret |= m88rs2000_writereg(state, 0x86, reg);
+- /* Offset lower nibble always 0 */
+- ret |= m88rs2000_writereg(state, 0x9c, (offset >> 8));
+- ret |= m88rs2000_writereg(state, 0x9d, offset & 0xf0);
++ ret |= m88rs2000_set_carrieroffset(fe, offset);
++ if (ret < 0)
++ return -ENODEV;
+
++ /* Reset demod by symbol rate */
++ if (c->symbol_rate > 27500000)
++ ret = m88rs2000_writereg(state, 0xf1, 0xa4);
++ else
++ ret = m88rs2000_writereg(state, 0xf1, 0xbf);
+
+- /* Reset Demod */
+- ret = m88rs2000_tab_set(state, fe_reset);
++ ret |= m88rs2000_tab_set(state, fe_reset);
+ if (ret < 0)
+ return -ENODEV;
+
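The rewritten tuning path derives everything from the measured master clock instead of assuming 96 MHz: register 0x86 yields mclk via (reg/2 + 1) * 27000 / 28 kHz (with 14 added before the divide for rounding), and the carrier offset becomes a signed 12-bit fraction of mclk, tmp = (offset * 4096 + mclk/2) / mclk, wrapped by +4096 when negative. A runnable check of that arithmetic using the default 0x86 value 0xc6 from the comment above:

	#include <stdint.h>
	#include <stdio.h>

	#define RS2000_FE_CRYSTAL_KHZ 27000

	/* Master clock in kHz from register 0x86, per m88rs2000_get_mclk(). */
	static uint32_t mclk_from_reg(uint8_t reg)
	{
		reg = reg / 2 + 1;
		return ((uint32_t)reg * RS2000_FE_CRYSTAL_KHZ + 28 / 2) / 28;
	}

	/* 12-bit carrier offset word, per m88rs2000_set_carrieroffset(). */
	static int32_t offset_word(int32_t offset_khz, uint32_t mclk)
	{
		int32_t tmp = (offset_khz * 4096 + (int32_t)mclk / 2)
				/ (int32_t)mclk;

		if (tmp < 0)
			tmp += 4096;	/* wrap negatives into 12-bit range */
		return tmp;
	}

	int main(void)
	{
		uint32_t mclk = mclk_from_reg(0xc6);	/* default 0x86 value */

		printf("mclk = %u kHz\n", mclk);
		printf("+1000 kHz -> 0x%03x, -1000 kHz -> 0x%03x\n",
		       offset_word(1000, mclk), offset_word(-1000, mclk));
		return 0;
	}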
+diff --git a/drivers/media/dvb-frontends/m88rs2000.h b/drivers/media/dvb-frontends/m88rs2000.h
+index 14ce31e76ae6..0a50ea90736b 100644
+--- a/drivers/media/dvb-frontends/m88rs2000.h
++++ b/drivers/media/dvb-frontends/m88rs2000.h
+@@ -53,6 +53,8 @@ static inline struct dvb_frontend *m88rs2000_attach(
+ }
+ #endif /* CONFIG_DVB_M88RS2000 */
+
++#define RS2000_FE_CRYSTAL_KHZ 27000
++
+ enum {
+ DEMOD_WRITE = 0x1,
+ WRITE_DELAY = 0x10,
+diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
+index fbca9856313a..4bf057544607 100644
+--- a/drivers/media/dvb-frontends/nxt200x.c
++++ b/drivers/media/dvb-frontends/nxt200x.c
+@@ -40,7 +40,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ /* Max transfer size done by I2C transfer functions */
+-#define MAX_XFER_SIZE 64
++#define MAX_XFER_SIZE 256
+
+ #define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw"
+ #define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw"
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+index 084263dd126f..4a521a9a6e9d 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+@@ -177,21 +177,6 @@ unlock:
+ mutex_unlock(&dev->mfc_mutex);
+ }
+
+-static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
+-{
+- struct video_device *vdev = video_devdata(file);
+-
+- if (!vdev) {
+- mfc_err("failed to get video_device");
+- return MFCNODE_INVALID;
+- }
+- if (vdev->index == 0)
+- return MFCNODE_DECODER;
+- else if (vdev->index == 1)
+- return MFCNODE_ENCODER;
+- return MFCNODE_INVALID;
+-}
+-
+ static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
+ {
+ mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
+@@ -701,6 +686,7 @@ irq_cleanup_hw:
+ /* Open an MFC node */
+ static int s5p_mfc_open(struct file *file)
+ {
++ struct video_device *vdev = video_devdata(file);
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = NULL;
+ struct vb2_queue *q;
+@@ -738,7 +724,7 @@ static int s5p_mfc_open(struct file *file)
+ /* Mark context as idle */
+ clear_work_bit_irqsave(ctx);
+ dev->ctx[ctx->num] = ctx;
+- if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
++ if (vdev == dev->vfd_dec) {
+ ctx->type = MFCINST_DECODER;
+ ctx->c_ops = get_dec_codec_ops();
+ s5p_mfc_dec_init(ctx);
+@@ -748,7 +734,7 @@ static int s5p_mfc_open(struct file *file)
+ mfc_err("Failed to setup mfc controls\n");
+ goto err_ctrls_setup;
+ }
+- } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
++ } else if (vdev == dev->vfd_enc) {
+ ctx->type = MFCINST_ENCODER;
+ ctx->c_ops = get_enc_codec_ops();
+ /* only for encoder */
+@@ -793,10 +779,10 @@ static int s5p_mfc_open(struct file *file)
+ q = &ctx->vq_dst;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->drv_priv = &ctx->fh;
+- if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
++ if (vdev == dev->vfd_dec) {
+ q->io_modes = VB2_MMAP;
+ q->ops = get_dec_queue_ops();
+- } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
++ } else if (vdev == dev->vfd_enc) {
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = get_enc_queue_ops();
+ } else {
+@@ -815,10 +801,10 @@ static int s5p_mfc_open(struct file *file)
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ q->io_modes = VB2_MMAP;
+ q->drv_priv = &ctx->fh;
+- if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
++ if (vdev == dev->vfd_dec) {
+ q->io_modes = VB2_MMAP;
+ q->ops = get_dec_queue_ops();
+- } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
++ } else if (vdev == dev->vfd_enc) {
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = get_enc_queue_ops();
+ } else {
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+index 6920b546181a..823812c6b9b0 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+@@ -115,15 +115,6 @@ enum s5p_mfc_fmt_type {
+ };
+
+ /**
+- * enum s5p_mfc_node_type - The type of an MFC device node.
+- */
+-enum s5p_mfc_node_type {
+- MFCNODE_INVALID = -1,
+- MFCNODE_DECODER = 0,
+- MFCNODE_ENCODER = 1,
+-};
+-
+-/**
+ * enum s5p_mfc_inst_type - The type of an MFC instance.
+ */
+ enum s5p_mfc_inst_type {
+diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
+index 90cfa35ef6e6..eeab79bdd2aa 100644
+--- a/drivers/media/usb/dvb-usb-v2/anysee.c
++++ b/drivers/media/usb/dvb-usb-v2/anysee.c
+@@ -442,6 +442,7 @@ static struct cxd2820r_config anysee_cxd2820r_config = {
+ * IOD[0] ZL10353 1=enabled
+ * IOE[0] tuner 0=enabled
+ * tuner is behind ZL10353 I2C-gate
++ * tuner is behind TDA10023 I2C-gate
+ *
+ * E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)"
+ * PCB: 508TC (rev0.6)
+@@ -956,7 +957,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
+
+ if (fe && adap->fe[1]) {
+ /* attach tuner for 2nd FE */
+- fe = dvb_attach(dvb_pll_attach, adap->fe[0],
++ fe = dvb_attach(dvb_pll_attach, adap->fe[1],
+ (0xc0 >> 1), &d->i2c_adap,
+ DVB_PLL_SAMSUNG_DTOS403IH102A);
+ }
+diff --git a/drivers/media/usb/dvb-usb-v2/it913x.c b/drivers/media/usb/dvb-usb-v2/it913x.c
+index 1cb6899cf797..fe95a586dd5d 100644
+--- a/drivers/media/usb/dvb-usb-v2/it913x.c
++++ b/drivers/media/usb/dvb-usb-v2/it913x.c
+@@ -799,6 +799,9 @@ static const struct usb_device_id it913x_id_table[] = {
+ { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CTVDIGDUAL_V2,
+ &it913x_properties, "Digital Dual TV Receiver CTVDIGDUAL_V2",
+ RC_MAP_IT913X_V1) },
++ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_H335,
++ &it913x_properties, "Avermedia H335",
++ RC_MAP_IT913X_V2) },
+ {} /* Terminating entry */
+ };
+
+diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
+index b5aaaac427ad..0a30dbf3d05c 100644
+--- a/drivers/media/v4l2-core/v4l2-dev.c
++++ b/drivers/media/v4l2-core/v4l2-dev.c
+@@ -872,8 +872,8 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
+
+ /* Should not happen since we thought this minor was free */
+ WARN_ON(video_device[vdev->minor] != NULL);
+- video_device[vdev->minor] = vdev;
+ vdev->index = get_index(vdev);
++ video_device[vdev->minor] = vdev;
+ mutex_unlock(&videodev_lock);
+
+ if (vdev->ioctl_ops)
+diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
+index 98f95614b5b1..8d39a1221438 100644
+--- a/drivers/misc/mei/hbm.c
++++ b/drivers/misc/mei/hbm.c
+@@ -593,7 +593,7 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
+ */
+ if (dev->hbm_state == MEI_HBM_IDLE) {
+ dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n");
+- return 0;
++ return;
+ }
+
+ switch (mei_msg->hbm_cmd) {
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 1a3163f1407e..4e8212c714b1 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -1959,6 +1959,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+ struct mmc_card *card = md->queue.card;
+ struct mmc_host *host = card->host;
+ unsigned long flags;
++ unsigned int cmd_flags = req ? req->cmd_flags : 0;
+
+ if (req && !mq->mqrq_prev->req)
+ /* claim host only for the first request */
+@@ -1974,7 +1975,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+ }
+
+ mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+- if (req && req->cmd_flags & REQ_DISCARD) {
++ if (cmd_flags & REQ_DISCARD) {
+ /* complete ongoing async transfer before issuing discard */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+@@ -1983,7 +1984,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+ ret = mmc_blk_issue_secdiscard_rq(mq, req);
+ else
+ ret = mmc_blk_issue_discard_rq(mq, req);
+- } else if (req && req->cmd_flags & REQ_FLUSH) {
++ } else if (cmd_flags & REQ_FLUSH) {
+ /* complete ongoing async transfer before issuing flush */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+@@ -1999,7 +2000,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+
+ out:
+ if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
+- (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK)))
++ (cmd_flags & MMC_REQ_SPECIAL_MASK))
+ /*
+ * Release host when there are no more requests
+ * and after special request(discard, flush) is done.
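
The local cmd_flags snapshot introduced above matters because, by the time control reaches the out: label, the request may already have been completed and recycled, so reading req->cmd_flags there would be a use-after-free. A hedged sketch of the pattern, with invented names:

    struct req_like {
        unsigned int cmd_flags;
    };

    #define REQ_SPECIAL_MASK 0x3

    static void issue_rq_sketch(struct req_like *req)
    {
        /* Capture every req field needed later, while req is still ours. */
        unsigned int cmd_flags = req ? req->cmd_flags : 0;

        /* ... issue the request; it may complete and be freed here ... */

        if (cmd_flags & REQ_SPECIAL_MASK) {
            /* Safe: only the local copy is read, never req itself. */
        }
    }
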
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 5e8823dc3ef6..06da0608283a 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -11,6 +11,7 @@
+ */
+
+ #include <linux/err.h>
++#include <linux/sizes.h>
+ #include <linux/slab.h>
+ #include <linux/stat.h>
+
+@@ -44,6 +45,13 @@ static const unsigned int tacc_mant[] = {
+ 35, 40, 45, 50, 55, 60, 70, 80,
+ };
+
++static const unsigned int sd_au_size[] = {
++ 0, SZ_16K / 512, SZ_32K / 512, SZ_64K / 512,
++ SZ_128K / 512, SZ_256K / 512, SZ_512K / 512, SZ_1M / 512,
++ SZ_2M / 512, SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
++ SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
++};
++
+ #define UNSTUFF_BITS(resp,start,size) \
+ ({ \
+ const int __size = size; \
+@@ -215,7 +223,7 @@ static int mmc_decode_scr(struct mmc_card *card)
+ static int mmc_read_ssr(struct mmc_card *card)
+ {
+ unsigned int au, es, et, eo;
+- int err, i, max_au;
++ int err, i;
+ u32 *ssr;
+
+ if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
+@@ -239,26 +247,25 @@ static int mmc_read_ssr(struct mmc_card *card)
+ for (i = 0; i < 16; i++)
+ ssr[i] = be32_to_cpu(ssr[i]);
+
+- /* SD3.0 increases max AU size to 64MB (0xF) from 4MB (0x9) */
+- max_au = card->scr.sda_spec3 ? 0xF : 0x9;
+-
+ /*
+ * UNSTUFF_BITS only works with four u32s so we have to offset the
+ * bitfield positions accordingly.
+ */
+ au = UNSTUFF_BITS(ssr, 428 - 384, 4);
+- if (au > 0 && au <= max_au) {
+- card->ssr.au = 1 << (au + 4);
+- es = UNSTUFF_BITS(ssr, 408 - 384, 16);
+- et = UNSTUFF_BITS(ssr, 402 - 384, 6);
+- eo = UNSTUFF_BITS(ssr, 400 - 384, 2);
+- if (es && et) {
+- card->ssr.erase_timeout = (et * 1000) / es;
+- card->ssr.erase_offset = eo * 1000;
++ if (au) {
++ if (au <= 9 || card->scr.sda_spec3) {
++ card->ssr.au = sd_au_size[au];
++ es = UNSTUFF_BITS(ssr, 408 - 384, 16);
++ et = UNSTUFF_BITS(ssr, 402 - 384, 6);
++ if (es && et) {
++ eo = UNSTUFF_BITS(ssr, 400 - 384, 2);
++ card->ssr.erase_timeout = (et * 1000) / es;
++ card->ssr.erase_offset = eo * 1000;
++ }
++ } else {
++ pr_warning("%s: SD Status: Invalid Allocation Unit size.\n",
++ mmc_hostname(card->host));
+ }
+- } else {
+- pr_warning("%s: SD Status: Invalid Allocation Unit "
+- "size.\n", mmc_hostname(card->host));
+ }
+ out:
+ kfree(ssr);
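
The new sd_au_size[] table encodes each 4-bit AU code directly in 512-byte sectors, which the old 1 << (au + 4) formula could not do for the non-power-of-two 12 MB and 24 MB entries SD 3.0 introduced; codes above 0x9 are accepted only when scr.sda_spec3 is set. A quick user-space sanity check of the mapping, assuming the usual SZ_* values:

    #include <assert.h>

    #define SZ_4M  (4u << 20)
    #define SZ_8M  (8u << 20)
    #define SZ_64M (64u << 20)

    int main(void)
    {
        assert(SZ_4M / 512 == 8192);            /* code 0x9: 4 MiB  */
        assert((SZ_8M + SZ_4M) / 512 == 24576); /* code 0xb: 12 MiB */
        assert(SZ_64M / 512 == 131072);         /* code 0xf: 64 MiB */
        return 0;
    }
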
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 92c18779d47e..a0752e9ce977 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -1193,11 +1193,22 @@ static void atmci_start_request(struct atmel_mci *host,
+ iflags |= ATMCI_CMDRDY;
+ cmd = mrq->cmd;
+ cmdflags = atmci_prepare_command(slot->mmc, cmd);
+- atmci_send_command(host, cmd, cmdflags);
++
++ /*
++ * DMA transfer should be started before sending the command to avoid
++ * unexpected errors especially for read operations in SDIO mode.
++ * Unfortunately, in PDC mode, command has to be sent before starting
++ * the transfer.
++ */
++ if (host->submit_data != &atmci_submit_data_dma)
++ atmci_send_command(host, cmd, cmdflags);
+
+ if (data)
+ host->submit_data(host, data);
+
++ if (host->submit_data == &atmci_submit_data_dma)
++ atmci_send_command(host, cmd, cmdflags);
++
+ if (mrq->stop) {
+ host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
+ host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
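
A condensed view of the two command/data orderings the hunk above produces, in comment form (hook names mirror the driver; this is a sketch of the control flow, not the actual function body):

    /* DMA engine path - arm the data transfer before the command, so an
     * immediate SDIO read response cannot arrive ahead of the DMA setup:
     *
     *     host->submit_data(host, data);           // atmci_submit_data_dma
     *     atmci_send_command(host, cmd, cmdflags);
     *
     * PDC path - the controller wants the command on the bus first:
     *
     *     atmci_send_command(host, cmd, cmdflags);
     *     host->submit_data(host, data);           // PDC variant
     */
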
+diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
+index d7d6bc8968d2..27ae563d0caa 100644
+--- a/drivers/mmc/host/sdhci-pci.c
++++ b/drivers/mmc/host/sdhci-pci.c
+@@ -59,6 +59,7 @@ struct sdhci_pci_fixes {
+ unsigned int quirks;
+ unsigned int quirks2;
+ bool allow_runtime_pm;
++ bool own_cd_for_runtime_pm;
+
+ int (*probe) (struct sdhci_pci_chip *);
+
+@@ -290,6 +291,7 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
+ static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .allow_runtime_pm = true,
++ .own_cd_for_runtime_pm = true,
+ };
+
+ static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
+@@ -354,6 +356,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
+ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+ .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON,
+ .allow_runtime_pm = true,
++ .own_cd_for_runtime_pm = true,
+ };
+
+ /* O2Micro extra registers */
+@@ -1381,6 +1384,15 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
+
+ sdhci_pci_add_own_cd(slot);
+
++ /*
++ * Check if the chip needs a separate GPIO for card detect to wake up
++ * from runtime suspend. If it is not there, don't allow runtime PM.
++ * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure.
++ */
++ if (chip->fixes && chip->fixes->own_cd_for_runtime_pm &&
++ !gpio_is_valid(slot->cd_gpio))
++ chip->allow_runtime_pm = false;
++
+ return slot;
+
+ remove:
+diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
+index ce8242b6c3e7..e5c0e593ed1e 100644
+--- a/drivers/mtd/nand/mxc_nand.c
++++ b/drivers/mtd/nand/mxc_nand.c
+@@ -676,7 +676,6 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
+ ecc_stat >>= 4;
+ } while (--no_subpages);
+
+- mtd->ecc_stats.corrected += ret;
+ pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
+
+ return ret;
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 3f0f20081979..7c541dc1647e 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -2070,7 +2070,7 @@ static int __init netif_init(void)
+ if (!xen_domain())
+ return -ENODEV;
+
+- if (xen_hvm_domain() && !xen_platform_pci_unplug)
++ if (!xen_has_pv_nic_devices())
+ return -ENODEV;
+
+ pr_info("Initialising Xen virtual ethernet driver\n");
+diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
+index f7197a790341..eae7cd9fde7b 100644
+--- a/drivers/pci/xen-pcifront.c
++++ b/drivers/pci/xen-pcifront.c
+@@ -20,6 +20,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/bitops.h>
+ #include <linux/time.h>
++#include <xen/platform_pci.h>
+
+ #include <asm/xen/swiotlb-xen.h>
+ #define INVALID_GRANT_REF (0)
+@@ -1138,6 +1139,9 @@ static int __init pcifront_init(void)
+ if (!xen_pv_domain() || xen_initial_domain())
+ return -ENODEV;
+
++ if (!xen_has_pv_devices())
++ return -ENODEV;
++
+ pci_frontend_registrar(1 /* enable */);
+
+ return xenbus_register_frontend(&xenpci_driver);
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 24e733c98f8b..371a7e91dbf6 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -34,11 +34,11 @@
+ #include <linux/interrupt.h>
+ #include <linux/spinlock.h>
+ #include <linux/platform_device.h>
+-#include <linux/mod_devicetable.h>
+ #include <linux/log2.h>
+ #include <linux/pm.h>
+ #include <linux/of.h>
+ #include <linux/of_platform.h>
++#include <linux/dmi.h>
+
+ /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
+ #include <asm-generic/rtc.h>
+@@ -377,6 +377,51 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+ return 0;
+ }
+
++/*
++ * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes.
++ */
++static bool alarm_disable_quirk;
++
++static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
++{
++ alarm_disable_quirk = true;
++	pr_info("rtc-cmos: BIOS has alarm-disable quirk; RTC alarms disabled\n");
++ return 0;
++}
++
++static const struct dmi_system_id rtc_quirks[] __initconst = {
++ /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */
++ {
++ .callback = set_alarm_disable_quirk,
++ .ident = "IBM Truman",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "4852570"),
++ },
++ },
++ /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */
++ {
++ .callback = set_alarm_disable_quirk,
++ .ident = "Gigabyte GA-990XA-UD3",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR,
++ "Gigabyte Technology Co., Ltd."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"),
++ },
++ },
++ /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */
++ {
++ .callback = set_alarm_disable_quirk,
++ .ident = "Toshiba Satellite L300",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
++ },
++ },
++ {}
++};
++
+ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
+ {
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+@@ -385,6 +430,9 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
+ if (!is_valid_irq(cmos->irq))
+ return -EINVAL;
+
++ if (alarm_disable_quirk)
++ return 0;
++
+ spin_lock_irqsave(&rtc_lock, flags);
+
+ if (enabled)
+@@ -1158,6 +1206,8 @@ static int __init cmos_init(void)
+ platform_driver_registered = true;
+ }
+
++ dmi_check_system(rtc_quirks);
++
+ if (retval == 0)
+ return 0;
+
+diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
+index 536b0e363826..fa905c9d772a 100644
+--- a/drivers/spi/spi-bcm63xx.c
++++ b/drivers/spi/spi-bcm63xx.c
+@@ -169,8 +169,6 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
+ transfer_list);
+ }
+
+- len -= prepend_len;
+-
+ init_completion(&bs->done);
+
+ /* Fill in the Message control register */
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index 28361f7783cd..7b69e93d8448 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1070,6 +1070,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
+
+ pdata->num_chipselect = 1;
+ pdata->enable_dma = true;
++ pdata->tx_chan_id = -1;
++ pdata->rx_chan_id = -1;
+
+ return pdata;
+ }
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 9e039c60c068..30b1229f6406 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -600,7 +600,9 @@ static void spi_pump_messages(struct kthread_work *work)
+ ret = master->transfer_one_message(master, master->cur_msg);
+ if (ret) {
+ dev_err(&master->dev,
+- "failed to transfer one message from queue\n");
++ "failed to transfer one message from queue: %d\n", ret);
++ master->cur_msg->status = ret;
++ spi_finalize_current_message(master);
+ return;
+ }
+ }
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index b0cac0c342e1..1039de499bc6 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -156,9 +156,13 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+ {
+ struct iscsi_cmd *cmd;
+ struct se_session *se_sess = conn->sess->se_sess;
+- int size, tag;
++ int size, tag, state = (gfp_mask & __GFP_WAIT) ? TASK_INTERRUPTIBLE :
++ TASK_RUNNING;
++
++ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
++ if (tag < 0)
++ return NULL;
+
+- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);
+ size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
+ cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
+ memset(cmd, 0, size);
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index e663921eebb6..d300fd99a2b8 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -728,7 +728,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
+ }
+ se_sess = tv_nexus->tvn_se_sess;
+
+- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
++ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ if (tag < 0) {
+ pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
+ return ERR_PTR(-ENOMEM);
+diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
+index cd005c227a23..4b2d3ab870f3 100644
+--- a/drivers/video/xen-fbfront.c
++++ b/drivers/video/xen-fbfront.c
+@@ -35,6 +35,7 @@
+ #include <xen/interface/io/fbif.h>
+ #include <xen/interface/io/protocols.h>
+ #include <xen/xenbus.h>
++#include <xen/platform_pci.h>
+
+ struct xenfb_info {
+ unsigned char *fb;
+@@ -699,6 +700,9 @@ static int __init xenfb_init(void)
+ if (xen_initial_domain())
+ return -ENODEV;
+
++ if (!xen_has_pv_devices())
++ return -ENODEV;
++
+ return xenbus_register_frontend(&xenfb_driver);
+ }
+
+diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
+index 34b20bfa4e8c..6244f9c8cfb8 100644
+--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
++++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
+@@ -496,7 +496,7 @@ subsys_initcall(xenbus_probe_frontend_init);
+ #ifndef MODULE
+ static int __init boot_wait_for_devices(void)
+ {
+- if (xen_hvm_domain() && !xen_platform_pci_unplug)
++ if (!xen_has_pv_devices())
+ return -ENODEV;
+
+ ready_to_wait_for_devices = 1;
+diff --git a/fs/dcookies.c b/fs/dcookies.c
+index ab5954b50267..ac44a69fbea9 100644
+--- a/fs/dcookies.c
++++ b/fs/dcookies.c
+@@ -204,7 +204,7 @@ out:
+ }
+
+ #ifdef CONFIG_COMPAT
+-COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, size_t, len)
++COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, compat_size_t, len)
+ {
+ #ifdef __BIG_ENDIAN
+ return sys_lookup_dcookie(((u64)w0 << 32) | w1, buf, len);
+diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
+index b74422888604..85cde3e76290 100644
+--- a/fs/exofs/ore.c
++++ b/fs/exofs/ore.c
+@@ -103,7 +103,7 @@ int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
+
+ layout->max_io_length =
+ (BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
+- layout->group_width;
++ (layout->group_width - layout->parity);
+ if (layout->parity) {
+ unsigned stripe_length =
+ (layout->group_width - layout->parity) *
+@@ -286,7 +286,8 @@ int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
+ if (length) {
+ ore_calc_stripe_info(layout, offset, length, &ios->si);
+ ios->length = ios->si.length;
+- ios->nr_pages = (ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
++ ios->nr_pages = ((ios->offset & (PAGE_SIZE - 1)) +
++ ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
+ if (layout->parity)
+ _ore_post_alloc_raid_stuff(ios);
+ }
+@@ -536,6 +537,7 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
+ u64 H = LmodS - G * T;
+
+ u32 N = div_u64(H, U);
++ u32 Nlast;
+
+ /* "H - (N * U)" is just "H % U" so it's bound to u32 */
+ u32 C = (u32)(H - (N * U)) / stripe_unit + G * group_width;
+@@ -568,6 +570,10 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
+ si->length = T - H;
+ if (si->length > length)
+ si->length = length;
++
++ Nlast = div_u64(H + si->length + U - 1, U);
++ si->maxdevUnits = Nlast - N;
++
+ si->M = M;
+ }
+ EXPORT_SYMBOL(ore_calc_stripe_info);
+@@ -583,13 +589,16 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
+ int ret;
+
+ if (per_dev->bio == NULL) {
+- unsigned pages_in_stripe = ios->layout->group_width *
+- (ios->layout->stripe_unit / PAGE_SIZE);
+- unsigned nr_pages = ios->nr_pages * ios->layout->group_width /
+- (ios->layout->group_width -
+- ios->layout->parity);
+- unsigned bio_size = (nr_pages + pages_in_stripe) /
+- ios->layout->group_width;
++ unsigned bio_size;
++
++ if (!ios->reading) {
++ bio_size = ios->si.maxdevUnits;
++ } else {
++ bio_size = (ios->si.maxdevUnits + 1) *
++ (ios->layout->group_width - ios->layout->parity) /
++ ios->layout->group_width;
++ }
++ bio_size *= (ios->layout->stripe_unit / PAGE_SIZE);
+
+ per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
+ if (unlikely(!per_dev->bio)) {
+@@ -609,8 +618,12 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
+ added_len = bio_add_pc_page(q, per_dev->bio, pages[pg],
+ pglen, pgbase);
+ if (unlikely(pglen != added_len)) {
+- ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=%u\n",
+- per_dev->bio->bi_vcnt);
++ /* If bi_vcnt == bi_max then this is a SW BUG */
++ ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=0x%x "
++ "bi_max=0x%x BIO_MAX=0x%x cur_len=0x%x\n",
++ per_dev->bio->bi_vcnt,
++ per_dev->bio->bi_max_vecs,
++ BIO_MAX_PAGES_KMALLOC, cur_len);
+ ret = -ENOMEM;
+ goto out;
+ }
+@@ -1098,7 +1111,7 @@ int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
+ size_attr->attr = g_attr_logical_length;
+ size_attr->attr.val_ptr = &size_attr->newsize;
+
+- ORE_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
++ ORE_DBGMSG2("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
+ _LLU(oc->comps->obj.id), _LLU(obj_size), i);
+ ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
+ &size_attr->attr);
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index ef74ad5fd362..fa8cb4b7b8fe 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1296,22 +1296,6 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
+ return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
+ }
+
+-static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
+- struct pipe_buffer *buf)
+-{
+- return 1;
+-}
+-
+-static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
+- .can_merge = 0,
+- .map = generic_pipe_buf_map,
+- .unmap = generic_pipe_buf_unmap,
+- .confirm = generic_pipe_buf_confirm,
+- .release = generic_pipe_buf_release,
+- .steal = fuse_dev_pipe_buf_steal,
+- .get = generic_pipe_buf_get,
+-};
+-
+ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+@@ -1358,7 +1342,11 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+ buf->page = bufs[page_nr].page;
+ buf->offset = bufs[page_nr].offset;
+ buf->len = bufs[page_nr].len;
+- buf->ops = &fuse_dev_pipe_buf_ops;
++ /*
++ * Need to be careful about this. Having buf->ops in module
++ * code can Oops if the buffer persists after module unload.
++ */
++ buf->ops = &nosteal_pipe_buf_ops;
+
+ pipe->nrbufs++;
+ page_nr++;
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index a860ab566d6e..8a572ddde55b 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -407,13 +407,11 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+ error = nfs4_discover_server_trunking(clp, &old);
+ if (error < 0)
+ goto error;
+- nfs_put_client(clp);
+- if (clp != old) {
+- clp->cl_preserve_clid = true;
+- clp = old;
+- }
+
+- return clp;
++ if (clp != old)
++ clp->cl_preserve_clid = true;
++ nfs_put_client(clp);
++ return old;
+
+ error:
+ nfs_mark_client_ready(clp, error);
+@@ -491,9 +489,10 @@ int nfs40_walk_client_list(struct nfs_client *new,
+ prev = pos;
+
+ status = nfs_wait_client_init_complete(pos);
+- spin_lock(&nn->nfs_client_lock);
+ if (status < 0)
+- continue;
++ goto out;
++ status = -NFS4ERR_STALE_CLIENTID;
++ spin_lock(&nn->nfs_client_lock);
+ }
+ if (pos->cl_cons_state != NFS_CS_READY)
+ continue;
+@@ -631,7 +630,8 @@ int nfs41_walk_client_list(struct nfs_client *new,
+ }
+ spin_lock(&nn->nfs_client_lock);
+ if (status < 0)
+- continue;
++ break;
++ status = -NFS4ERR_STALE_CLIENTID;
+ }
+ if (pos->cl_cons_state != NFS_CS_READY)
+ continue;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index dacb2979e8ac..29c5a2c08f02 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -532,7 +532,7 @@ static int nfs40_sequence_done(struct rpc_task *task,
+ struct nfs4_slot *slot = res->sr_slot;
+ struct nfs4_slot_table *tbl;
+
+- if (!RPC_WAS_SENT(task))
++ if (slot == NULL)
+ goto out;
+
+ tbl = slot->table;
+@@ -7057,9 +7057,9 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct pnfs_layout_hdr *lo;
+ struct nfs4_state *state = NULL;
+- unsigned long timeo, giveup;
++ unsigned long timeo, now, giveup;
+
+- dprintk("--> %s\n", __func__);
++ dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
+
+ if (!nfs41_sequence_done(task, &lgp->res.seq_res))
+ goto out;
+@@ -7067,12 +7067,38 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
+ switch (task->tk_status) {
+ case 0:
+ goto out;
++ /*
++ * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
++ * (or clients) writing to the same RAID stripe
++ */
+ case -NFS4ERR_LAYOUTTRYLATER:
++ /*
++	 * NFS4ERR_RECALLCONFLICT is a conflict with self (must recall an
++ * existing layout before getting a new one).
++ */
+ case -NFS4ERR_RECALLCONFLICT:
+ timeo = rpc_get_timeout(task->tk_client);
+ giveup = lgp->args.timestamp + timeo;
+- if (time_after(giveup, jiffies))
+- task->tk_status = -NFS4ERR_DELAY;
++ now = jiffies;
++ if (time_after(giveup, now)) {
++ unsigned long delay;
++
++ /* Delay for:
++			 * - Not less than NFS4_POLL_RETRY_MIN.
++			 * - One last try a jiffy before we give up
++ * - exponential backoff (time_now minus start_attempt)
++ */
++ delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
++ min((giveup - now - 1),
++ now - lgp->args.timestamp));
++
++ dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
++ __func__, delay);
++ rpc_delay(task, delay);
++ task->tk_status = 0;
++ rpc_restart_call_prepare(task);
++ goto out; /* Do not call nfs4_async_handle_error() */
++ }
+ break;
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_BAD_STATEID:
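
The delay formula above clamps an exponentially growing wait between a floor and the time remaining. Assuming HZ == 1000, NFS4_POLL_RETRY_MIN == HZ/10, and a 15000-jiffy RPC timeout starting at timestamp 0, the successive retries work out as:

    /* delay = max(NFS4_POLL_RETRY_MIN, min(giveup - now - 1, now - timestamp))
     *
     *   now =    50: min(14949,    50) ->  100  (clamped up to the floor)
     *   now =  4000: min(10999,  4000) -> 4000  (grows with elapsed time)
     *   now = 14000: min(  999, 14000) ->  999  (ends one jiffy before giveup)
     */
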
+@@ -7561,7 +7587,7 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
+ switch (err) {
+ case 0:
+ case -NFS4ERR_WRONGSEC:
+- case -NFS4ERR_NOTSUPP:
++ case -ENOTSUPP:
+ goto out;
+ default:
+ err = nfs4_handle_exception(server, err, &exception);
+@@ -7595,7 +7621,7 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
+ * Fall back on "guess and check" method if
+ * the server doesn't support SECINFO_NO_NAME
+ */
+- if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
++ if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
+ err = nfs4_find_root_sec(server, fhandle, info);
+ goto out_freepage;
+ }
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 79210d23f607..b2f842d0901b 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -3053,7 +3053,8 @@ out_overflow:
+ return -EIO;
+ }
+
+-static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
++static bool __decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected,
++ int *nfs_retval)
+ {
+ __be32 *p;
+ uint32_t opnum;
+@@ -3063,19 +3064,32 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
+ if (unlikely(!p))
+ goto out_overflow;
+ opnum = be32_to_cpup(p++);
+- if (opnum != expected) {
+- dprintk("nfs: Server returned operation"
+- " %d but we issued a request for %d\n",
+- opnum, expected);
+- return -EIO;
+- }
++ if (unlikely(opnum != expected))
++ goto out_bad_operation;
+ nfserr = be32_to_cpup(p);
+- if (nfserr != NFS_OK)
+- return nfs4_stat_to_errno(nfserr);
+- return 0;
++ if (nfserr == NFS_OK)
++ *nfs_retval = 0;
++ else
++ *nfs_retval = nfs4_stat_to_errno(nfserr);
++ return true;
++out_bad_operation:
++ dprintk("nfs: Server returned operation"
++ " %d but we issued a request for %d\n",
++ opnum, expected);
++ *nfs_retval = -EREMOTEIO;
++ return false;
+ out_overflow:
+ print_overflow_msg(__func__, xdr);
+- return -EIO;
++ *nfs_retval = -EIO;
++ return false;
++}
++
++static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
++{
++ int retval;
++
++ __decode_op_hdr(xdr, expected, &retval);
++ return retval;
+ }
+
+ /* Dummy routine */
+@@ -4957,11 +4971,12 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
+ uint32_t savewords, bmlen, i;
+ int status;
+
+- status = decode_op_hdr(xdr, OP_OPEN);
+- if (status != -EIO)
+- nfs_increment_open_seqid(status, res->seqid);
+- if (!status)
+- status = decode_stateid(xdr, &res->stateid);
++ if (!__decode_op_hdr(xdr, OP_OPEN, &status))
++ return status;
++ nfs_increment_open_seqid(status, res->seqid);
++ if (status)
++ return status;
++ status = decode_stateid(xdr, &res->stateid);
+ if (unlikely(status))
+ return status;
+
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index ac1dc331ba31..28466be64eeb 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -922,19 +922,20 @@ out:
+ * extend the write to cover the entire page in order to avoid fragmentation
+ * inefficiencies.
+ *
+- * If the file is opened for synchronous writes or if we have a write delegation
+- * from the server then we can just skip the rest of the checks.
++ * If the file is opened for synchronous writes then we can just skip the rest
++ * of the checks.
+ */
+ static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
+ {
+ if (file->f_flags & O_DSYNC)
+ return 0;
++ if (!nfs_write_pageuptodate(page, inode))
++ return 0;
+ if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
+ return 1;
+- if (nfs_write_pageuptodate(page, inode) && (inode->i_flock == NULL ||
+- (inode->i_flock->fl_start == 0 &&
++ if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
+ inode->i_flock->fl_end == OFFSET_MAX &&
+- inode->i_flock->fl_type != F_RDLCK)))
++ inode->i_flock->fl_type != F_RDLCK))
+ return 1;
+ return 0;
+ }
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index e44cb6427df3..6663511ab33a 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -888,9 +888,9 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
+ {
+ return sys_fanotify_mark(fanotify_fd, flags,
+ #ifdef __BIG_ENDIAN
+- ((__u64)mask1 << 32) | mask0,
+-#else
+ ((__u64)mask0 << 32) | mask1,
++#else
++ ((__u64)mask1 << 32) | mask0,
+ #endif
+ dfd, pathname);
+ }
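
The swap above matters because a compat task passes the 64-bit mask as two 32-bit words, and which word is the high half depends on the ABI's endianness. A self-contained sketch of the corrected reassembly (user-space C, helper name invented):

    #include <stdint.h>

    /* Mirror the corrected #ifdef: on big-endian ABIs the first word is
     * the most significant half; on little-endian it is the least. */
    static uint64_t join_mask(uint32_t mask0, uint32_t mask1, int big_endian)
    {
        return big_endian ? ((uint64_t)mask0 << 32) | mask1
                          : ((uint64_t)mask1 << 32) | mask0;
    }

    /* 0x0000000100000002 arrives as (1, 2) on big-endian but as (2, 1) on
     * little-endian; join_mask() reassembles both to the same value. */
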
+diff --git a/fs/read_write.c b/fs/read_write.c
+index e3cd280b158c..3889dcc25114 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -977,9 +977,9 @@ out:
+ return ret;
+ }
+
+-COMPAT_SYSCALL_DEFINE3(readv, unsigned long, fd,
++COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
+ const struct compat_iovec __user *,vec,
+- unsigned long, vlen)
++ compat_ulong_t, vlen)
+ {
+ struct fd f = fdget(fd);
+ ssize_t ret;
+@@ -1014,9 +1014,9 @@ COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
+ return ret;
+ }
+
+-COMPAT_SYSCALL_DEFINE5(preadv, unsigned long, fd,
++COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
+ const struct compat_iovec __user *,vec,
+- unsigned long, vlen, u32, pos_low, u32, pos_high)
++ compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
+ {
+ loff_t pos = ((loff_t)pos_high << 32) | pos_low;
+ return compat_sys_preadv64(fd, vec, vlen, pos);
+@@ -1044,9 +1044,9 @@ out:
+ return ret;
+ }
+
+-COMPAT_SYSCALL_DEFINE3(writev, unsigned long, fd,
++COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
+ const struct compat_iovec __user *, vec,
+- unsigned long, vlen)
++ compat_ulong_t, vlen)
+ {
+ struct fd f = fdget(fd);
+ ssize_t ret;
+@@ -1081,9 +1081,9 @@ COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
+ return ret;
+ }
+
+-COMPAT_SYSCALL_DEFINE5(pwritev, unsigned long, fd,
++COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
+ const struct compat_iovec __user *,vec,
+- unsigned long, vlen, u32, pos_low, u32, pos_high)
++ compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
+ {
+ loff_t pos = ((loff_t)pos_high << 32) | pos_low;
+ return compat_sys_pwritev64(fd, vec, vlen, pos);
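
Two things happen in this file: fd and vlen shrink to compat_ulong_t, so garbage in the upper half of a 64-bit register can never leak through from a 32-bit caller, and the 64-bit file offset is rebuilt from two 32-bit words. A sketch of the offset reassembly (types simplified for illustration):

    #include <stdint.h>

    /* pos_high/pos_low are the two halves a compat preadv/pwritev caller
     * supplies; (pos_high, pos_low) = (1, 0) names the 4 GiB offset. */
    static int64_t join_pos(uint32_t pos_low, uint32_t pos_high)
    {
        return ((int64_t)pos_high << 32) | pos_low;
    }
    /* join_pos(0, 1) == 0x100000000 == 4 GiB */
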
+diff --git a/fs/splice.c b/fs/splice.c
+index 3b7ee656f3aa..84f810d63c37 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -555,6 +555,24 @@ static const struct pipe_buf_operations default_pipe_buf_ops = {
+ .get = generic_pipe_buf_get,
+ };
+
++static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
++ struct pipe_buffer *buf)
++{
++ return 1;
++}
++
++/* Pipe buffer operations for a socket and similar. */
++const struct pipe_buf_operations nosteal_pipe_buf_ops = {
++ .can_merge = 0,
++ .map = generic_pipe_buf_map,
++ .unmap = generic_pipe_buf_unmap,
++ .confirm = generic_pipe_buf_confirm,
++ .release = generic_pipe_buf_release,
++ .steal = generic_pipe_buf_nosteal,
++ .get = generic_pipe_buf_get,
++};
++EXPORT_SYMBOL(nosteal_pipe_buf_ops);
++
+ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
+ unsigned long vlen, loff_t offset)
+ {
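
The point of hoisting these ops into fs/splice.c is lifetime: a pipe buffer can outlive the module (fuse, above) that filled it, and a buf->ops pointer into unloaded module text would Oops on the next pipe operation. The .steal contract is the other half of the fix, summarized here in comment form:

    /* A .steal callback returns 0 only when the caller may take the page
     * away from the pipe and reuse it elsewhere. generic_pipe_buf_nosteal()
     * always returns 1, refusing the ownership transfer, which forces
     * splice onto its copying path for these buffers. */
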
+diff --git a/include/linux/audit.h b/include/linux/audit.h
+index 729a4d165bcc..4fb28b23a4a4 100644
+--- a/include/linux/audit.h
++++ b/include/linux/audit.h
+@@ -135,7 +135,7 @@ static inline void audit_syscall_exit(void *pt_regs)
+ {
+ if (unlikely(current->audit_context)) {
+ int success = is_syscall_success(pt_regs);
+- int return_code = regs_return_value(pt_regs);
++ long return_code = regs_return_value(pt_regs);
+
+ __audit_syscall_exit(success, return_code);
+ }
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index 345da00a86e0..0f62cb7a4ff0 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -327,16 +327,16 @@ asmlinkage long compat_sys_keyctl(u32 option,
+ u32 arg2, u32 arg3, u32 arg4, u32 arg5);
+ asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
+
+-asmlinkage ssize_t compat_sys_readv(unsigned long fd,
+- const struct compat_iovec __user *vec, unsigned long vlen);
+-asmlinkage ssize_t compat_sys_writev(unsigned long fd,
+- const struct compat_iovec __user *vec, unsigned long vlen);
+-asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
++asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
++ const struct compat_iovec __user *vec, compat_ulong_t vlen);
++asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
++ const struct compat_iovec __user *vec, compat_ulong_t vlen);
++asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
+ const struct compat_iovec __user *vec,
+- unsigned long vlen, u32 pos_low, u32 pos_high);
+-asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
++ compat_ulong_t vlen, u32 pos_low, u32 pos_high);
++asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
+ const struct compat_iovec __user *vec,
+- unsigned long vlen, u32 pos_low, u32 pos_high);
++ compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+ asmlinkage long comat_sys_lseek(unsigned int, compat_off_t, unsigned int);
+
+ asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
+@@ -422,7 +422,7 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+ compat_long_t addr, compat_long_t data);
+
+-asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
++asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
+ /*
+ * epoll (fs/eventpoll.c) compat bits follow ...
+ */
+diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
+index 0b23edbee309..67de9b761374 100644
+--- a/include/linux/percpu_ida.h
++++ b/include/linux/percpu_ida.h
+@@ -4,6 +4,7 @@
+ #include <linux/types.h>
+ #include <linux/bitops.h>
+ #include <linux/init.h>
++#include <linux/sched.h>
+ #include <linux/spinlock_types.h>
+ #include <linux/wait.h>
+ #include <linux/cpumask.h>
+@@ -51,7 +52,7 @@ struct percpu_ida {
+ } ____cacheline_aligned_in_smp;
+ };
+
+-int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
++int percpu_ida_alloc(struct percpu_ida *pool, int state);
+ void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
+
+ void percpu_ida_destroy(struct percpu_ida *pool);
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index b8809fef61f5..ab5752692113 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -157,6 +157,8 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
+
++extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
++
+ /* for F_SETPIPE_SZ and F_GETPIPE_SZ */
+ long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
+ struct pipe_inode_info *get_pipe_info(struct file *file);
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index e4b948080d20..a67b38415768 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -142,8 +142,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+ return x;
+ }
+
+-extern unsigned long global_reclaimable_pages(void);
+-
+ #ifdef CONFIG_NUMA
+ /*
+ * Determine the per node value of a stat item. This function
+diff --git a/include/scsi/osd_ore.h b/include/scsi/osd_ore.h
+index a5f9b960dfc8..6ca3265a4dca 100644
+--- a/include/scsi/osd_ore.h
++++ b/include/scsi/osd_ore.h
+@@ -102,6 +102,7 @@ struct ore_striping_info {
+ unsigned unit_off;
+ unsigned cur_pg;
+ unsigned cur_comp;
++ unsigned maxdevUnits;
+ };
+
+ struct ore_io_state;
+diff --git a/include/xen/platform_pci.h b/include/xen/platform_pci.h
+index 438c256c274b..b49eeab0262e 100644
+--- a/include/xen/platform_pci.h
++++ b/include/xen/platform_pci.h
+@@ -48,4 +48,27 @@ static inline int xen_must_unplug_disks(void) {
+
+ extern int xen_platform_pci_unplug;
+
++#if defined(CONFIG_XEN_PVHVM)
++extern bool xen_has_pv_devices(void);
++extern bool xen_has_pv_disk_devices(void);
++extern bool xen_has_pv_nic_devices(void);
++extern bool xen_has_pv_and_legacy_disk_devices(void);
++#else
++static inline bool xen_has_pv_devices(void)
++{
++ return IS_ENABLED(CONFIG_XEN);
++}
++static inline bool xen_has_pv_disk_devices(void)
++{
++ return IS_ENABLED(CONFIG_XEN);
++}
++static inline bool xen_has_pv_nic_devices(void)
++{
++ return IS_ENABLED(CONFIG_XEN);
++}
++static inline bool xen_has_pv_and_legacy_disk_devices(void)
++{
++ return false;
++}
++#endif
+ #endif /* _XEN_PLATFORM_PCI_H */
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 7ddfd8a00a2a..6def25f1b351 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -103,7 +103,8 @@ static int audit_rate_limit;
+
+ /* Number of outstanding audit_buffers allowed. */
+ static int audit_backlog_limit = 64;
+-static int audit_backlog_wait_time = 60 * HZ;
++#define AUDIT_BACKLOG_WAIT_TIME (60 * HZ)
++static int audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
+ static int audit_backlog_wait_overflow = 0;
+
+ /* The identity of the user shutting down the audit system. */
+@@ -1135,6 +1136,8 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
+ return NULL;
+ }
+
++ audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
++
+ ab = audit_buffer_alloc(ctx, gfp_mask, type);
+ if (!ab) {
+ audit_log_lost("out of memory in audit_log_start");
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 5cf6c7097a71..bfca770a64e0 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -77,7 +77,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
+ tk->wall_to_monotonic = wtm;
+ set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
+ tk->offs_real = timespec_to_ktime(tmp);
+- tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0));
++ tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
+ }
+
+ static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
+@@ -595,7 +595,7 @@ s32 timekeeping_get_tai_offset(void)
+ static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
+ {
+ tk->tai_offset = tai_offset;
+- tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0));
++ tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
+ }
+
+ /**
+@@ -610,6 +610,7 @@ void timekeeping_set_tai_offset(s32 tai_offset)
+ raw_spin_lock_irqsave(&timekeeper_lock, flags);
+ write_seqcount_begin(&timekeeper_seq);
+ __timekeeping_set_tai_offset(tk, tai_offset);
++ timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
+ write_seqcount_end(&timekeeper_seq);
+ raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+ clock_was_set();
+@@ -1023,6 +1024,8 @@ static int timekeeping_suspend(void)
+ timekeeping_suspend_time =
+ timespec_add(timekeeping_suspend_time, delta_delta);
+ }
++
++ timekeeping_update(tk, TK_MIRROR);
+ write_seqcount_end(&timekeeper_seq);
+ raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+
+@@ -1255,7 +1258,7 @@ out_adjust:
+ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
+ {
+ u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
+- unsigned int action = 0;
++ unsigned int clock_set = 0;
+
+ while (tk->xtime_nsec >= nsecps) {
+ int leap;
+@@ -1277,11 +1280,10 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
+
+ __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
+
+- clock_was_set_delayed();
+- action = TK_CLOCK_WAS_SET;
++ clock_set = TK_CLOCK_WAS_SET;
+ }
+ }
+- return action;
++ return clock_set;
+ }
+
+ /**
+@@ -1294,7 +1296,8 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
+ * Returns the unconsumed cycles.
+ */
+ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
+- u32 shift)
++ u32 shift,
++ unsigned int *clock_set)
+ {
+ cycle_t interval = tk->cycle_interval << shift;
+ u64 raw_nsecs;
+@@ -1308,7 +1311,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
+ tk->cycle_last += interval;
+
+ tk->xtime_nsec += tk->xtime_interval << shift;
+- accumulate_nsecs_to_secs(tk);
++ *clock_set |= accumulate_nsecs_to_secs(tk);
+
+ /* Accumulate raw time */
+ raw_nsecs = (u64)tk->raw_interval << shift;
+@@ -1366,7 +1369,7 @@ static void update_wall_time(void)
+ struct timekeeper *tk = &shadow_timekeeper;
+ cycle_t offset;
+ int shift = 0, maxshift;
+- unsigned int action;
++ unsigned int clock_set = 0;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&timekeeper_lock, flags);
+@@ -1401,7 +1404,8 @@ static void update_wall_time(void)
+ maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
+ shift = min(shift, maxshift);
+ while (offset >= tk->cycle_interval) {
+- offset = logarithmic_accumulation(tk, offset, shift);
++ offset = logarithmic_accumulation(tk, offset, shift,
++ &clock_set);
+ if (offset < tk->cycle_interval<<shift)
+ shift--;
+ }
+@@ -1419,7 +1423,7 @@ static void update_wall_time(void)
+ * Finally, make sure that after the rounding
+ * xtime_nsec isn't larger than NSEC_PER_SEC
+ */
+- action = accumulate_nsecs_to_secs(tk);
++ clock_set |= accumulate_nsecs_to_secs(tk);
+
+ write_seqcount_begin(&timekeeper_seq);
+ /* Update clock->cycle_last with the new value */
+@@ -1435,10 +1439,23 @@ static void update_wall_time(void)
+ * updating.
+ */
+ memcpy(real_tk, tk, sizeof(*tk));
+- timekeeping_update(real_tk, action);
++ timekeeping_update(real_tk, clock_set);
+ write_seqcount_end(&timekeeper_seq);
+ out:
+ raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
++ if (clock_set) {
++ /*
++ * XXX - I'd rather we just call clock_was_set(), but
++ * since we're currently holding the jiffies lock, calling
++ * clock_was_set would trigger an ipi which would then grab
++ * the jiffies lock and we'd deadlock. :(
++		 * The right solution should probably be dropping
++ * the jiffies lock before calling update_wall_time
++ * but that requires some rework of the tick sched
++ * code.
++ */
++ clock_was_set_delayed();
++ }
+ }
+
+ /**
+@@ -1697,12 +1714,14 @@ int do_adjtimex(struct timex *txc)
+
+ if (tai != orig_tai) {
+ __timekeeping_set_tai_offset(tk, tai);
+- update_pvclock_gtod(tk, true);
+- clock_was_set_delayed();
++ timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
+ }
+ write_seqcount_end(&timekeeper_seq);
+ raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+
++ if (tai != orig_tai)
++ clock_was_set();
++
+ ntp_notify_cmos_timer();
+
+ return ret;
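
The XXX comment in update_wall_time() is worth unpacking; the deadlock it sidesteps, assuming the usual tick-sched call chain, is:

    /* CPU A: update_wall_time()          holds jiffies_lock
     *          -> clock_was_set()        sends an IPI and waits for CPU B
     * CPU B: IPI handler
     *          -> tick/hrtimer retrigger  tries to take jiffies_lock: stuck
     *
     * clock_was_set_delayed() instead queues the notification to run after
     * jiffies_lock has been released, breaking the cycle. */
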
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 1345d9ff0662..e66411fb55b3 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
+
+ /* Current function tracing op */
+ struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
++/* What to set function_trace_op to */
++static struct ftrace_ops *set_function_trace_op;
+
+ /* List for set_ftrace_pid's pids. */
+ LIST_HEAD(ftrace_pids);
+@@ -278,6 +280,29 @@ static void update_global_ops(void)
+ global_ops.func = func;
+ }
+
++static void ftrace_sync(struct work_struct *work)
++{
++ /*
++ * This function is just a stub to implement a hard force
++ * of synchronize_sched(). This requires synchronizing
++ * tasks even in userspace and idle.
++ *
++ * Yes, function tracing is rude.
++ */
++}
++
++static void ftrace_sync_ipi(void *data)
++{
++ /* Probably not needed, but do it anyway */
++ smp_rmb();
++}
++
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++static void update_function_graph_func(void);
++#else
++static inline void update_function_graph_func(void) { }
++#endif
++
+ static void update_ftrace_function(void)
+ {
+ ftrace_func_t func;
+@@ -296,16 +321,61 @@ static void update_ftrace_function(void)
+ !FTRACE_FORCE_LIST_FUNC)) {
+ /* Set the ftrace_ops that the arch callback uses */
+ if (ftrace_ops_list == &global_ops)
+- function_trace_op = ftrace_global_list;
++ set_function_trace_op = ftrace_global_list;
+ else
+- function_trace_op = ftrace_ops_list;
++ set_function_trace_op = ftrace_ops_list;
+ func = ftrace_ops_list->func;
+ } else {
+ /* Just use the default ftrace_ops */
+- function_trace_op = &ftrace_list_end;
++ set_function_trace_op = &ftrace_list_end;
+ func = ftrace_ops_list_func;
+ }
+
++ /* If there's no change, then do nothing more here */
++ if (ftrace_trace_function == func)
++ return;
++
++ update_function_graph_func();
++
++ /*
++ * If we are using the list function, it doesn't care
++ * about the function_trace_ops.
++ */
++ if (func == ftrace_ops_list_func) {
++ ftrace_trace_function = func;
++ /*
++ * Don't even bother setting function_trace_ops,
++ * it would be racy to do so anyway.
++ */
++ return;
++ }
++
++#ifndef CONFIG_DYNAMIC_FTRACE
++ /*
++ * For static tracing, we need to be a bit more careful.
++ * The function change takes affect immediately. Thus,
++	 * The function change takes effect immediately. Thus,
++	 * we need to coordinate the setting of the function_trace_ops
++ *
++ * Set the function to the list ops, which will call the
++ * function we want, albeit indirectly, but it handles the
++ * ftrace_ops and doesn't depend on function_trace_op.
++ */
++ ftrace_trace_function = ftrace_ops_list_func;
++ /*
++ * Make sure all CPUs see this. Yes this is slow, but static
++ * tracing is slow and nasty to have enabled.
++ */
++ schedule_on_each_cpu(ftrace_sync);
++ /* Now all cpus are using the list ops. */
++ function_trace_op = set_function_trace_op;
++ /* Make sure the function_trace_op is visible on all CPUs */
++ smp_wmb();
++ /* Nasty way to force a rmb on all cpus */
++ smp_call_function(ftrace_sync_ipi, NULL, 1);
++ /* OK, we are all set to update the ftrace_trace_function now! */
++#endif /* !CONFIG_DYNAMIC_FTRACE */
++
+ ftrace_trace_function = func;
+ }
+
+@@ -410,17 +480,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
+ return 0;
+ }
+
+-static void ftrace_sync(struct work_struct *work)
+-{
+- /*
+- * This function is just a stub to implement a hard force
+- * of synchronize_sched(). This requires synchronizing
+- * tasks even in userspace and idle.
+- *
+- * Yes, function tracing is rude.
+- */
+-}
+-
+ static int __unregister_ftrace_function(struct ftrace_ops *ops)
+ {
+ int ret;
+@@ -439,20 +498,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
+ } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+ ret = remove_ftrace_list_ops(&ftrace_control_list,
+ &control_ops, ops);
+- if (!ret) {
+- /*
+- * The ftrace_ops is now removed from the list,
+- * so there'll be no new users. We must ensure
+- * all current users are done before we free
+- * the control data.
+- * Note synchronize_sched() is not enough, as we
+- * use preempt_disable() to do RCU, but the function
+- * tracer can be called where RCU is not active
+- * (before user_exit()).
+- */
+- schedule_on_each_cpu(ftrace_sync);
+- control_ops_free(ops);
+- }
+ } else
+ ret = remove_ftrace_ops(&ftrace_ops_list, ops);
+
+@@ -462,17 +507,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
+ if (ftrace_enabled)
+ update_ftrace_function();
+
+- /*
+- * Dynamic ops may be freed, we must make sure that all
+- * callers are done before leaving this function.
+- *
+- * Again, normal synchronize_sched() is not good enough.
+- * We need to do a hard force of sched synchronization.
+- */
+- if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+- schedule_on_each_cpu(ftrace_sync);
+-
+-
+ return 0;
+ }
+
+@@ -1992,8 +2026,14 @@ void ftrace_modify_all_code(int command)
+ else if (command & FTRACE_DISABLE_CALLS)
+ ftrace_replace_code(0);
+
+- if (update && ftrace_trace_function != ftrace_ops_list_func)
++ if (update && ftrace_trace_function != ftrace_ops_list_func) {
++ function_trace_op = set_function_trace_op;
++ smp_wmb();
++ /* If irqs are disabled, we are in stop machine */
++ if (!irqs_disabled())
++ smp_call_function(ftrace_sync_ipi, NULL, 1);
+ ftrace_update_ftrace_func(ftrace_trace_function);
++ }
+
+ if (command & FTRACE_START_FUNC_RET)
+ ftrace_enable_ftrace_graph_caller();
+@@ -2156,10 +2196,41 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ command |= FTRACE_UPDATE_TRACE_FUNC;
+ }
+
+- if (!command || !ftrace_enabled)
++ if (!command || !ftrace_enabled) {
++ /*
++ * If these are control ops, they still need their
++ * per_cpu field freed. Since, function tracing is
++ * not currently active, we can just free them
++ * without synchronizing all CPUs.
++ */
++ if (ops->flags & FTRACE_OPS_FL_CONTROL)
++ control_ops_free(ops);
+ return 0;
++ }
+
+ ftrace_run_update_code(command);
++
++ /*
++ * Dynamic ops may be freed, we must make sure that all
++ * callers are done before leaving this function.
++ * The same goes for freeing the per_cpu data of the control
++ * ops.
++ *
++ * Again, normal synchronize_sched() is not good enough.
++ * We need to do a hard force of sched synchronization.
++ * This is because we use preempt_disable() to do RCU, but
++ * the function tracers can be called where RCU is not watching
++	 * (like before user_exit()). We cannot rely on the RCU
++ * infrastructure to do the synchronization, thus we must do it
++ * ourselves.
++ */
++ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
++ schedule_on_each_cpu(ftrace_sync);
++
++ if (ops->flags & FTRACE_OPS_FL_CONTROL)
++ control_ops_free(ops);
++ }
++
+ return 0;
+ }
+
+@@ -4777,6 +4848,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+ trace_func_graph_ret_t ftrace_graph_return =
+ (trace_func_graph_ret_t)ftrace_stub;
+ trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
++static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
+
+ /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
+ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+@@ -4918,6 +4990,30 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
+ FTRACE_OPS_FL_RECURSION_SAFE,
+ };
+
++static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
++{
++ if (!ftrace_ops_test(&global_ops, trace->func, NULL))
++ return 0;
++ return __ftrace_graph_entry(trace);
++}
++
++/*
++ * The function graph tracer should only trace the functions defined
++ * by set_ftrace_filter and set_ftrace_notrace. If another function
++ * tracer ops is registered, the graph tracer requires testing the
++ * function against the global ops, and not just trace any function
++ * that any ftrace_ops registered.
++ */
++static void update_function_graph_func(void)
++{
++ if (ftrace_ops_list == &ftrace_list_end ||
++ (ftrace_ops_list == &global_ops &&
++ global_ops.next == &ftrace_list_end))
++ ftrace_graph_entry = __ftrace_graph_entry;
++ else
++ ftrace_graph_entry = ftrace_graph_entry_test;
++}
++
+ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc)
+ {
+@@ -4942,7 +5038,16 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ }
+
+ ftrace_graph_return = retfunc;
+- ftrace_graph_entry = entryfunc;
++
++ /*
++ * Update the indirect function to the entryfunc, and the
++ * function that gets called to the entry_test first. Then
++ * call the update fgraph entry function to determine if
++ * the entryfunc should be called directly or not.
++ */
++ __ftrace_graph_entry = entryfunc;
++ ftrace_graph_entry = ftrace_graph_entry_test;
++ update_function_graph_func();
+
+ ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
+
+@@ -4961,6 +5066,7 @@ void unregister_ftrace_graph(void)
+ ftrace_graph_active--;
+ ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+ ftrace_graph_entry = ftrace_graph_entry_stub;
++ __ftrace_graph_entry = ftrace_graph_entry_stub;
+ ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
+ unregister_pm_notifier(&ftrace_suspend_notifier);
+ unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
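
The static-tracing dance in update_ftrace_function() and ftrace_modify_all_code() is a two-step publish; condensed, the ordering guarantee it builds is:

    /* function_trace_op = set_function_trace_op;   // 1. publish new ops
     * smp_wmb();                                   // 2. order the store
     * smp_call_function(ftrace_sync_ipi, NULL, 1); // 3. rmb on every CPU
     * ftrace_update_ftrace_func(func);             // 4. switch the callers
     *
     * Any CPU that runs the new ftrace_trace_function installed in step 4
     * is thereby guaranteed to also observe the function_trace_op from
     * step 1, so callback and ops can never be mismatched. */
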
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index b778e96e02a1..138077b1a607 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -435,6 +435,9 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ unsigned long irq_flags;
+ int alloc;
+
++ if (unlikely(tracing_selftest_running || tracing_disabled))
++ return 0;
++
+ alloc = sizeof(*entry) + size + 2; /* possible \n added */
+
+ local_save_flags(irq_flags);
+@@ -475,6 +478,9 @@ int __trace_bputs(unsigned long ip, const char *str)
+ unsigned long irq_flags;
+ int size = sizeof(struct bputs_entry);
+
++ if (unlikely(tracing_selftest_running || tracing_disabled))
++ return 0;
++
+ local_save_flags(irq_flags);
+ buffer = global_trace.trace_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
+@@ -5872,6 +5878,8 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
+
+ rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
+
++ buf->tr = tr;
++
+ buf->buffer = ring_buffer_alloc(size, rb_flags);
+ if (!buf->buffer)
+ return -ENOMEM;
+diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
+index bab1ba2a4c71..fd7d6d3d88a1 100644
+--- a/lib/percpu_ida.c
++++ b/lib/percpu_ida.c
+@@ -142,22 +142,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida *pool,
+ /**
+ * percpu_ida_alloc - allocate a tag
+ * @pool: pool to allocate from
+- * @gfp: gfp flags
++ * @state: task state for prepare_to_wait
+ *
+ * Returns a tag - an integer in the range [0..nr_tags) (passed to
+ * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
+ *
+ * Safe to be called from interrupt context (assuming it isn't passed
+- * __GFP_WAIT, of course).
++ * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
+ *
+ * @gfp indicates whether or not to wait until a free id is available (it's not
+ * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
+ * however long it takes until another thread frees an id (same semantics as a
+ * mempool).
+ *
+- * Will not fail if passed __GFP_WAIT.
++ * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
+ */
+-int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
++int percpu_ida_alloc(struct percpu_ida *pool, int state)
+ {
+ DEFINE_WAIT(wait);
+ struct percpu_ida_cpu *tags;
+@@ -184,7 +184,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
+ *
+ * global lock held and irqs disabled, don't need percpu lock
+ */
+- prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
++ if (state != TASK_RUNNING)
++ prepare_to_wait(&pool->wait, &wait, state);
+
+ if (!tags->nr_free)
+ alloc_global_tags(pool, tags);
+@@ -201,16 +202,22 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
+ spin_unlock(&pool->lock);
+ local_irq_restore(flags);
+
+- if (tag >= 0 || !(gfp & __GFP_WAIT))
++ if (tag >= 0 || state == TASK_RUNNING)
+ break;
+
++ if (signal_pending_state(state, current)) {
++ tag = -ERESTARTSYS;
++ break;
++ }
++
+ schedule();
+
+ local_irq_save(flags);
+ tags = this_cpu_ptr(pool->tag_cpu);
+ }
++ if (state != TASK_RUNNING)
++ finish_wait(&pool->wait, &wait);
+
+- finish_wait(&pool->wait, &wait);
+ return tag;
+ }
+ EXPORT_SYMBOL_GPL(percpu_ida_alloc);
+diff --git a/mm/internal.h b/mm/internal.h
+index 684f7aa9692a..8b6cfd63b5a5 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -85,7 +85,6 @@ extern unsigned long highest_memmap_pfn;
+ */
+ extern int isolate_lru_page(struct page *page);
+ extern void putback_lru_page(struct page *page);
+-extern unsigned long zone_reclaimable_pages(struct zone *zone);
+ extern bool zone_reclaimable(struct zone *zone);
+
+ /*
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index e0e979276df0..8e7adcba8176 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1079,16 +1079,22 @@ skip_node:
+ * skipped and we should continue the tree walk.
+ * last_visited css is safe to use because it is
+ * protected by css_get and the tree walk is rcu safe.
++ *
++ * We do not take a reference on the root of the tree walk
++ * because we might race with the root removal when it would
++ * be the only node in the iterated hierarchy and mem_cgroup_iter
++ * would end up in an endless loop because it expects that at
++ * least one valid node will be returned. The root cannot
++ * disappear because the caller of the iterator should already
++ * hold it, so skipping the css reference should be safe.
+ */
+ if (next_css) {
+- struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
++ if ((next_css->flags & CSS_ONLINE) &&
++ (next_css == &root->css || css_tryget(next_css)))
++ return mem_cgroup_from_css(next_css);
+
+- if (css_tryget(&mem->css))
+- return mem;
+- else {
+- prev_css = next_css;
+- goto skip_node;
+- }
++ prev_css = next_css;
++ goto skip_node;
+ }
+
+ return NULL;
+@@ -1122,7 +1128,15 @@ mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
+ if (iter->last_dead_count == *sequence) {
+ smp_rmb();
+ position = iter->last_visited;
+- if (position && !css_tryget(&position->css))
++
++ /*
++ * We cannot take a reference to the root because we might race
++ * with its removal, and returning NULL would send the caller of
++ * the iterator into an endless loop in which the root is
++ * returned all the time.
++ */
++ if (position && position != root &&
++ !css_tryget(&position->css))
+ position = NULL;
+ }
+ return position;
+@@ -1131,9 +1145,11 @@ mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
+ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
+ struct mem_cgroup *last_visited,
+ struct mem_cgroup *new_position,
++ struct mem_cgroup *root,
+ int sequence)
+ {
+- if (last_visited)
++ /* root reference counting symmetric to mem_cgroup_iter_load */
++ if (last_visited && last_visited != root)
+ css_put(&last_visited->css);
+ /*
+ * We store the sequence count from the time @last_visited was
+@@ -1208,7 +1224,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
+ memcg = __mem_cgroup_iter_next(root, last_visited);
+
+ if (reclaim) {
+- mem_cgroup_iter_update(iter, last_visited, memcg, seq);
++ mem_cgroup_iter_update(iter, last_visited, memcg, root,
++ seq);
+
+ if (!memcg)
+ iter->generation++;
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 9aea53f4551c..428adeedd3be 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -856,14 +856,14 @@ static int page_action(struct page_state *ps, struct page *p,
+ * the pages and send SIGBUS to the processes if the data was dirty.
+ */
+ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+- int trapno, int flags)
++ int trapno, int flags, struct page **hpagep)
+ {
+ enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
+ struct address_space *mapping;
+ LIST_HEAD(tokill);
+ int ret;
+ int kill = 1, forcekill;
+- struct page *hpage = compound_head(p);
++ struct page *hpage = *hpagep;
+ struct page *ppage;
+
+ if (PageReserved(p) || PageSlab(p))
+@@ -942,11 +942,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ * We pinned the head page for hwpoison handling,
+ * now we split the thp and we are interested in
+ * the hwpoisoned raw page, so move the refcount
+- * to it.
++ * to it. Similarly, the page lock is shifted.
+ */
+ if (hpage != p) {
+ put_page(hpage);
+ get_page(p);
++ lock_page(p);
++ unlock_page(hpage);
++ *hpagep = p;
+ }
+ /* THP is split, so ppage should be the real poisoned page. */
+ ppage = p;
+@@ -964,17 +967,11 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ if (kill)
+ collect_procs(ppage, &tokill);
+
+- if (hpage != ppage)
+- lock_page(ppage);
+-
+ ret = try_to_unmap(ppage, ttu);
+ if (ret != SWAP_SUCCESS)
+ printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
+ pfn, page_mapcount(ppage));
+
+- if (hpage != ppage)
+- unlock_page(ppage);
+-
+ /*
+ * Now that the dirty bit has been propagated to the
+ * struct page and all unmaps done we can decide if
+@@ -1193,8 +1190,12 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+ /*
+ * Now take care of user space mappings.
+ * Abort on fail: __delete_from_page_cache() assumes unmapped page.
++ *
++ * When the raw error page is a thp tail page, hpage points to the
++ * raw page after the thp split.
+ */
+- if (hwpoison_user_mappings(p, pfn, trapno, flags) != SWAP_SUCCESS) {
++ if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
++ != SWAP_SUCCESS) {
+ printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
+ res = -EBUSY;
+ goto out;
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 362e5f1327ec..af99b9ed2007 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -895,7 +895,15 @@ again: remove_next = 1 + (end > next->vm_end);
+ static inline int is_mergeable_vma(struct vm_area_struct *vma,
+ struct file *file, unsigned long vm_flags)
+ {
+- if (vma->vm_flags ^ vm_flags)
++ /*
++ * VM_SOFTDIRTY should not prevent VMA merging if we match
++ * all the flags except the dirty bit -- the caller should
++ * mark the merged VMA as dirty. If the dirty bit were not
++ * excluded from the comparison, we would increase pressure
++ * on the memory system, forcing the kernel to generate new
++ * VMAs when an old one could be extended instead.
++ */
++ if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
+ return 0;
+ if (vma->vm_file != file)
+ return 0;
+@@ -1084,7 +1092,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
+ return a->vm_end == b->vm_start &&
+ mpol_equal(vma_policy(a), vma_policy(b)) &&
+ a->vm_file == b->vm_file &&
+- !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
++ !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
+ b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
+ }
+
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 6738c47f1f72..e73f01c56d10 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -170,7 +170,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
+ * implementation used by LSMs.
+ */
+ if (has_capability_noaudit(p, CAP_SYS_ADMIN))
+- adj -= 30;
++ points -= (points * 3) / 100;
+
+ /* Normalize to oom_score_adj units */
+ adj *= totalpages / 1000;
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 63807583d8e8..2d30e2cfe804 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -191,6 +191,26 @@ static unsigned long writeout_period_time = 0;
+ * global dirtyable memory first.
+ */
+
++/**
++ * zone_dirtyable_memory - number of dirtyable pages in a zone
++ * @zone: the zone
++ *
++ * Returns the zone's number of pages potentially available for dirty
++ * page cache. This is the base value for the per-zone dirty limits.
++ */
++static unsigned long zone_dirtyable_memory(struct zone *zone)
++{
++ unsigned long nr_pages;
++
++ nr_pages = zone_page_state(zone, NR_FREE_PAGES);
++ nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
++
++ nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
++ nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
++
++ return nr_pages;
++}
++
+ static unsigned long highmem_dirtyable_memory(unsigned long total)
+ {
+ #ifdef CONFIG_HIGHMEM
+@@ -198,11 +218,9 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
+ unsigned long x = 0;
+
+ for_each_node_state(node, N_HIGH_MEMORY) {
+- struct zone *z =
+- &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
++ struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+
+- x += zone_page_state(z, NR_FREE_PAGES) +
+- zone_reclaimable_pages(z) - z->dirty_balance_reserve;
++ x += zone_dirtyable_memory(z);
+ }
+ /*
+ * Unreclaimable memory (kernel memory or anonymous memory
+@@ -238,9 +256,12 @@ static unsigned long global_dirtyable_memory(void)
+ {
+ unsigned long x;
+
+- x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
++ x = global_page_state(NR_FREE_PAGES);
+ x -= min(x, dirty_balance_reserve);
+
++ x += global_page_state(NR_INACTIVE_FILE);
++ x += global_page_state(NR_ACTIVE_FILE);
++
+ if (!vm_highmem_is_dirtyable)
+ x -= highmem_dirtyable_memory(x);
+
+@@ -289,32 +310,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
+ }
+
+ /**
+- * zone_dirtyable_memory - number of dirtyable pages in a zone
+- * @zone: the zone
+- *
+- * Returns the zone's number of pages potentially available for dirty
+- * page cache. This is the base value for the per-zone dirty limits.
+- */
+-static unsigned long zone_dirtyable_memory(struct zone *zone)
+-{
+- /*
+- * The effective global number of dirtyable pages may exclude
+- * highmem as a big-picture measure to keep the ratio between
+- * dirty memory and lowmem reasonable.
+- *
+- * But this function is purely about the individual zone and a
+- * highmem zone can hold its share of dirty pages, so we don't
+- * care about vm_highmem_is_dirtyable here.
+- */
+- unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
+- zone_reclaimable_pages(zone);
+-
+- /* don't allow this to underflow */
+- nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+- return nr_pages;
+-}
+-
+-/**
+ * zone_dirty_limit - maximum number of dirty pages allowed in a zone
+ * @zone: the zone
+ *
+diff --git a/mm/slub.c b/mm/slub.c
+index 96f21691b67c..5c1343a391d0 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -4272,7 +4272,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
+
+ page = ACCESS_ONCE(c->partial);
+ if (page) {
+- x = page->pobjects;
++ node = page_to_nid(page);
++ if (flags & SO_TOTAL)
++ WARN_ON_ONCE(1);
++ else if (flags & SO_OBJECTS)
++ WARN_ON_ONCE(1);
++ else
++ x = page->pages;
+ total += x;
+ nodes[node] += x;
+ }
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index eea668d9cff6..05e6095159dc 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -147,7 +147,7 @@ static bool global_reclaim(struct scan_control *sc)
+ }
+ #endif
+
+-unsigned long zone_reclaimable_pages(struct zone *zone)
++static unsigned long zone_reclaimable_pages(struct zone *zone)
+ {
+ int nr;
+
+@@ -3297,27 +3297,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
+ wake_up_interruptible(&pgdat->kswapd_wait);
+ }
+
+-/*
+- * The reclaimable count would be mostly accurate.
+- * The less reclaimable pages may be
+- * - mlocked pages, which will be moved to unevictable list when encountered
+- * - mapped pages, which may require several travels to be reclaimed
+- * - dirty pages, which is not "instantly" reclaimable
+- */
+-unsigned long global_reclaimable_pages(void)
+-{
+- int nr;
+-
+- nr = global_page_state(NR_ACTIVE_FILE) +
+- global_page_state(NR_INACTIVE_FILE);
+-
+- if (get_nr_swap_pages() > 0)
+- nr += global_page_state(NR_ACTIVE_ANON) +
+- global_page_state(NR_INACTIVE_ANON);
+-
+- return nr;
+-}
+-
+ #ifdef CONFIG_HIBERNATION
+ /*
+ * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 743e6ebf5f9f..2c7baa809913 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -74,36 +74,6 @@
+ struct kmem_cache *skbuff_head_cache __read_mostly;
+ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+
+-static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
+- struct pipe_buffer *buf)
+-{
+- put_page(buf->page);
+-}
+-
+-static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
+- struct pipe_buffer *buf)
+-{
+- get_page(buf->page);
+-}
+-
+-static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
+- struct pipe_buffer *buf)
+-{
+- return 1;
+-}
+-
+-
+-/* Pipe buffer operations for a socket. */
+-static const struct pipe_buf_operations sock_pipe_buf_ops = {
+- .can_merge = 0,
+- .map = generic_pipe_buf_map,
+- .unmap = generic_pipe_buf_unmap,
+- .confirm = generic_pipe_buf_confirm,
+- .release = sock_pipe_buf_release,
+- .steal = sock_pipe_buf_steal,
+- .get = sock_pipe_buf_get,
+-};
+-
+ /**
+ * skb_panic - private function for out-of-line support
+ * @skb: buffer
+@@ -1800,7 +1770,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ .partial = partial,
+ .nr_pages_max = MAX_SKB_FRAGS,
+ .flags = flags,
+- .ops = &sock_pipe_buf_ops,
++ .ops = &nosteal_pipe_buf_ops,
+ .spd_release = sock_spd_release,
+ };
+ struct sk_buff *frag_iter;
+diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
+index f1eb0d16666c..23fa3c1841cd 100644
+--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
++++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
+@@ -137,7 +137,6 @@ void init_gssp_clnt(struct sunrpc_net *sn)
+ {
+ mutex_init(&sn->gssp_lock);
+ sn->gssp_clnt = NULL;
+- init_waitqueue_head(&sn->gssp_wq);
+ }
+
+ int set_gssp_clnt(struct net *net)
+@@ -154,7 +153,6 @@ int set_gssp_clnt(struct net *net)
+ sn->gssp_clnt = clnt;
+ }
+ mutex_unlock(&sn->gssp_lock);
+- wake_up(&sn->gssp_wq);
+ return ret;
+ }
+
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 09fb638bcaa4..e18be86dc486 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1295,34 +1295,9 @@ static int set_gss_proxy(struct net *net, int type)
+ else
+ ret = -EBUSY;
+ spin_unlock(&use_gssp_lock);
+- wake_up(&sn->gssp_wq);
+ return ret;
+ }
+
+-static inline bool gssp_ready(struct sunrpc_net *sn)
+-{
+- switch (sn->use_gss_proxy) {
+- case -1:
+- return false;
+- case 0:
+- return true;
+- case 1:
+- return sn->gssp_clnt;
+- }
+- WARN_ON_ONCE(1);
+- return false;
+-}
+-
+-static int wait_for_gss_proxy(struct net *net, struct file *file)
+-{
+- struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+-
+- if (file->f_flags & O_NONBLOCK && !gssp_ready(sn))
+- return -EAGAIN;
+- return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn));
+-}
+-
+-
+ static ssize_t write_gssp(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+ {
+@@ -1355,16 +1330,12 @@ static ssize_t read_gssp(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+ {
+ struct net *net = PDE_DATA(file_inode(file));
++ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+ unsigned long p = *ppos;
+ char tbuf[10];
+ size_t len;
+- int ret;
+-
+- ret = wait_for_gss_proxy(net, file);
+- if (ret)
+- return ret;
+
+- snprintf(tbuf, sizeof(tbuf), "%d\n", use_gss_proxy(net));
++ snprintf(tbuf, sizeof(tbuf), "%d\n", sn->use_gss_proxy);
+ len = strlen(tbuf);
+ if (p >= len)
+ return 0;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 941d19f8c999..f318a95ec64d 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1433,9 +1433,13 @@ call_refreshresult(struct rpc_task *task)
+ task->tk_action = call_refresh;
+ switch (status) {
+ case 0:
+- if (rpcauth_uptodatecred(task))
++ if (rpcauth_uptodatecred(task)) {
+ task->tk_action = call_allocate;
+- return;
++ return;
++ }
++ /* Use rate-limiting and a max number of retries if refresh
++ * had status 0 but failed to update the cred.
++ */
+ case -ETIMEDOUT:
+ rpc_delay(task, 3*HZ);
+ case -EAGAIN:
+diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
+index 779742cfc1ff..3a260e47fad2 100644
+--- a/net/sunrpc/netns.h
++++ b/net/sunrpc/netns.h
+@@ -26,7 +26,6 @@ struct sunrpc_net {
+ unsigned int rpcb_is_af_local : 1;
+
+ struct mutex gssp_lock;
+- wait_queue_head_t gssp_wq;
+ struct rpc_clnt *gssp_clnt;
+ int use_gss_proxy;
+ int pipe_version;
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index c8adde3aff8f..7e5bceddc36f 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -1941,7 +1941,19 @@ static int filename_trans_read(struct policydb *p, void *fp)
+ if (rc)
+ goto out;
+
+- hashtab_insert(p->filename_trans, ft, otype);
++ rc = hashtab_insert(p->filename_trans, ft, otype);
++ if (rc) {
++ /*
++ * Do not return -EEXIST to the caller, or the system
++ * will not boot.
++ */
++ if (rc != -EEXIST)
++ goto out;
++ /* But free the memory to avoid a memory leak. */
++ kfree(ft);
++ kfree(name);
++ kfree(otype);
++ }
+ }
+ hash_eval(p->filename_trans, "filenametr");
+ return 0;
+diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
+index f09641da40d4..d1b3a361e526 100644
+--- a/tools/power/x86/turbostat/Makefile
++++ b/tools/power/x86/turbostat/Makefile
+@@ -5,7 +5,7 @@ DESTDIR :=
+
+ turbostat : turbostat.c
+ CFLAGS += -Wall
+-CFLAGS += -I../../../../arch/x86/include/uapi/
++CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
+
+ %: %.c
+ @mkdir -p $(BUILD_OUTPUT)
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index fe702076ca46..6a7ee5f21c9b 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -20,7 +20,7 @@
+ */
+
+ #define _GNU_SOURCE
+-#include <asm/msr.h>
++#include MSRHEADER
+ #include <stdio.h>
+ #include <unistd.h>
+ #include <sys/types.h>
+@@ -35,6 +35,7 @@
+ #include <string.h>
+ #include <ctype.h>
+ #include <sched.h>
++#include <cpuid.h>
+
+ char *proc_stat = "/proc/stat";
+ unsigned int interval_sec = 5; /* set with -i interval_sec */
+@@ -1894,7 +1895,7 @@ void check_cpuid()
+
+ eax = ebx = ecx = edx = 0;
+
+- asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
++ __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
+
+ if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
+ genuine_intel = 1;
+@@ -1903,7 +1904,7 @@ void check_cpuid()
+ fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
+ (char *)&ebx, (char *)&edx, (char *)&ecx);
+
+- asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
++ __get_cpuid(1, &fms, &ebx, &ecx, &edx);
+ family = (fms >> 8) & 0xf;
+ model = (fms >> 4) & 0xf;
+ stepping = fms & 0xf;
+@@ -1925,7 +1926,7 @@ void check_cpuid()
+ * This check is valid for both Intel and AMD.
+ */
+ ebx = ecx = edx = 0;
+- asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
++ __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
+
+ if (max_level < 0x80000007) {
+ fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
+@@ -1936,7 +1937,7 @@ void check_cpuid()
+ * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
+ * this check is valid for both Intel and AMD
+ */
+- asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
++ __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
+ has_invariant_tsc = edx & (1 << 8);
+
+ if (!has_invariant_tsc) {
+@@ -1949,7 +1950,7 @@ void check_cpuid()
+ * this check is valid for both Intel and AMD
+ */
+
+- asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
++ __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+ has_aperf = ecx & (1 << 0);
+ do_dts = eax & (1 << 0);
+ do_ptm = eax & (1 << 6);
diff --git a/1011_linux-3.12.12.patch b/1011_linux-3.12.12.patch
new file mode 100644
index 00000000..824a6774
--- /dev/null
+++ b/1011_linux-3.12.12.patch
@@ -0,0 +1,1111 @@
+diff --git a/Makefile b/Makefile
+index b9e092666bf9..563297e159b7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
+index fea9ee327206..889324981aa4 100644
+--- a/arch/arm64/include/asm/cacheflush.h
++++ b/arch/arm64/include/asm/cacheflush.h
+@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
+ static inline void __flush_icache_all(void)
+ {
+ asm("ic ialluis");
++ dsb();
+ }
+
+ #define flush_dcache_mmap_lock(mapping) \
+diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
+index 6a389dc1bd49..0ea7a22bcdf2 100644
+--- a/arch/arm64/kernel/vdso.c
++++ b/arch/arm64/kernel/vdso.c
+@@ -235,6 +235,8 @@ void update_vsyscall(struct timekeeper *tk)
+ vdso_data->use_syscall = use_syscall;
+ vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
+ vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
++ vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
++ vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+
+ if (!use_syscall) {
+ vdso_data->cs_cycle_last = tk->clock->cycle_last;
+@@ -242,8 +244,6 @@ void update_vsyscall(struct timekeeper *tk)
+ vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+ vdso_data->cs_mult = tk->mult;
+ vdso_data->cs_shift = tk->shift;
+- vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+- vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+ }
+
+ smp_wmb();
+diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
+index d8064af42e62..6d20b7d162d8 100644
+--- a/arch/arm64/kernel/vdso/Makefile
++++ b/arch/arm64/kernel/vdso/Makefile
+@@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
+
+ # Actual build commands
+ quiet_cmd_vdsold = VDSOL $@
+- cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
++ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+ quiet_cmd_vdsoas = VDSOA $@
+ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
+
+diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
+index f0a6d10b5211..fe652ffd34c2 100644
+--- a/arch/arm64/kernel/vdso/gettimeofday.S
++++ b/arch/arm64/kernel/vdso/gettimeofday.S
+@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
+ bl __do_get_tspec
+ seqcnt_check w9, 1b
+
++ mov x30, x2
++
+ cmp w0, #CLOCK_MONOTONIC
+ b.ne 6f
+
+@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
+ ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
+ b.ne 8f
+
++ /* xtime_coarse_nsec is already right-shifted */
++ mov x12, #0
++
+ /* Get coarse timespec. */
+ adr vdso_data, _vdso_data
+ 3: seqcnt_acquire
+@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
+ lsr x11, x11, x12
+ stp x10, x11, [x1, #TSPEC_TV_SEC]
+ mov x0, xzr
+- ret x2
++ ret
+ 7:
+ mov x30, x2
+ 8: /* Syscall fallback. */
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index f557ebbe7013..f8dc7e8fce6f 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+ do {
+ next = pmd_addr_end(addr, end);
+ /* try section mapping first */
+- if (((addr | next | phys) & ~SECTION_MASK) == 0)
++ if (((addr | next | phys) & ~SECTION_MASK) == 0) {
++ pmd_t old_pmd = *pmd;
+ set_pmd(pmd, __pmd(phys | prot_sect_kernel));
+- else
++ /*
++ * Check for previous table entries created during
++ * boot (__create_page_tables) and flush them.
++ */
++ if (!pmd_none(old_pmd))
++ flush_tlb_all();
++ } else {
+ alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
++ }
+ phys += next - addr;
+ } while (pmd++, addr = next, addr != end);
+ }
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index 94e20dd2729f..2a245b55bb71 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -25,6 +25,7 @@
+ #include <linux/err.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
++#include <linux/spinlock.h>
+ #include "crypt_s390.h"
+
+ #define AES_KEYLEN_128 1
+@@ -32,6 +33,7 @@
+ #define AES_KEYLEN_256 4
+
+ static u8 *ctrblk;
++static DEFINE_SPINLOCK(ctrblk_lock);
+ static char keylen_flag;
+
+ struct s390_aes_ctx {
+@@ -756,43 +758,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ return aes_set_key(tfm, in_key, key_len);
+ }
+
++static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
++{
++ unsigned int i, n;
++
++ /* only use complete blocks, max. PAGE_SIZE */
++ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
++ for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
++ memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
++ AES_BLOCK_SIZE);
++ crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
++ }
++ return n;
++}
++
+ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+ struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
+ {
+ int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+- unsigned int i, n, nbytes;
+- u8 buf[AES_BLOCK_SIZE];
+- u8 *out, *in;
++ unsigned int n, nbytes;
++ u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
++ u8 *out, *in, *ctrptr = ctrbuf;
+
+ if (!walk->nbytes)
+ return ret;
+
+- memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
++ if (spin_trylock(&ctrblk_lock))
++ ctrptr = ctrblk;
++
++ memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ while (nbytes >= AES_BLOCK_SIZE) {
+- /* only use complete blocks, max. PAGE_SIZE */
+- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+- nbytes & ~(AES_BLOCK_SIZE - 1);
+- for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+- memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
+- AES_BLOCK_SIZE);
+- crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
+- }
+- ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
+- if (ret < 0 || ret != n)
++ if (ctrptr == ctrblk)
++ n = __ctrblk_init(ctrptr, nbytes);
++ else
++ n = AES_BLOCK_SIZE;
++ ret = crypt_s390_kmctr(func, sctx->key, out, in,
++ n, ctrptr);
++ if (ret < 0 || ret != n) {
++ if (ctrptr == ctrblk)
++ spin_unlock(&ctrblk_lock);
+ return -EIO;
++ }
+ if (n > AES_BLOCK_SIZE)
+- memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
++ memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+- crypto_inc(ctrblk, AES_BLOCK_SIZE);
++ crypto_inc(ctrptr, AES_BLOCK_SIZE);
+ out += n;
+ in += n;
+ nbytes -= n;
+ }
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ }
++ if (ctrptr == ctrblk) {
++ if (nbytes)
++ memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
++ else
++ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
++ spin_unlock(&ctrblk_lock);
++ }
+ /*
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+@@ -800,14 +826,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ ret = crypt_s390_kmctr(func, sctx->key, buf, in,
+- AES_BLOCK_SIZE, ctrblk);
++ AES_BLOCK_SIZE, ctrbuf);
+ if (ret < 0 || ret != AES_BLOCK_SIZE)
+ return -EIO;
+ memcpy(out, buf, nbytes);
+- crypto_inc(ctrblk, AES_BLOCK_SIZE);
++ crypto_inc(ctrbuf, AES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, 0);
++ memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
+ }
+- memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
++
+ return ret;
+ }
+
+diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
+index bcca01c9989d..2d96e68febb2 100644
+--- a/arch/s390/crypto/des_s390.c
++++ b/arch/s390/crypto/des_s390.c
+@@ -25,6 +25,7 @@
+ #define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
+
+ static u8 *ctrblk;
++static DEFINE_SPINLOCK(ctrblk_lock);
+
+ struct s390_des_ctx {
+ u8 iv[DES_BLOCK_SIZE];
+@@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
+ }
+
+ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
+- u8 *iv, struct blkcipher_walk *walk)
++ struct blkcipher_walk *walk)
+ {
++ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int ret = blkcipher_walk_virt(desc, walk);
+ unsigned int nbytes = walk->nbytes;
++ struct {
++ u8 iv[DES_BLOCK_SIZE];
++ u8 key[DES3_KEY_SIZE];
++ } param;
+
+ if (!nbytes)
+ goto out;
+
+- memcpy(iv, walk->iv, DES_BLOCK_SIZE);
++ memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
++ memcpy(param.key, ctx->key, DES3_KEY_SIZE);
+ do {
+ /* only use complete blocks */
+ unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
+ u8 *out = walk->dst.virt.addr;
+ u8 *in = walk->src.virt.addr;
+
+- ret = crypt_s390_kmc(func, iv, out, in, n);
++ ret = crypt_s390_kmc(func, &param, out, in, n);
+ if (ret < 0 || ret != n)
+ return -EIO;
+
+ nbytes &= DES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ } while ((nbytes = walk->nbytes));
+- memcpy(walk->iv, iv, DES_BLOCK_SIZE);
++ memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
+
+ out:
+ return ret;
+@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
++ return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
+ }
+
+ static int cbc_des_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
++ return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
+ }
+
+ static struct crypto_alg cbc_des_alg = {
+@@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
++ return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
+ }
+
+ static int cbc_des3_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
++ return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
+ }
+
+ static struct crypto_alg cbc_des3_alg = {
+@@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = {
+ }
+ };
+
++static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
++{
++ unsigned int i, n;
++
++ /* align to block size, max. PAGE_SIZE */
++ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
++ for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
++ memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
++ crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
++ }
++ return n;
++}
++
+ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
+- struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
++ struct s390_des_ctx *ctx,
++ struct blkcipher_walk *walk)
+ {
+ int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
+- unsigned int i, n, nbytes;
+- u8 buf[DES_BLOCK_SIZE];
+- u8 *out, *in;
++ unsigned int n, nbytes;
++ u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
++ u8 *out, *in, *ctrptr = ctrbuf;
++
++ if (!walk->nbytes)
++ return ret;
+
+- memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
++ if (spin_trylock(&ctrblk_lock))
++ ctrptr = ctrblk;
++
++ memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
+ while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ while (nbytes >= DES_BLOCK_SIZE) {
+- /* align to block size, max. PAGE_SIZE */
+- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+- nbytes & ~(DES_BLOCK_SIZE - 1);
+- for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+- memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
+- DES_BLOCK_SIZE);
+- crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
+- }
+- ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
+- if (ret < 0 || ret != n)
++ if (ctrptr == ctrblk)
++ n = __ctrblk_init(ctrptr, nbytes);
++ else
++ n = DES_BLOCK_SIZE;
++ ret = crypt_s390_kmctr(func, ctx->key, out, in,
++ n, ctrptr);
++ if (ret < 0 || ret != n) {
++ if (ctrptr == ctrblk)
++ spin_unlock(&ctrblk_lock);
+ return -EIO;
++ }
+ if (n > DES_BLOCK_SIZE)
+- memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
++ memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
+ DES_BLOCK_SIZE);
+- crypto_inc(ctrblk, DES_BLOCK_SIZE);
++ crypto_inc(ctrptr, DES_BLOCK_SIZE);
+ out += n;
+ in += n;
+ nbytes -= n;
+ }
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ }
+-
++ if (ctrptr == ctrblk) {
++ if (nbytes)
++ memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
++ else
++ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
++ spin_unlock(&ctrblk_lock);
++ }
+ /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
+ if (nbytes) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ ret = crypt_s390_kmctr(func, ctx->key, buf, in,
+- DES_BLOCK_SIZE, ctrblk);
++ DES_BLOCK_SIZE, ctrbuf);
+ if (ret < 0 || ret != DES_BLOCK_SIZE)
+ return -EIO;
+ memcpy(out, buf, nbytes);
+- crypto_inc(ctrblk, DES_BLOCK_SIZE);
++ crypto_inc(ctrbuf, DES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, 0);
++ memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
+ }
+- memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
+ return ret;
+ }
+
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index a51efc90b534..87c0be59970a 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -628,7 +628,7 @@ static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
+ tlb_flushall_shift = 5;
+ break;
+ case 0x63a: /* Ivybridge */
+- tlb_flushall_shift = 1;
++ tlb_flushall_shift = 2;
+ break;
+ default:
+ tlb_flushall_shift = 6;
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index bb328a366122..a51ee009ed83 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -229,7 +229,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+ & IPI_DOORBELL_MASK;
+
+- writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
++ writel(~ipimask, per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+ /* Handle all pending doorbells */
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index 8f9b2cea88f0..8ede8ea762e6 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -1539,6 +1539,8 @@ static const struct usb_device_id af9035_id_table[] = {
+ &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
+ { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
+ &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
++ { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
++ &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(usb, af9035_id_table);
+diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+index 90f583e5d6a6..a8f65d88c9e7 100644
+--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
++++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+@@ -68,7 +68,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
+ #else
+ static inline
+ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
+- struct mxl111sf_state *mxl_state
++ struct mxl111sf_state *mxl_state,
+ struct mxl111sf_tuner_config *cfg)
+ {
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+index 2627553f7de1..3b948e3d4583 100644
+--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
++++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+@@ -105,7 +105,7 @@ int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data)
+ ret = -EINVAL;
+ }
+
+- pr_debug("R: (0x%02x, 0x%02x)\n", addr, *data);
++ pr_debug("R: (0x%02x, 0x%02x)\n", addr, buf[1]);
+ fail:
+ return ret;
+ }
+diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
+index 65411adcd0ea..7e6b209b7002 100644
+--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
++++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
+@@ -66,14 +66,11 @@ static void __videobuf_dc_free(struct device *dev,
+ static void videobuf_vm_open(struct vm_area_struct *vma)
+ {
+ struct videobuf_mapping *map = vma->vm_private_data;
+- struct videobuf_queue *q = map->q;
+
+- dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
++ dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+ map, map->count, vma->vm_start, vma->vm_end);
+
+- videobuf_queue_lock(q);
+ map->count++;
+- videobuf_queue_unlock(q);
+ }
+
+ static void videobuf_vm_close(struct vm_area_struct *vma)
+@@ -85,11 +82,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
+ dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
+ map, map->count, vma->vm_start, vma->vm_end);
+
+- videobuf_queue_lock(q);
+- if (!--map->count) {
++ map->count--;
++ if (0 == map->count) {
+ struct videobuf_dma_contig_memory *mem;
+
+ dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
++ videobuf_queue_lock(q);
+
+ /* We need first to cancel streams, before unmapping */
+ if (q->streaming)
+@@ -128,8 +126,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
+
+ kfree(map);
+
++ videobuf_queue_unlock(q);
+ }
+- videobuf_queue_unlock(q);
+ }
+
+ static const struct vm_operations_struct videobuf_vm_ops = {
+diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
+index 9db674ccdc68..828e7c10bd70 100644
+--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
++++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
+@@ -338,14 +338,11 @@ EXPORT_SYMBOL_GPL(videobuf_dma_free);
+ static void videobuf_vm_open(struct vm_area_struct *vma)
+ {
+ struct videobuf_mapping *map = vma->vm_private_data;
+- struct videobuf_queue *q = map->q;
+
+ dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
+- videobuf_queue_lock(q);
+ map->count++;
+- videobuf_queue_unlock(q);
+ }
+
+ static void videobuf_vm_close(struct vm_area_struct *vma)
+@@ -358,9 +355,10 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
+ dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
+- videobuf_queue_lock(q);
+- if (!--map->count) {
++ map->count--;
++ if (0 == map->count) {
+ dprintk(1, "munmap %p q=%p\n", map, q);
++ videobuf_queue_lock(q);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+@@ -376,9 +374,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
+ q->bufs[i]->baddr = 0;
+ q->ops->buf_release(q, q->bufs[i]);
+ }
++ videobuf_queue_unlock(q);
+ kfree(map);
+ }
+- videobuf_queue_unlock(q);
+ return;
+ }
+
+diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
+index 1365c651c177..2ff7fcc77b11 100644
+--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
++++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
+@@ -54,14 +54,11 @@ MODULE_LICENSE("GPL");
+ static void videobuf_vm_open(struct vm_area_struct *vma)
+ {
+ struct videobuf_mapping *map = vma->vm_private_data;
+- struct videobuf_queue *q = map->q;
+
+ dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
+- videobuf_queue_lock(q);
+ map->count++;
+- videobuf_queue_unlock(q);
+ }
+
+ static void videobuf_vm_close(struct vm_area_struct *vma)
+@@ -73,11 +70,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
+ dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
+- videobuf_queue_lock(q);
+- if (!--map->count) {
++ map->count--;
++ if (0 == map->count) {
+ struct videobuf_vmalloc_memory *mem;
+
+ dprintk(1, "munmap %p q=%p\n", map, q);
++ videobuf_queue_lock(q);
+
+ /* We need first to cancel streams, before unmapping */
+ if (q->streaming)
+@@ -116,8 +114,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
+
+ kfree(map);
+
++ videobuf_queue_unlock(q);
+ }
+- videobuf_queue_unlock(q);
+
+ return;
+ }
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 92f86ab30a13..a1ffae4c3770 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -837,7 +837,9 @@ static struct pinctrl *create_pinctrl(struct device *dev)
+ kref_init(&p->users);
+
+ /* Add the pinctrl handle to the global list */
++ mutex_lock(&pinctrl_list_mutex);
+ list_add_tail(&p->node, &pinctrl_list);
++ mutex_unlock(&pinctrl_list_mutex);
+
+ return p;
+ }
+diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
+index f350fd2e170e..f4e99a9491cc 100644
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -1251,22 +1251,22 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+- irq_set_handler(d->irq, handle_simple_irq);
++ __irq_set_handler_locked(d->irq, handle_simple_irq);
+ writel_relaxed(mask, pio + PIO_ESR);
+ writel_relaxed(mask, pio + PIO_REHLSR);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+- irq_set_handler(d->irq, handle_simple_irq);
++ __irq_set_handler_locked(d->irq, handle_simple_irq);
+ writel_relaxed(mask, pio + PIO_ESR);
+ writel_relaxed(mask, pio + PIO_FELLSR);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+- irq_set_handler(d->irq, handle_level_irq);
++ __irq_set_handler_locked(d->irq, handle_level_irq);
+ writel_relaxed(mask, pio + PIO_LSR);
+ writel_relaxed(mask, pio + PIO_FELLSR);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+- irq_set_handler(d->irq, handle_level_irq);
++ __irq_set_handler_locked(d->irq, handle_level_irq);
+ writel_relaxed(mask, pio + PIO_LSR);
+ writel_relaxed(mask, pio + PIO_REHLSR);
+ break;
+@@ -1275,7 +1275,7 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
+ * disable additional interrupt modes:
+ * fall back to default behavior
+ */
+- irq_set_handler(d->irq, handle_simple_irq);
++ __irq_set_handler_locked(d->irq, handle_simple_irq);
+ writel_relaxed(mask, pio + PIO_AIMDR);
+ return 0;
+ case IRQ_TYPE_NONE:
+diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
+index 39aec0850810..25ab2eec92e4 100644
+--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
++++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
+@@ -276,7 +276,20 @@ static int wmt_pctl_dt_node_to_map_pull(struct wmt_pinctrl_data *data,
+ if (!configs)
+ return -ENOMEM;
+
+- configs[0] = pull;
++ switch (pull) {
++ case 0:
++ configs[0] = PIN_CONFIG_BIAS_DISABLE;
++ break;
++ case 1:
++ configs[0] = PIN_CONFIG_BIAS_PULL_DOWN;
++ break;
++ case 2:
++ configs[0] = PIN_CONFIG_BIAS_PULL_UP;
++ break;
++ default:
++ configs[0] = PIN_CONFIG_BIAS_DISABLE;
++ dev_err(data->dev, "invalid pull state %d - disabling\n", pull);
++ }
+
+ map->type = PIN_MAP_TYPE_CONFIGS_PIN;
+ map->data.configs.group_or_pin = data->groups[group];
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 8b8eff051493..1b63d29e44b7 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2613,7 +2613,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
+ EXTENT_DEFRAG, 1, cached_state);
+ if (ret) {
+ u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
+- if (last_snapshot >= BTRFS_I(inode)->generation)
++ if (0 && last_snapshot >= BTRFS_I(inode)->generation)
+ /* the inode is shared */
+ new = record_old_file_extents(inode, ordered_extent);
+
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 6024877335ca..aeeea6529bcd 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -654,14 +654,16 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
+ static void __set_page_dirty(struct page *page,
+ struct address_space *mapping, int warn)
+ {
+- spin_lock_irq(&mapping->tree_lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&mapping->tree_lock, flags);
+ if (page->mapping) { /* Race with truncate? */
+ WARN_ON_ONCE(warn && !PageUptodate(page));
+ account_page_dirtied(page, mapping);
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
+ }
+- spin_unlock_irq(&mapping->tree_lock);
++ spin_unlock_irqrestore(&mapping->tree_lock, flags);
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ }
+
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 8a572ddde55b..55ebebec4d3b 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -169,7 +169,7 @@ void nfs41_shutdown_client(struct nfs_client *clp)
+ void nfs40_shutdown_client(struct nfs_client *clp)
+ {
+ if (clp->cl_slot_tbl) {
+- nfs4_release_slot_table(clp->cl_slot_tbl);
++ nfs4_shutdown_slot_table(clp->cl_slot_tbl);
+ kfree(clp->cl_slot_tbl);
+ }
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 29c5a2c08f02..d3d7766f55e3 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1611,15 +1611,15 @@ static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
+ {
+ struct nfs4_opendata *data = calldata;
+
+- nfs40_setup_sequence(data->o_arg.server, &data->o_arg.seq_args,
+- &data->o_res.seq_res, task);
++ nfs40_setup_sequence(data->o_arg.server, &data->c_arg.seq_args,
++ &data->c_res.seq_res, task);
+ }
+
+ static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
+ {
+ struct nfs4_opendata *data = calldata;
+
+- nfs40_sequence_done(task, &data->o_res.seq_res);
++ nfs40_sequence_done(task, &data->c_res.seq_res);
+
+ data->rpc_status = task->tk_status;
+ if (data->rpc_status == 0) {
+@@ -1677,7 +1677,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
+ };
+ int status;
+
+- nfs4_init_sequence(&data->o_arg.seq_args, &data->o_res.seq_res, 1);
++ nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
+ kref_get(&data->kref);
+ data->rpc_done = 0;
+ data->rpc_status = 0;
+diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
+index cf883c7ae053..e799dc3c3b1d 100644
+--- a/fs/nfs/nfs4session.c
++++ b/fs/nfs/nfs4session.c
+@@ -231,14 +231,23 @@ out:
+ return ret;
+ }
+
++/*
++ * nfs4_release_slot_table - release all slot table entries
++ */
++static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
++{
++ nfs4_shrink_slot_table(tbl, 0);
++}
++
+ /**
+- * nfs4_release_slot_table - release resources attached to a slot table
++ * nfs4_shutdown_slot_table - release resources attached to a slot table
+ * @tbl: slot table to shut down
+ *
+ */
+-void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
++void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
+ {
+- nfs4_shrink_slot_table(tbl, 0);
++ nfs4_release_slot_table(tbl);
++ rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
+ }
+
+ /**
+@@ -422,7 +431,7 @@ void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
+ spin_unlock(&tbl->slot_tbl_lock);
+ }
+
+-static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
++static void nfs4_release_session_slot_tables(struct nfs4_session *session)
+ {
+ nfs4_release_slot_table(&session->fc_slot_table);
+ nfs4_release_slot_table(&session->bc_slot_table);
+@@ -450,7 +459,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
+ if (status && tbl->slots == NULL)
+ /* Fore and back channel share a connection so get
+ * both slot tables or neither */
+- nfs4_destroy_session_slot_tables(ses);
++ nfs4_release_session_slot_tables(ses);
+ return status;
+ }
+
+@@ -470,6 +479,12 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
+ return session;
+ }
+
++static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
++{
++ nfs4_shutdown_slot_table(&session->fc_slot_table);
++ nfs4_shutdown_slot_table(&session->bc_slot_table);
++}
++
+ void nfs4_destroy_session(struct nfs4_session *session)
+ {
+ struct rpc_xprt *xprt;
+diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
+index 232306100651..b34ada9bc6a2 100644
+--- a/fs/nfs/nfs4session.h
++++ b/fs/nfs/nfs4session.h
+@@ -74,7 +74,7 @@ enum nfs4_session_state {
+
+ extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
+ unsigned int max_reqs, const char *queue);
+-extern void nfs4_release_slot_table(struct nfs4_slot_table *tbl);
++extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
+ extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
+ extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
+ extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 49f52c8f4422..ea9e076a91bf 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -379,12 +379,14 @@ struct nfs_openres {
+ * Arguments to the open_confirm call.
+ */
+ struct nfs_open_confirmargs {
++ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh * fh;
+ nfs4_stateid * stateid;
+ struct nfs_seqid * seqid;
+ };
+
+ struct nfs_open_confirmres {
++ struct nfs4_sequence_res seq_res;
+ nfs4_stateid stateid;
+ struct nfs_seqid * seqid;
+ };
+diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
+index 4a1fef09f658..07cbdfea9ae2 100644
+--- a/kernel/irq/Kconfig
++++ b/kernel/irq/Kconfig
+@@ -40,6 +40,7 @@ config IRQ_EDGE_EOI_HANDLER
+ # Generic configurable interrupt chip implementation
+ config GENERIC_IRQ_CHIP
+ bool
++ select IRQ_DOMAIN
+
+ # Generic irq_domain hw <--> linux irq number translation
+ config IRQ_DOMAIN
+diff --git a/lib/Makefile b/lib/Makefile
+index f3bb2cb98adf..6af6fbb053e3 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -47,6 +47,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+ lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
+
++GCOV_PROFILE_hweight.o := n
+ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
+ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 2d30e2cfe804..7106cb1aca8e 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -2173,11 +2173,12 @@ int __set_page_dirty_nobuffers(struct page *page)
+ if (!TestSetPageDirty(page)) {
+ struct address_space *mapping = page_mapping(page);
+ struct address_space *mapping2;
++ unsigned long flags;
+
+ if (!mapping)
+ return 1;
+
+- spin_lock_irq(&mapping->tree_lock);
++ spin_lock_irqsave(&mapping->tree_lock, flags);
+ mapping2 = page_mapping(page);
+ if (mapping2) { /* Race with truncate? */
+ BUG_ON(mapping2 != mapping);
+@@ -2186,7 +2187,7 @@ int __set_page_dirty_nobuffers(struct page *page)
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
+ }
+- spin_unlock_irq(&mapping->tree_lock);
++ spin_unlock_irqrestore(&mapping->tree_lock, flags);
+ if (mapping->host) {
+ /* !PageAnon && !swapper_space */
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index de7c904e52e5..0ec2eaf3ccfd 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1922,7 +1922,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ p->swap_map = NULL;
+ cluster_info = p->cluster_info;
+ p->cluster_info = NULL;
+- p->flags = 0;
+ frontswap_map = frontswap_map_get(p);
+ frontswap_map_set(p, NULL);
+ spin_unlock(&p->lock);
+@@ -1948,6 +1947,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ mutex_unlock(&inode->i_mutex);
+ }
+ filp_close(swap_file, NULL);
++
++ /*
++ * Clear the SWP_USED flag after all resources are freed so that swapon
++ * can reuse this swap_info in alloc_swap_info() safely. It is OK
++ * not to hold p->lock after we have cleared its SWP_WRITEOK.
++ */
++ spin_lock(&swap_lock);
++ p->flags = 0;
++ spin_unlock(&swap_lock);
++
+ err = 0;
+ atomic_inc(&proc_poll_event);
+ wake_up_interruptible(&proc_poll_wait);
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index b4feecc3fe01..18caa16de27b 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -1231,6 +1231,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
+ struct context context;
+ int rc = 0;
+
++ /* An empty security context is never valid. */
++ if (!scontext_len)
++ return -EINVAL;
++
+ if (!ss_initialized) {
+ int i;
+
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index 31230c68b603..7fc15814c618 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -227,6 +227,19 @@ static void ad_fixup_inv_jack_detect(struct hda_codec *codec,
+ }
+ }
+
++/* Toshiba Satellite L40 implements EAPD in a standard way, unlike others */
++static void ad1986a_fixup_eapd(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ struct ad198x_spec *spec = codec->spec;
++
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ codec->inv_eapd = 0;
++ spec->gen.keep_eapd_on = 1;
++ spec->eapd_nid = 0x1b;
++ }
++}
++
+ enum {
+ AD1986A_FIXUP_INV_JACK_DETECT,
+ AD1986A_FIXUP_ULTRA,
+@@ -234,6 +247,7 @@ enum {
+ AD1986A_FIXUP_3STACK,
+ AD1986A_FIXUP_LAPTOP,
+ AD1986A_FIXUP_LAPTOP_IMIC,
++ AD1986A_FIXUP_EAPD,
+ };
+
+ static const struct hda_fixup ad1986a_fixups[] = {
+@@ -294,6 +308,10 @@ static const struct hda_fixup ad1986a_fixups[] = {
+ .chained_before = 1,
+ .chain_id = AD1986A_FIXUP_LAPTOP,
+ },
++ [AD1986A_FIXUP_EAPD] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = ad1986a_fixup_eapd,
++ },
+ };
+
+ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
+@@ -301,6 +319,7 @@ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
+ SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
+ SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
+ SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_FIXUP_3STACK),
++ SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40", AD1986A_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
+ SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
+ SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
+@@ -455,6 +474,8 @@ static int ad1983_add_spdif_mux_ctl(struct hda_codec *codec)
+ static int patch_ad1983(struct hda_codec *codec)
+ {
+ struct ad198x_spec *spec;
++ static hda_nid_t conn_0c[] = { 0x08 };
++ static hda_nid_t conn_0d[] = { 0x09 };
+ int err;
+
+ err = alloc_ad_spec(codec);
+@@ -462,8 +483,14 @@ static int patch_ad1983(struct hda_codec *codec)
+ return err;
+ spec = codec->spec;
+
++ spec->gen.mixer_nid = 0x0e;
+ spec->gen.beep_nid = 0x10;
+ set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
++
++ /* limit the loopback routes so as not to confuse the parser */
++ snd_hda_override_conn_list(codec, 0x0c, ARRAY_SIZE(conn_0c), conn_0c);
++ snd_hda_override_conn_list(codec, 0x0d, ARRAY_SIZE(conn_0d), conn_0d);
++
+ err = ad198x_parse_auto_config(codec, false);
+ if (err < 0)
+ goto error;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 56b62555eef4..6195a4f8d9b8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1771,6 +1771,7 @@ enum {
+ ALC889_FIXUP_IMAC91_VREF,
+ ALC889_FIXUP_MBA11_VREF,
+ ALC889_FIXUP_MBA21_VREF,
++ ALC889_FIXUP_MP11_VREF,
+ ALC882_FIXUP_INV_DMIC,
+ ALC882_FIXUP_NO_PRIMARY_HP,
+ ALC887_FIXUP_ASUS_BASS,
+@@ -2127,6 +2128,12 @@ static const struct hda_fixup alc882_fixups[] = {
+ .chained = true,
+ .chain_id = ALC889_FIXUP_MBP_VREF,
+ },
++ [ALC889_FIXUP_MP11_VREF] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc889_fixup_mba11_vref,
++ .chained = true,
++ .chain_id = ALC885_FIXUP_MACPRO_GPIO,
++ },
+ [ALC882_FIXUP_INV_DMIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic_0x12,
+@@ -2184,7 +2191,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
+- SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_FIXUP_MACPRO_GPIO),
++ SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC889_FIXUP_MP11_VREF),
+ SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_FIXUP_MACPRO_GPIO),
+ SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_FIXUP_MACPRO_GPIO),
+ SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC889_FIXUP_MBP_VREF),
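The Mac Pro entry above now chains a vref fixup through the existing GPIO one. A sketch of how such a quirk table resolves a PCI subsystem ID to a fixup (structure and names are simplified stand-ins for SND_PCI_QUIRK, not the real hda API):

#include <stdio.h>

/* Simplified stand-in for the SND_PCI_QUIRK tables: map a PCI
 * subsystem (vendor, device) pair to a fixup id; first match wins. */
struct quirk { unsigned short vendor, device; int fixup; };

enum { FIXUP_MACPRO_GPIO, FIXUP_MP11_VREF };

static const struct quirk tbl[] = {
    { 0x106b, 0x0c00, FIXUP_MP11_VREF },   /* Mac Pro: vref + GPIO */
    { 0x106b, 0x1000, FIXUP_MACPRO_GPIO }, /* iMac 24 */
};

static int lookup(unsigned short v, unsigned short d)
{
    for (unsigned int i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
        if (tbl[i].vendor == v && tbl[i].device == d)
            return tbl[i].fixup;
    return -1;
}

int main(void)
{
    printf("%d\n", lookup(0x106b, 0x0c00)); /* FIXUP_MP11_VREF */
    return 0;
}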
+diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
+index de9408b83f75..e05a86b7c0da 100644
+--- a/sound/usb/Kconfig
++++ b/sound/usb/Kconfig
+@@ -14,6 +14,7 @@ config SND_USB_AUDIO
+ select SND_HWDEP
+ select SND_RAWMIDI
+ select SND_PCM
++ select BITREVERSE
+ help
+ Say Y here to include support for USB audio and USB MIDI
+ devices.
diff --git a/1012_linux-3.12.13.patch b/1012_linux-3.12.13.patch
new file mode 100644
index 00000000..1357575e
--- /dev/null
+++ b/1012_linux-3.12.13.patch
@@ -0,0 +1,2461 @@
+diff --git a/Makefile b/Makefile
+index 563297e159b7..0a89e7d84a2d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
+index b9e25ae2579c..d7c00507568a 100644
+--- a/arch/s390/kernel/head64.S
++++ b/arch/s390/kernel/head64.S
+@@ -59,7 +59,7 @@ ENTRY(startup_continue)
+ .quad 0 # cr12: tracing off
+ .quad 0 # cr13: home space segment table
+ .quad 0xc0000000 # cr14: machine check handling off
+- .quad 0 # cr15: linkage stack operations
++ .quad .Llinkage_stack # cr15: linkage stack operations
+ .Lpcmsk:.quad 0x0000000180000000
+ .L4malign:.quad 0xffffffffffc00000
+ .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
+@@ -67,12 +67,15 @@ ENTRY(startup_continue)
+ .Lparmaddr:
+ .quad PARMAREA
+ .align 64
+-.Lduct: .long 0,0,0,0,.Lduald,0,0,0
++.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
+ .long 0,0,0,0,0,0,0,0
++.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
+ .align 128
+ .Lduald:.rept 8
+ .long 0x80000000,0,0,0 # invalid access-list entries
+ .endr
++.Llinkage_stack:
++ .long 0,0,0x89000000,0,0,0,0x8a000000,0
+
+ ENTRY(_ehead)
+
+diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
+index a90d45e9dfb0..27c50f4d90cb 100644
+--- a/arch/s390/mm/page-states.c
++++ b/arch/s390/mm/page-states.c
+@@ -12,6 +12,8 @@
+ #include <linux/mm.h>
+ #include <linux/gfp.h>
+ #include <linux/init.h>
++#include <asm/setup.h>
++#include <asm/ipl.h>
+
+ #define ESSA_SET_STABLE 1
+ #define ESSA_SET_UNUSED 2
+@@ -41,6 +43,14 @@ void __init cmma_init(void)
+
+ if (!cmma_flag)
+ return;
++ /*
++ * Disable CMM for dump, otherwise the tprot based memory
++ * detection can fail because of unstable pages.
++ */
++ if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
++ cmma_flag = 0;
++ return;
++ }
+ asm volatile(
+ " .insn rrf,0xb9ab0000,%1,%1,0,0\n"
+ "0: la %0,0\n"
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index bbc8b12fa443..5ad38ad07890 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -445,10 +445,20 @@ static inline int pte_same(pte_t a, pte_t b)
+ return a.pte == b.pte;
+ }
+
++static inline int pteval_present(pteval_t pteval)
++{
++ /*
++ * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
++ * way clearly states that the intent is that protnone and numa
++ * hinting ptes are considered present for the purposes of
++ * pagetable operations like zapping, protection changes, gup etc.
++ */
++ return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
++}
++
+ static inline int pte_present(pte_t a)
+ {
+- return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+- _PAGE_NUMA);
++ return pteval_present(pte_flags(a));
+ }
+
+ #define pte_accessible pte_accessible
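Factoring the test into pteval_present() lets the Xen hunk later in this patch reuse the exact same predicate. A toy version of the idea (bit positions are invented, not the real x86 _PAGE_* values):

#include <stdint.h>
#include <stdio.h>

/* Toy version of the shared predicate; bit positions are invented. */
#define PAGE_PRESENT  (1u << 0)
#define PAGE_PROTNONE (1u << 8)
#define PAGE_NUMA     (1u << 11)

/* protnone and NUMA-hinting entries still count as "present" for
 * operations like zapping, protection changes and gup. */
static int pteval_present(uint32_t flags)
{
    return flags & (PAGE_PRESENT | PAGE_PROTNONE | PAGE_NUMA);
}

int main(void)
{
    printf("%d\n", !!pteval_present(PAGE_PROTNONE));  /* 1 */
    printf("%d\n", !!pteval_present(0));              /* 0 */
    return 0;
}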
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 2793d1f095a2..3533e2c082a3 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+ raw_local_save_flags(eflags);
+ BUG_ON(eflags & X86_EFLAGS_AC);
+
+- if (cpu_has(c, X86_FEATURE_SMAP))
++ if (cpu_has(c, X86_FEATURE_SMAP)) {
++#ifdef CONFIG_X86_SMAP
+ set_in_cr4(X86_CR4_SMAP);
++#else
++ clear_in_cr4(X86_CR4_SMAP);
++#endif
++ }
+ }
+
+ /*
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index d4bdd253fea7..e6253195a301 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -77,8 +77,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
+ return addr >= start && addr < end;
+ }
+
+-static int
+-do_ftrace_mod_code(unsigned long ip, const void *new_code)
++static unsigned long text_ip_addr(unsigned long ip)
+ {
+ /*
+ * On x86_64, kernel text mappings are mapped read-only with
+@@ -91,7 +90,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code)
+ if (within(ip, (unsigned long)_text, (unsigned long)_etext))
+ ip = (unsigned long)__va(__pa_symbol(ip));
+
+- return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
++ return ip;
+ }
+
+ static const unsigned char *ftrace_nop_replace(void)
+@@ -123,8 +122,10 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
+ if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+ return -EINVAL;
+
++ ip = text_ip_addr(ip);
++
+ /* replace the text with the new text */
+- if (do_ftrace_mod_code(ip, new_code))
++ if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+ return -EPERM;
+
+ sync_core();
+@@ -221,37 +222,51 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ return -EINVAL;
+ }
+
+-int ftrace_update_ftrace_func(ftrace_func_t func)
++static unsigned long ftrace_update_func;
++
++static int update_ftrace_func(unsigned long ip, void *new)
+ {
+- unsigned long ip = (unsigned long)(&ftrace_call);
+- unsigned char old[MCOUNT_INSN_SIZE], *new;
++ unsigned char old[MCOUNT_INSN_SIZE];
+ int ret;
+
+- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
+- new = ftrace_call_replace(ip, (unsigned long)func);
++ memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
++
++ ftrace_update_func = ip;
++ /* Make sure the breakpoints see the ftrace_update_func update */
++ smp_wmb();
+
+ /* See comment above by declaration of modifying_ftrace_code */
+ atomic_inc(&modifying_ftrace_code);
+
+ ret = ftrace_modify_code(ip, old, new);
+
++ atomic_dec(&modifying_ftrace_code);
++
++ return ret;
++}
++
++int ftrace_update_ftrace_func(ftrace_func_t func)
++{
++ unsigned long ip = (unsigned long)(&ftrace_call);
++ unsigned char *new;
++ int ret;
++
++ new = ftrace_call_replace(ip, (unsigned long)func);
++ ret = update_ftrace_func(ip, new);
++
+ /* Also update the regs callback function */
+ if (!ret) {
+ ip = (unsigned long)(&ftrace_regs_call);
+- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, (unsigned long)func);
+- ret = ftrace_modify_code(ip, old, new);
++ ret = update_ftrace_func(ip, new);
+ }
+
+- atomic_dec(&modifying_ftrace_code);
+-
+ return ret;
+ }
+
+ static int is_ftrace_caller(unsigned long ip)
+ {
+- if (ip == (unsigned long)(&ftrace_call) ||
+- ip == (unsigned long)(&ftrace_regs_call))
++ if (ip == ftrace_update_func)
+ return 1;
+
+ return 0;
+@@ -677,45 +692,41 @@ int __init ftrace_dyn_arch_init(void *data)
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ extern void ftrace_graph_call(void);
+
+-static int ftrace_mod_jmp(unsigned long ip,
+- int old_offset, int new_offset)
++static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
+ {
+- unsigned char code[MCOUNT_INSN_SIZE];
++ static union ftrace_code_union calc;
+
+- if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+- return -EFAULT;
++ /* Jmp not a call (ignore the .e8) */
++ calc.e8 = 0xe9;
++ calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
+
+- if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
+- return -EINVAL;
++ /*
++ * ftrace external locks synchronize the access to the static variable.
++ */
++ return calc.code;
++}
+
+- *(int *)(&code[1]) = new_offset;
++static int ftrace_mod_jmp(unsigned long ip, void *func)
++{
++ unsigned char *new;
+
+- if (do_ftrace_mod_code(ip, &code))
+- return -EPERM;
++ new = ftrace_jmp_replace(ip, (unsigned long)func);
+
+- return 0;
++ return update_ftrace_func(ip, new);
+ }
+
+ int ftrace_enable_ftrace_graph_caller(void)
+ {
+ unsigned long ip = (unsigned long)(&ftrace_graph_call);
+- int old_offset, new_offset;
+
+- old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+- new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+-
+- return ftrace_mod_jmp(ip, old_offset, new_offset);
++ return ftrace_mod_jmp(ip, &ftrace_graph_caller);
+ }
+
+ int ftrace_disable_ftrace_graph_caller(void)
+ {
+ unsigned long ip = (unsigned long)(&ftrace_graph_call);
+- int old_offset, new_offset;
+-
+- old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+- new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+
+- return ftrace_mod_jmp(ip, old_offset, new_offset);
++ return ftrace_mod_jmp(ip, &ftrace_stub);
+ }
+
+ #endif /* !CONFIG_DYNAMIC_FTRACE */
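The ftrace rework above funnels every text patch through update_ftrace_func(), which publishes the address being patched before modifying it, so the breakpoint handler needs only a single comparison. A loose userspace analogy using a C11 release store in place of smp_wmb() (names mirror the patch; the synchronization story is simplified, not the kernel's actual breakpoint machinery):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uintptr_t ftrace_update_func;

static int is_ftrace_caller(uintptr_t ip)
{
    /* One comparison against the published site, instead of one per
     * known call site as before the patch. */
    return ip == atomic_load(&ftrace_update_func);
}

static void begin_update(uintptr_t ip)
{
    /* Release store standing in for the patch's smp_wmb(): the
     * handler must observe the new site before patching begins. */
    atomic_store_explicit(&ftrace_update_func, ip, memory_order_release);
}

int main(void)
{
    begin_update(0x1000);
    printf("%d %d\n", is_ftrace_caller(0x1000), is_ftrace_caller(0x2000));
    return 0;
}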
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 3aaeffcfd67a..d8b1ff68dbb9 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -980,6 +980,12 @@ static int fault_in_kernel_space(unsigned long address)
+
+ static inline bool smap_violation(int error_code, struct pt_regs *regs)
+ {
++ if (!IS_ENABLED(CONFIG_X86_SMAP))
++ return false;
++
++ if (!static_cpu_has(X86_FEATURE_SMAP))
++ return false;
++
+ if (error_code & PF_USER)
+ return false;
+
+@@ -1081,11 +1087,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
+ if (unlikely(error_code & PF_RSVD))
+ pgtable_bad(regs, error_code, address);
+
+- if (static_cpu_has(X86_FEATURE_SMAP)) {
+- if (unlikely(smap_violation(error_code, regs))) {
+- bad_area_nosemaphore(regs, error_code, address);
+- return;
+- }
++ if (unlikely(smap_violation(error_code, regs))) {
++ bad_area_nosemaphore(regs, error_code, address);
++ return;
+ }
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index fdc3ba28ca38..d5af43af64dc 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+ /* Assume pteval_t is equivalent to all the other *val_t types. */
+ static pteval_t pte_mfn_to_pfn(pteval_t val)
+ {
+- if (val & _PAGE_PRESENT) {
++ if (pteval_present(val)) {
+ unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+ unsigned long pfn = mfn_to_pfn(mfn);
+
+@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
+
+ static pteval_t pte_pfn_to_mfn(pteval_t val)
+ {
+- if (val & _PAGE_PRESENT) {
++ if (pteval_present(val)) {
+ unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+ pteval_t flags = val & PTE_FLAGS_MASK;
+ unsigned long mfn;
+diff --git a/block/blk-lib.c b/block/blk-lib.c
+index d6f50d572565..9a32f5868fb9 100644
+--- a/block/blk-lib.c
++++ b/block/blk-lib.c
+@@ -121,6 +121,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+
+ atomic_inc(&bb.done);
+ submit_bio(type, bio);
++
++ /*
++ * We can loop for a long time in here, if someone does
++ * full device discards (like mkfs). Be nice and allow
++ * us to schedule out to avoid softlocking if preempt
++ * is disabled.
++ */
++ cond_resched();
+ }
+ blk_finish_plug(&plug);
+
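A userspace sketch of the same courtesy the discard loop gains above: an unbounded submission loop periodically yields so it cannot monopolize a CPU (sched_yield() stands in for cond_resched(), and submit_one() is an invented stand-in for submit_bio()):

#include <sched.h>
#include <stdio.h>

static void submit_one(unsigned long i) { (void)i; }

int main(void)
{
    for (unsigned long i = 0; i < 1000000; i++) {
        submit_one(i);
        if ((i & 0xfff) == 0)
            sched_yield();  /* userspace stand-in for cond_resched() */
    }
    puts("done");
    return 0;
}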
+diff --git a/block/blk.h b/block/blk.h
+index e837b8f619b7..b3bdeb36f361 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -96,7 +96,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
+ q->flush_queue_delayed = 1;
+ return NULL;
+ }
+- if (unlikely(blk_queue_dying(q)) ||
++ if (unlikely(blk_queue_bypass(q)) ||
+ !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
+ return NULL;
+ }
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index ed88b3c2e8ea..e85bc358e052 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1852,13 +1852,16 @@ static void blkback_changed(struct xenbus_device *dev,
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
+ case XenbusStateUnknown:
+- case XenbusStateClosed:
+ break;
+
+ case XenbusStateConnected:
+ blkfront_connect(info);
+ break;
+
++ case XenbusStateClosed:
++ if (dev->state == XenbusStateClosed)
++ break;
++ /* Missed the backend's Closing state -- fallthrough */
+ case XenbusStateClosing:
+ blkfront_closing(info);
+ break;
+diff --git a/drivers/char/raw.c b/drivers/char/raw.c
+index f3223aac4df1..6e8d65e9b1d3 100644
+--- a/drivers/char/raw.c
++++ b/drivers/char/raw.c
+@@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev)
+ struct raw_device_data *rawdev;
+ struct block_device *bdev;
+
+- if (number <= 0 || number >= MAX_RAW_MINORS)
++ if (number <= 0 || number >= max_raw_minors)
+ return -EINVAL;
+
+ rawdev = &raw_devices[number];
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index 89e109022d78..a9d98cdd11f4 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -559,7 +559,8 @@ static void edac_mc_workq_function(struct work_struct *work_req)
+ *
+ * called with the mem_ctls_mutex held
+ */
+-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
++static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
++ bool init)
+ {
+ edac_dbg(0, "\n");
+
+@@ -567,7 +568,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+ if (mci->op_state != OP_RUNNING_POLL)
+ return;
+
+- INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
++ if (init)
++ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
++
+ mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+ }
+
+@@ -601,7 +604,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+ * user space has updated our poll period value, need to
+ * reset our workq delays
+ */
+-void edac_mc_reset_delay_period(int value)
++void edac_mc_reset_delay_period(unsigned long value)
+ {
+ struct mem_ctl_info *mci;
+ struct list_head *item;
+@@ -611,7 +614,7 @@ void edac_mc_reset_delay_period(int value)
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+- edac_mc_workq_setup(mci, (unsigned long) value);
++ edac_mc_workq_setup(mci, value, false);
+ }
+
+ mutex_unlock(&mem_ctls_mutex);
+@@ -782,7 +785,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
+ /* This instance is NOW RUNNING */
+ mci->op_state = OP_RUNNING_POLL;
+
+- edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
++ edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true);
+ } else {
+ mci->op_state = OP_RUNNING_INTERRUPT;
+ }
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index 9f7e0e609516..e5bdf216effe 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -52,18 +52,20 @@ int edac_mc_get_poll_msec(void)
+
+ static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
+ {
+- long l;
++ unsigned long l;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+- ret = kstrtol(val, 0, &l);
++ ret = kstrtoul(val, 0, &l);
+ if (ret)
+ return ret;
+- if ((int)l != l)
++
++ if (l < 1000)
+ return -EINVAL;
+- *((int *)kp->arg) = l;
++
++ *((unsigned long *)kp->arg) = l;
+
+ /* notify edac_mc engine to reset the poll period */
+ edac_mc_reset_delay_period(l);
+diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
+index 3d139c6e7fe3..f2118bfcf8df 100644
+--- a/drivers/edac/edac_module.h
++++ b/drivers/edac/edac_module.h
+@@ -52,7 +52,7 @@ extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+ extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
+ extern void edac_device_reset_delay_period(struct edac_device_ctl_info
+ *edac_dev, unsigned long value);
+-extern void edac_mc_reset_delay_period(int value);
++extern void edac_mc_reset_delay_period(unsigned long value);
+
+ extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
+
+diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
+index 354e3e32b30e..eb7af5b430aa 100644
+--- a/drivers/gpu/drm/i915/i915_gpu_error.c
++++ b/drivers/gpu/drm/i915/i915_gpu_error.c
+@@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
+ va_list tmp;
+
+ va_copy(tmp, args);
+- if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
++ len = vsnprintf(NULL, 0, f, tmp);
++ va_end(tmp);
++
++ if (!__i915_error_seek(e, len))
+ return;
+ }
+
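The i915 hunk fixes a leaked va_list copy and makes the sizing pass explicit. The full measure-allocate-format idiom, for reference (helper names are invented):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Size with vsnprintf(NULL, 0, ...) on a va_copy, va_end the copy,
 * then format for real with the original list. */
static char *vstrdup_printf(const char *fmt, va_list args)
{
    va_list tmp;
    int len;
    char *buf;

    va_copy(tmp, args);
    len = vsnprintf(NULL, 0, fmt, tmp); /* sizing pass consumes tmp */
    va_end(tmp);                        /* the va_end the bug lacked */
    if (len < 0)
        return NULL;

    buf = malloc((size_t)len + 1);
    if (buf)
        vsnprintf(buf, (size_t)len + 1, fmt, args);
    return buf;
}

static char *strdup_printf(const char *fmt, ...)
{
    va_list args;
    char *s;

    va_start(args, fmt);
    s = vstrdup_printf(fmt, args);
    va_end(args);
    return s;
}

int main(void)
{
    char *s = strdup_printf("error %d on %s", 42, "render ring");
    puts(s ? s : "(alloc failed)");
    free(s);
    return 0;
}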
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 2acbf89cdfd3..67da7e285cde 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -3854,6 +3854,10 @@ restart_ih:
+ break;
+ }
+ break;
++ case 124: /* UVD */
++ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
++ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
++ break;
+ case 176: /* CP_INT in ring buffer */
+ case 177: /* CP_INT in IB1 */
+ case 178: /* CP_INT in IB2 */
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 3f39f15d48a6..8277ee01a7b4 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -6281,6 +6281,10 @@ restart_ih:
+ break;
+ }
+ break;
++ case 124: /* UVD */
++ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
++ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
++ break;
+ case 146:
+ case 147:
+ addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 936093e0271e..13c23a4789de 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -67,7 +67,6 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
+ int ret = 0;
+ struct vmbus_channel_initiate_contact *msg;
+ unsigned long flags;
+- int t;
+
+ init_completion(&msginfo->waitevent);
+
+@@ -102,15 +101,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
+ }
+
+ /* Wait for the connection response */
+- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+- if (t == 0) {
+- spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
+- flags);
+- list_del(&msginfo->msglistentry);
+- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+- flags);
+- return -ETIMEDOUT;
+- }
++ wait_for_completion(&msginfo->waitevent);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
+index 8c23203915af..8a17f01e8672 100644
+--- a/drivers/hwmon/ntc_thermistor.c
++++ b/drivers/hwmon/ntc_thermistor.c
+@@ -145,7 +145,7 @@ struct ntc_data {
+ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
+ {
+ struct iio_channel *channel = pdata->chan;
+- unsigned int result;
++ s64 result;
+ int val, ret;
+
+ ret = iio_read_channel_raw(channel, &val);
+@@ -155,10 +155,10 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
+ }
+
+ /* unit: mV */
+- result = pdata->pullup_uv * val;
++ result = pdata->pullup_uv * (s64) val;
+ result >>= 12;
+
+- return result;
++ return (int)result;
+ }
+
+ static const struct of_device_id ntc_match[] = {
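The thermistor fix works because pullup_uv * val can exceed 32 bits: a 5 V pull-up expressed in microvolts times a 12-bit full-scale reading is about 2e10. A small demonstration of why one operand must be widened before the multiply:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t pullup_uv = 5000000;
    uint32_t raw = 4095;

    uint32_t bad = pullup_uv * raw;           /* wraps modulo 2^32 */
    int64_t good = (int64_t)pullup_uv * raw;  /* exact */

    printf("32-bit: %u mV (wrong)\n", bad >> 12);
    printf("64-bit: %lld mV\n", (long long)(good >> 12));
    return 0;
}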
+diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
+index 6f02eb883e5e..564c1c076ea2 100644
+--- a/drivers/i2c/busses/i2c-mv64xxx.c
++++ b/drivers/i2c/busses/i2c-mv64xxx.c
+@@ -97,7 +97,6 @@ enum {
+ enum {
+ MV64XXX_I2C_ACTION_INVALID,
+ MV64XXX_I2C_ACTION_CONTINUE,
+- MV64XXX_I2C_ACTION_OFFLOAD_SEND_START,
+ MV64XXX_I2C_ACTION_SEND_START,
+ MV64XXX_I2C_ACTION_SEND_RESTART,
+ MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
+@@ -204,6 +203,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
+ unsigned long ctrl_reg;
+ struct i2c_msg *msg = drv_data->msgs;
+
++ if (!drv_data->offload_enabled)
++ return -EOPNOTSUPP;
++
+ drv_data->msg = msg;
+ drv_data->byte_posn = 0;
+ drv_data->bytes_left = msg->len;
+@@ -433,8 +435,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
+
+ drv_data->msgs++;
+ drv_data->num_msgs--;
+- if (!(drv_data->offload_enabled &&
+- mv64xxx_i2c_offload_msg(drv_data))) {
++ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
+ drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
+ writel(drv_data->cntl_bits,
+ drv_data->reg_base + drv_data->reg_offsets.control);
+@@ -458,15 +459,14 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
+ drv_data->reg_base + drv_data->reg_offsets.control);
+ break;
+
+- case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START:
+- if (!mv64xxx_i2c_offload_msg(drv_data))
+- break;
+- else
+- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+- /* FALLTHRU */
+ case MV64XXX_I2C_ACTION_SEND_START:
+- writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
+- drv_data->reg_base + drv_data->reg_offsets.control);
++ /* Can we offload this msg ? */
++ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
++ /* No, switch to standard path */
++ mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
++ writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
++ drv_data->reg_base + drv_data->reg_offsets.control);
++ }
+ break;
+
+ case MV64XXX_I2C_ACTION_SEND_ADDR_1:
+@@ -625,15 +625,10 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+- if (drv_data->offload_enabled) {
+- drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START;
+- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+- } else {
+- mv64xxx_i2c_prepare_for_io(drv_data, msg);
+
+- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+- }
++ drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
++ drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
++
+ drv_data->send_stop = is_last;
+ drv_data->block = 1;
+ mv64xxx_i2c_do_action(drv_data);
+diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
+index 4fb35d1d7494..cfb3d39b6664 100644
+--- a/drivers/iio/adc/max1363.c
++++ b/drivers/iio/adc/max1363.c
+@@ -1527,7 +1527,7 @@ static int max1363_probe(struct i2c_client *client,
+ st->client = client;
+
+ st->vref_uv = st->chip_info->int_vref_mv * 1000;
+- vref = devm_regulator_get(&client->dev, "vref");
++ vref = devm_regulator_get_optional(&client->dev, "vref");
+ if (!IS_ERR(vref)) {
+ int vref_uv;
+
+diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
+index 2f8f9d632386..0916bf6b6c31 100644
+--- a/drivers/iio/imu/adis16400.h
++++ b/drivers/iio/imu/adis16400.h
+@@ -189,6 +189,7 @@ enum {
+ ADIS16300_SCAN_INCLI_X,
+ ADIS16300_SCAN_INCLI_Y,
+ ADIS16400_SCAN_ADC,
++ ADIS16400_SCAN_TIMESTAMP,
+ };
+
+ #ifdef CONFIG_IIO_BUFFER
+diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
+index 368660dfe135..7c582f7ae34e 100644
+--- a/drivers/iio/imu/adis16400_core.c
++++ b/drivers/iio/imu/adis16400_core.c
+@@ -632,7 +632,7 @@ static const struct iio_chan_spec adis16400_channels[] = {
+ ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
+ ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12),
+ ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12),
+- IIO_CHAN_SOFT_TIMESTAMP(12)
++ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
+ };
+
+ static const struct iio_chan_spec adis16448_channels[] = {
+@@ -659,7 +659,7 @@ static const struct iio_chan_spec adis16448_channels[] = {
+ },
+ },
+ ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
+- IIO_CHAN_SOFT_TIMESTAMP(11)
++ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
+ };
+
+ static const struct iio_chan_spec adis16350_channels[] = {
+@@ -677,7 +677,7 @@ static const struct iio_chan_spec adis16350_channels[] = {
+ ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12),
+ ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12),
+ ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12),
+- IIO_CHAN_SOFT_TIMESTAMP(11)
++ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
+ };
+
+ static const struct iio_chan_spec adis16300_channels[] = {
+@@ -690,7 +690,7 @@ static const struct iio_chan_spec adis16300_channels[] = {
+ ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
+ ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13),
+ ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13),
+- IIO_CHAN_SOFT_TIMESTAMP(14)
++ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
+ };
+
+ static const struct iio_chan_spec adis16334_channels[] = {
+@@ -701,7 +701,7 @@ static const struct iio_chan_spec adis16334_channels[] = {
+ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+ ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
+- IIO_CHAN_SOFT_TIMESTAMP(8)
++ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
+ };
+
+ static struct attribute *adis16400_attributes[] = {
+diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
+index 7105f22d6cd7..9edf4c935fd7 100644
+--- a/drivers/iio/magnetometer/ak8975.c
++++ b/drivers/iio/magnetometer/ak8975.c
+@@ -85,6 +85,7 @@
+ #define AK8975_MAX_CONVERSION_TIMEOUT 500
+ #define AK8975_CONVERSION_DONE_POLL_TIME 10
+ #define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000)
++#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)
+
+ /*
+ * Per-instance context data for the device.
+@@ -265,15 +266,15 @@ static int ak8975_setup(struct i2c_client *client)
+ *
+ * Since 1uT = 100 gauss, our final scale factor becomes:
+ *
+- * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100
+- * Hadj = H * ((ASA + 128) * 30 / 256
++ * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100
++ * Hadj = H * ((ASA + 128) * 0.003) / 256
+ *
+ * Since ASA doesn't change, we cache the resultant scale factor into the
+ * device context in ak8975_setup().
+ */
+- data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8;
+- data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8;
+- data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8;
++ data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]);
++ data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]);
++ data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]);
+
+ return 0;
+ }
+@@ -428,8 +429,9 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
+ case IIO_CHAN_INFO_RAW:
+ return ak8975_read_axis(indio_dev, chan->address, val);
+ case IIO_CHAN_INFO_SCALE:
+- *val = data->raw_to_gauss[chan->address];
+- return IIO_VAL_INT;
++ *val = 0;
++ *val2 = data->raw_to_gauss[chan->address];
++ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+ }
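The ak8975 scale is roughly 0.003 Gauss per count, far below 1, so a bare integer cannot represent it; IIO_VAL_INT_PLUS_MICRO splits the value into integer and micro parts instead. A sketch of the arithmetic (RAW_TO_GAUSS mirrors the patch macro):

#include <stdio.h>

/* The macro yields millionths of a Gauss, paired with an integer part
 * of 0: the IIO convention value = val + val2 / 1e6. */
#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)

int main(void)
{
    int asa = 128;  /* a typical factory sensitivity adjustment */
    int micro = RAW_TO_GAUSS(asa);

    printf("scale = 0.%06d Gauss per LSB\n", micro);  /* 0.003000 */
    return 0;
}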
+diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
+index 5bfc02f450e6..d1bd21319d7d 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7322.c
++++ b/drivers/infiniband/hw/qib/qib_iba7322.c
+@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+
++ /* ensure previous Tx parameters are not still forced */
++ qib_write_kreg_port(ppd, krp_tx_deemph_override,
++ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
++ reset_tx_deemphasis_override));
++
+ if (qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index aacf6bf352d8..6edc2db428e9 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1855,11 +1855,15 @@ static int process_checks(struct r1bio *r1_bio)
+ for (i = 0; i < conf->raid_disks * 2; i++) {
+ int j;
+ int size;
++ int uptodate;
+ struct bio *b = r1_bio->bios[i];
+ if (b->bi_end_io != end_sync_read)
+ continue;
+- /* fixup the bio for reuse */
++ /* fixup the bio for reuse, but preserve BIO_UPTODATE */
++ uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
+ bio_reset(b);
++ if (!uptodate)
++ clear_bit(BIO_UPTODATE, &b->bi_flags);
+ b->bi_vcnt = vcnt;
+ b->bi_size = r1_bio->sectors << 9;
+ b->bi_sector = r1_bio->sector +
+@@ -1892,11 +1896,14 @@ static int process_checks(struct r1bio *r1_bio)
+ int j;
+ struct bio *pbio = r1_bio->bios[primary];
+ struct bio *sbio = r1_bio->bios[i];
++ int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
+
+ if (sbio->bi_end_io != end_sync_read)
+ continue;
++ /* Now we can 'fixup' the BIO_UPTODATE flag */
++ set_bit(BIO_UPTODATE, &sbio->bi_flags);
+
+- if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
++ if (uptodate) {
+ for (j = vcnt; j-- ; ) {
+ struct page *p, *s;
+ p = pbio->bi_io_vec[j].bv_page;
+@@ -1911,7 +1918,7 @@ static int process_checks(struct r1bio *r1_bio)
+ if (j >= 0)
+ atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
+ if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
+- && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
++ && uptodate)) {
+ /* No need to write to this device. */
+ sbio->bi_end_io = NULL;
+ rdev_dec_pending(conf->mirrors[i].rdev, mddev);
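The raid1 fix hinges on bio_reset() re-marking a bio as up to date by default, wiping the record of a failed read. A toy model of the save-and-restore pattern (the struct and flag value are invented; only the shape of the fix is real):

#include <stdio.h>
#include <string.h>

#define FLAG_UPTODATE (1u << 0)

struct fake_bio {
    unsigned int flags;
};

static void bio_reset_like(struct fake_bio *b)
{
    memset(b, 0, sizeof(*b));
    b->flags = FLAG_UPTODATE;   /* reset defaults to "ok" */
}

int main(void)
{
    struct fake_bio b = { .flags = 0 };  /* a read that failed */
    unsigned int uptodate = b.flags & FLAG_UPTODATE;

    bio_reset_like(&b);
    if (!uptodate)
        b.flags &= ~FLAG_UPTODATE;  /* keep the failure visible */

    printf("uptodate after reset: %u\n", b.flags & FLAG_UPTODATE);
    return 0;
}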
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 10e9e46108fd..3ecfb063ec0b 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5351,23 +5351,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
+ return sectors * (raid_disks - conf->max_degraded);
+ }
+
++static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
++{
++ safe_put_page(percpu->spare_page);
++ kfree(percpu->scribble);
++ percpu->spare_page = NULL;
++ percpu->scribble = NULL;
++}
++
++static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
++{
++ if (conf->level == 6 && !percpu->spare_page)
++ percpu->spare_page = alloc_page(GFP_KERNEL);
++ if (!percpu->scribble)
++ percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
++
++ if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
++ free_scratch_buffer(conf, percpu);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
+ static void raid5_free_percpu(struct r5conf *conf)
+ {
+- struct raid5_percpu *percpu;
+ unsigned long cpu;
+
+ if (!conf->percpu)
+ return;
+
+- get_online_cpus();
+- for_each_possible_cpu(cpu) {
+- percpu = per_cpu_ptr(conf->percpu, cpu);
+- safe_put_page(percpu->spare_page);
+- kfree(percpu->scribble);
+- }
+ #ifdef CONFIG_HOTPLUG_CPU
+ unregister_cpu_notifier(&conf->cpu_notify);
+ #endif
++
++ get_online_cpus();
++ for_each_possible_cpu(cpu)
++ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ put_online_cpus();
+
+ free_percpu(conf->percpu);
+@@ -5394,15 +5414,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+- if (conf->level == 6 && !percpu->spare_page)
+- percpu->spare_page = alloc_page(GFP_KERNEL);
+- if (!percpu->scribble)
+- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+-
+- if (!percpu->scribble ||
+- (conf->level == 6 && !percpu->spare_page)) {
+- safe_put_page(percpu->spare_page);
+- kfree(percpu->scribble);
++ if (alloc_scratch_buffer(conf, percpu)) {
+ pr_err("%s: failed memory allocation for cpu%ld\n",
+ __func__, cpu);
+ return notifier_from_errno(-ENOMEM);
+@@ -5410,10 +5422,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+- safe_put_page(percpu->spare_page);
+- kfree(percpu->scribble);
+- percpu->spare_page = NULL;
+- percpu->scribble = NULL;
++ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ break;
+ default:
+ break;
+@@ -5425,40 +5434,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
+ static int raid5_alloc_percpu(struct r5conf *conf)
+ {
+ unsigned long cpu;
+- struct page *spare_page;
+- struct raid5_percpu __percpu *allcpus;
+- void *scribble;
+- int err;
++ int err = 0;
+
+- allcpus = alloc_percpu(struct raid5_percpu);
+- if (!allcpus)
++ conf->percpu = alloc_percpu(struct raid5_percpu);
++ if (!conf->percpu)
+ return -ENOMEM;
+- conf->percpu = allcpus;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ conf->cpu_notify.notifier_call = raid456_cpu_notify;
++ conf->cpu_notify.priority = 0;
++ err = register_cpu_notifier(&conf->cpu_notify);
++ if (err)
++ return err;
++#endif
+
+ get_online_cpus();
+- err = 0;
+ for_each_present_cpu(cpu) {
+- if (conf->level == 6) {
+- spare_page = alloc_page(GFP_KERNEL);
+- if (!spare_page) {
+- err = -ENOMEM;
+- break;
+- }
+- per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
+- }
+- scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+- if (!scribble) {
+- err = -ENOMEM;
++ err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
++ if (err) {
++ pr_err("%s: failed memory allocation for cpu%ld\n",
++ __func__, cpu);
+ break;
+ }
+- per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
+ }
+-#ifdef CONFIG_HOTPLUG_CPU
+- conf->cpu_notify.notifier_call = raid456_cpu_notify;
+- conf->cpu_notify.priority = 0;
+- if (err == 0)
+- err = register_cpu_notifier(&conf->cpu_notify);
+-#endif
+ put_online_cpus();
+
+ return err;
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index e0684b4d9a08..8c33f943abbf 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -882,7 +882,6 @@ void mei_cl_all_disconnect(struct mei_device *dev)
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->mei_flow_ctrl_creds = 0;
+- cl->read_cb = NULL;
+ cl->timer_count = 0;
+ }
+ }
+@@ -916,8 +915,16 @@ void mei_cl_all_wakeup(struct mei_device *dev)
+ void mei_cl_all_write_clear(struct mei_device *dev)
+ {
+ struct mei_cl_cb *cb, *next;
++ struct list_head *list;
+
+- list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
++ list = &dev->write_list.list;
++ list_for_each_entry_safe(cb, next, list, list) {
++ list_del(&cb->list);
++ mei_io_cb_free(cb);
++ }
++
++ list = &dev->write_waiting_list.list;
++ list_for_each_entry_safe(cb, next, list, list) {
+ list_del(&cb->list);
+ mei_io_cb_free(cb);
+ }
+diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
+index 17d7fece35d2..57fc06e0f434 100644
+--- a/drivers/net/wireless/ath/ar5523/ar5523.c
++++ b/drivers/net/wireless/ath/ar5523/ar5523.c
+@@ -1764,7 +1764,7 @@ static struct usb_device_id ar5523_id_table[] = {
+ AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */
+ AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
+ AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
+- AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
++ AR5523_DEVICE_UG(0x129b, 0x160b), /* Gigaset / USB stick 108
+ (CyberTAN Technology) */
+ AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */
+ AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index c3676bf1d6c4..50f991c89cfe 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -34,6 +34,10 @@ static int ath9k_htc_btcoex_enable;
+ module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
+ MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+
++static int ath9k_ps_enable;
++module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
++MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
++
+ #define CHAN2G(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+@@ -725,12 +729,14 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_RX_INCLUDES_FCS |
+- IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_MFP_CAPABLE |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+
++ if (ath9k_ps_enable)
++ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
++
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+index 79d67c35299b..968345e30436 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+@@ -1337,21 +1337,22 @@ static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate trate;
+
++ if (!(changed & IEEE80211_RC_SUPP_RATES_CHANGED))
++ return;
++
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+- memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+- ath9k_htc_setup_rate(priv, sta, &trate);
+- if (!ath9k_htc_send_rate_cmd(priv, &trate))
+- ath_dbg(common, CONFIG,
+- "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+- sta->addr, be32_to_cpu(trate.capflags));
+- else
+- ath_dbg(common, CONFIG,
+- "Unable to update supported rates for sta: %pM\n",
+- sta->addr);
+- }
++ memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
++ ath9k_htc_setup_rate(priv, sta, &trate);
++ if (!ath9k_htc_send_rate_cmd(priv, &trate))
++ ath_dbg(common, CONFIG,
++ "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
++ sta->addr, be32_to_cpu(trate.capflags));
++ else
++ ath_dbg(common, CONFIG,
++ "Unable to update supported rates for sta: %pM\n",
++ sta->addr);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index 9a1f349f9260..c9887cb60650 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -57,6 +57,10 @@ static int ath9k_bt_ant_diversity;
+ module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
+ MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
+
++static int ath9k_ps_enable;
++module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
++MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
++
+ bool is_ath9k_unloaded;
+ /* We use the hw_value as an index into our private channel structure */
+
+@@ -830,13 +834,15 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_SIGNAL_DBM |
+- IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_SUPPORTS_RC_TABLE |
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+
++ if (ath9k_ps_enable)
++ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
++
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+index b76a9a8fc0b3..4a1cf1344fdb 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
++++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+@@ -182,6 +182,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+
+ for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
+ ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
++
++ if (ch_idx >= NUM_2GHZ_CHANNELS &&
++ !data->sku_cap_band_52GHz_enable)
++ ch_flags &= ~NVM_CHANNEL_VALID;
++
+ if (!(ch_flags & NVM_CHANNEL_VALID)) {
+ IWL_DEBUG_EEPROM(dev,
+ "Ch. %d Flags %x [%sGHz] - No traffic\n",
+diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
+index 621fb71f282a..62258ebe3348 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
+@@ -311,7 +311,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
+
+ iwl_mvm_scan_fill_ssids(cmd, req);
+
+- cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
++ cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
++ TX_CMD_FLG_BT_DIS);
+ cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
+ cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ cmd->tx_cmd.rate_n_flags =
+diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
+index a9c357491434..45746674a1a1 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
++++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
+@@ -411,6 +411,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+ mvm->status, table.valid);
+ }
+
++ IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
++
+ trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
+ table.data1, table.data2, table.data3,
+ table.blink1, table.blink2, table.ilink1,
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index 3c4b2af51611..503b4e4cb551 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -99,11 +99,12 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
+ static int of_bus_pci_match(struct device_node *np)
+ {
+ /*
++ * "pciex" is PCI Express
+ * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
+ * "ht" is hypertransport
+ */
+- return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") ||
+- !strcmp(np->type, "ht");
++ return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
++ !strcmp(np->type, "vci") || !strcmp(np->type, "ht");
+ }
+
+ static void of_bus_pci_count_cells(struct device_node *np,
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index be5cba52a09c..5707135db526 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -706,6 +706,17 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
+ return (unsigned int)sta;
+ }
+
++static inline bool device_status_valid(unsigned int sta)
++{
++ /*
++ * ACPI spec says that _STA may return bit 0 clear with bit 3 set
++ * if the device is valid but does not require a device driver to be
++ * loaded (Section 6.3.7 of ACPI 5.0A).
++ */
++ unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;
++ return (sta & mask) == mask;
++}
++
+ /**
+ * trim_stale_devices - remove PCI devices that are not responding.
+ * @dev: PCI device to start walking the hierarchy from.
+@@ -721,7 +732,7 @@ static void trim_stale_devices(struct pci_dev *dev)
+ unsigned long long sta;
+
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+- alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
++ alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
+ || acpiphp_no_hotplug(handle);
+ }
+ if (!alive) {
+@@ -764,7 +775,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
+ mutex_lock(&slot->crit_sect);
+ if (slot_no_hotplug(slot)) {
+ ; /* do nothing */
+- } else if (get_slot_status(slot) == ACPI_STA_ALL) {
++ } else if (device_status_valid(get_slot_status(slot))) {
+ /* remove stale devices if any */
+ list_for_each_entry_safe(dev, tmp, &bus->devices,
+ bus_list)
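device_status_valid() above checks a two-bit mask rather than insisting on every _STA bit. A standalone sketch of that predicate (bit positions follow the ACPI _STA layout referenced in the hunk's comment):

#include <stdio.h>

#define STA_PRESENT     (1u << 0)
#define STA_ENABLED     (1u << 1)
#define STA_SHOWN_IN_UI (1u << 2)
#define STA_FUNCTIONING (1u << 3)

static int device_status_valid(unsigned int sta)
{
    unsigned int mask = STA_ENABLED | STA_FUNCTIONING;
    return (sta & mask) == mask;
}

int main(void)
{
    /* bit 0 clear, bit 3 set: valid per ACPI 5.0A section 6.3.7 */
    printf("%d\n", device_status_valid(STA_ENABLED | STA_FUNCTIONING));
    printf("%d\n", device_status_valid(STA_PRESENT));  /* not enough */
    return 0;
}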
+diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
+index c7ff6d67f158..0fbac861080d 100644
+--- a/drivers/power/max17040_battery.c
++++ b/drivers/power/max17040_battery.c
+@@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client)
+ {
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+- if (chip->pdata->battery_online)
++ if (chip->pdata && chip->pdata->battery_online)
+ chip->online = chip->pdata->battery_online();
+ else
+ chip->online = 1;
+@@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client)
+ {
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+- if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
++ if (!chip->pdata || !chip->pdata->charger_online
++ || !chip->pdata->charger_enable) {
+ chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
+ return;
+ }
+diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
+index 47a68b43bcd5..ba4e99a37327 100644
+--- a/drivers/spi/spi-nuc900.c
++++ b/drivers/spi/spi-nuc900.c
+@@ -363,6 +363,8 @@ static int nuc900_spi_probe(struct platform_device *pdev)
+ init_completion(&hw->done);
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
++ if (hw->pdata->lsb)
++ master->mode_bits |= SPI_LSB_FIRST;
+ master->num_chipselect = hw->pdata->num_cs;
+ master->bus_num = hw->pdata->bus_num;
+ hw->bitbang.master = hw->master;
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 30b1229f6406..9e039c60c068 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -600,9 +600,7 @@ static void spi_pump_messages(struct kthread_work *work)
+ ret = master->transfer_one_message(master, master->cur_msg);
+ if (ret) {
+ dev_err(&master->dev,
+- "failed to transfer one message from queue: %d\n", ret);
+- master->cur_msg->status = ret;
+- spi_finalize_current_message(master);
++ "failed to transfer one message from queue\n");
+ return;
+ }
+ }
+diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
+index f84df46d326a..a917eda80ef0 100644
+--- a/drivers/staging/comedi/drivers/adv_pci1710.c
++++ b/drivers/staging/comedi/drivers/adv_pci1710.c
+@@ -490,6 +490,7 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
+ struct comedi_insn *insn, unsigned int *data)
+ {
+ struct pci1710_private *devpriv = dev->private;
++ unsigned int val;
+ int n, chan, range, ofs;
+
+ chan = CR_CHAN(insn->chanspec);
+@@ -505,11 +506,14 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
+ outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
+ ofs = PCI171x_DA1;
+ }
++ val = devpriv->ao_data[chan];
+
+- for (n = 0; n < insn->n; n++)
+- outw(data[n], dev->iobase + ofs);
++ for (n = 0; n < insn->n; n++) {
++ val = data[n];
++ outw(val, dev->iobase + ofs);
++ }
+
+- devpriv->ao_data[chan] = data[n];
++ devpriv->ao_data[chan] = val;
+
+ return n;
+
+@@ -679,6 +683,7 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
+ struct comedi_insn *insn, unsigned int *data)
+ {
+ struct pci1710_private *devpriv = dev->private;
++ unsigned int val;
+ int n, rangereg, chan;
+
+ chan = CR_CHAN(insn->chanspec);
+@@ -688,13 +693,15 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
+ outb(rangereg, dev->iobase + PCI1720_RANGE);
+ devpriv->da_ranges = rangereg;
+ }
++ val = devpriv->ao_data[chan];
+
+ for (n = 0; n < insn->n; n++) {
+- outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1));
++ val = data[n];
++ outw(val, dev->iobase + PCI1720_DA0 + (chan << 1));
+ outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
+ }
+
+- devpriv->ao_data[chan] = data[n];
++ devpriv->ao_data[chan] = val;
+
+ return n;
+ }
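The comedi bug is a classic: after for (n = 0; n < insn->n; n++) the index equals insn->n, so the old data[n] read one element past the end. A minimal reproduction of the fixed pattern:

#include <stdio.h>

int main(void)
{
    unsigned int data[4] = { 10, 20, 30, 40 };
    unsigned int val = 0;
    int n, count = 4;

    for (n = 0; n < count; n++)
        val = data[n];  /* remember the value inside the loop */

    /* old code did: last = data[n];  -- out of bounds, n == 4 here */
    printf("last value written: %u (n is now %d)\n", val, n);
    return 0;
}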
+diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
+index 2b2049c8bc6b..2c3af3f8c352 100644
+--- a/drivers/staging/iio/adc/ad799x_core.c
++++ b/drivers/staging/iio/adc/ad799x_core.c
+@@ -644,7 +644,8 @@ static int ad799x_probe(struct i2c_client *client,
+ return 0;
+
+ error_free_irq:
+- free_irq(client->irq, indio_dev);
++ if (client->irq > 0)
++ free_irq(client->irq, indio_dev);
+ error_cleanup_ring:
+ ad799x_ring_cleanup(indio_dev);
+ error_disable_reg:
+diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
+index 09844be5eec4..b369dff44756 100644
+--- a/drivers/staging/lustre/lustre/llite/dir.c
++++ b/drivers/staging/lustre/lustre/llite/dir.c
+@@ -1086,7 +1086,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
+ break;
+ case Q_GETQUOTA:
+ if (((type == USRQUOTA &&
+- uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
++ !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
+ (type == GRPQUOTA &&
+ !in_egroup_p(make_kgid(&init_user_ns, id)))) &&
+ (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index 9ca3180ebaa0..606d6f059972 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -53,7 +53,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
+ /*=== Customer ID ===*/
+ /****** 8188EUS ********/
+- {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
++ {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ {} /* Terminating entry */
+ };
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index d1ae4c5c3ffd..dfe3db7942ea 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -2009,7 +2009,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
+ unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+ sense_reason_t ret = TCM_NO_SENSE;
+- int pr_holder = 0;
++ int pr_holder = 0, type;
+
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+@@ -2131,6 +2131,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out;
+ }
++ type = pr_reg->pr_res_type;
+
+ spin_lock(&pr_tmpl->registration_lock);
+ /*
+@@ -2161,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
+ * Release the calling I_T Nexus registration now..
+ */
+ __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
++ pr_reg = NULL;
+
+ /*
+ * From spc4r17, section 5.7.11.3 Unregistering
+@@ -2174,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
+ * RESERVATIONS RELEASED.
+ */
+ if (pr_holder &&
+- (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+- pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
++ (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
++ type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+ list_for_each_entry(pr_reg_p,
+ &pr_tmpl->registration_list,
+ pr_reg_list) {
+@@ -2194,7 +2196,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
+ ret = core_scsi3_update_and_write_aptpl(dev, aptpl);
+
+ out:
+- core_scsi3_put_pr_reg(pr_reg);
++ if (pr_reg)
++ core_scsi3_put_pr_reg(pr_reg);
+ return ret;
+ }
+
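The PR-register fix caches pr_res_type before the registration can be freed and NULLs the pointer so the shared exit path cannot touch it. A compact userspace model of that discipline (types invented):

#include <stdio.h>
#include <stdlib.h>

struct reg { int res_type; };

static void free_registration(struct reg *r) { free(r); }

int main(void)
{
    struct reg *pr_reg = malloc(sizeof(*pr_reg));
    int type;

    if (!pr_reg)
        return 1;
    pr_reg->res_type = 7;

    type = pr_reg->res_type;  /* cache before the free below */
    free_registration(pr_reg);
    pr_reg = NULL;            /* the exit path tests this */

    /* common exit path: only drop a reference that still exists */
    if (pr_reg)
        free_registration(pr_reg);

    printf("res_type still usable: %d\n", type);
    return 0;
}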
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index c0f76da55304..5056090f1b3d 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -1089,6 +1089,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
+ {
+ unsigned int addr = 0;
+ unsigned int modem = 0;
++ unsigned int brk = 0;
+ struct gsm_dlci *dlci;
+ int len = clen;
+ u8 *dp = data;
+@@ -1115,6 +1116,16 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
+ if (len == 0)
+ return;
+ }
++ len--;
++ if (len > 0) {
++ while (gsm_read_ea(&brk, *dp++) == 0) {
++ len--;
++ if (len == 0)
++ return;
++ }
++ modem <<= 7;
++ modem |= (brk & 0x7f);
++ }
+ tty = tty_port_tty_get(&dlci->port);
+ gsm_process_modem(tty, dlci, modem, clen);
+ if (tty) {
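The n_gsm hunk extends the modem-status decode by one more EA-coded octet (the break signal). A sketch of 3GPP TS 27.010 EA decoding, where each octet contributes 7 value bits and bit 0 terminates the field (simplified from gsm_read_ea()):

#include <stdio.h>

/* Returns octets consumed, or 0 if the field is truncated. */
static int read_ea(const unsigned char *p, int len, unsigned int *out)
{
    unsigned int v = 0;
    int i;

    for (i = 0; i < len; i++) {
        v = (v << 7) | (p[i] >> 1);
        if (p[i] & 1) {     /* EA bit set: final octet */
            *out = v;
            return i + 1;
        }
    }
    return 0;
}

int main(void)
{
    unsigned char field[] = { 0x02, 0x81 };  /* two-octet value */
    unsigned int val = 0;
    int used = read_ea(field, 2, &val);

    printf("consumed %d octet(s), value 0x%x\n", used, val);  /* 2, 0xc0 */
    return 0;
}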
+diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
+index 61c1ad03db5b..a72c33f8e263 100644
+--- a/drivers/tty/serial/sirfsoc_uart.c
++++ b/drivers/tty/serial/sirfsoc_uart.c
+@@ -540,8 +540,10 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
+ wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+ rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
+ SIRFUART_IO_MODE);
+- sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+ spin_unlock_irqrestore(&sirfport->rx_lock, flags);
++ spin_lock(&port->lock);
++ sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
++ spin_unlock(&port->lock);
+ if (sirfport->rx_io_count == 4) {
+ spin_lock_irqsave(&sirfport->rx_lock, flags);
+ sirfport->rx_io_count = 0;
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 9a8e8c5a0c73..239eae55600a 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -1164,6 +1164,8 @@ static void csi_J(struct vc_data *vc, int vpar)
+ scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
+ vc->vc_screenbuf_size >> 1);
+ set_origin(vc);
++ if (CON_IS_VISIBLE(vc))
++ update_screen(vc);
+ /* fall through */
+ case 2: /* erase whole display */
+ count = vc->vc_cols * vc->vc_rows;
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index f20a04494e16..d6a8d23f047b 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1033,7 +1033,6 @@ static int register_root_hub(struct usb_hcd *hcd)
+ dev_name(&usb_dev->dev), retval);
+ return retval;
+ }
+- usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
+ }
+
+ retval = usb_new_device (usb_dev);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index e3f7e41818f8..3670086fb7c8 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -135,7 +135,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
+ return usb_get_intfdata(hdev->actconfig->interface[0]);
+ }
+
+-int usb_device_supports_lpm(struct usb_device *udev)
++static int usb_device_supports_lpm(struct usb_device *udev)
+ {
+ /* USB 2.1 (and greater) devices indicate LPM support through
+ * their USB 2.0 Extended Capabilities BOS descriptor.
+@@ -156,11 +156,6 @@ int usb_device_supports_lpm(struct usb_device *udev)
+ "Power management will be impacted.\n");
+ return 0;
+ }
+-
+- /* udev is root hub */
+- if (!udev->parent)
+- return 1;
+-
+ if (udev->parent->lpm_capable)
+ return 1;
+
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index c49383669cd8..823857767a16 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -35,7 +35,6 @@ extern int usb_get_device_descriptor(struct usb_device *dev,
+ unsigned int size);
+ extern int usb_get_bos_descriptor(struct usb_device *dev);
+ extern void usb_release_bos_descriptor(struct usb_device *dev);
+-extern int usb_device_supports_lpm(struct usb_device *udev);
+ extern char *usb_cache_string(struct usb_device *udev, int index);
+ extern int usb_set_configuration(struct usb_device *dev, int configuration);
+ extern int usb_choose_configuration(struct usb_device *udev);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index fec3c8654ccd..6bfbd80ec2b9 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2929,58 +2929,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+ }
+
+ while (1) {
+- if (room_on_ring(xhci, ep_ring, num_trbs)) {
+- union xhci_trb *trb = ep_ring->enqueue;
+- unsigned int usable = ep_ring->enq_seg->trbs +
+- TRBS_PER_SEGMENT - 1 - trb;
+- u32 nop_cmd;
+-
+- /*
+- * Section 4.11.7.1 TD Fragments states that a link
+- * TRB must only occur at the boundary between
+- * data bursts (eg 512 bytes for 480M).
+- * While it is possible to split a large fragment
+- * we don't know the size yet.
+- * Simplest solution is to fill the trb before the
+- * LINK with nop commands.
+- */
+- if (num_trbs == 1 || num_trbs <= usable || usable == 0)
+- break;
+-
+- if (ep_ring->type != TYPE_BULK)
+- /*
+- * While isoc transfers might have a buffer that
+- * crosses a 64k boundary it is unlikely.
+- * Since we can't add NOPs without generating
+- * gaps in the traffic just hope it never
+- * happens at the end of the ring.
+- * This could be fixed by writing a LINK TRB
+- * instead of the first NOP - however the
+- * TRB_TYPE_LINK_LE32() calls would all need
+- * changing to check the ring length.
+- */
+- break;
+-
+- if (num_trbs >= TRBS_PER_SEGMENT) {
+- xhci_err(xhci, "Too many fragments %d, max %d\n",
+- num_trbs, TRBS_PER_SEGMENT - 1);
+- return -EINVAL;
+- }
+-
+- nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
+- ep_ring->cycle_state);
+- ep_ring->num_trbs_free -= usable;
+- do {
+- trb->generic.field[0] = 0;
+- trb->generic.field[1] = 0;
+- trb->generic.field[2] = 0;
+- trb->generic.field[3] = nop_cmd;
+- trb++;
+- } while (--usable);
+- ep_ring->enqueue = trb;
+- if (room_on_ring(xhci, ep_ring, num_trbs))
+- break;
+- }
++ if (room_on_ring(xhci, ep_ring, num_trbs))
++ break;
+
+ if (ep_ring == xhci->cmd_ring) {
+ xhci_err(xhci, "Do not support expand command ring\n");
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 1dceec25223e..82fb34183a7f 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4727,11 +4727,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ struct device *dev = hcd->self.controller;
+ int retval;
+
+- /* Limit the block layer scatter-gather lists to half a segment. */
+- hcd->self.sg_tablesize = TRBS_PER_SEGMENT / 2;
+-
+- /* support to build packet from discontinuous buffers */
+- hcd->self.no_sg_constraint = 1;
++ /* Accept arbitrarily long scatter-gather lists */
++ hcd->self.sg_tablesize = ~0;
+
+ /* XHCI controllers don't stop the ep queue on short packets :| */
+ hcd->self.no_stop_on_short = 1;
+@@ -4757,6 +4754,14 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ /* xHCI private pointer was set in xhci_pci_probe for the second
+ * registered roothub.
+ */
++ xhci = hcd_to_xhci(hcd);
++ /*
++ * Support arbitrarily aligned sg-list entries on hosts without
++ * TD fragment rules (which are currently unsupported).
++ */
++ if (xhci->hci_version < 0x100)
++ hcd->self.no_sg_constraint = 1;
++
+ return 0;
+ }
+
+@@ -4783,6 +4788,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ if (xhci->hci_version > 0x96)
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+
++ if (xhci->hci_version < 0x100)
++ hcd->self.no_sg_constraint = 1;
++
+ /* Make sure the HC is halted. */
+ retval = xhci_halt(xhci);
+ if (retval)
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 6b3164c75c98..ed3a425de8ce 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1262,7 +1262,7 @@ union xhci_trb {
+ * since the command ring is 64-byte aligned.
+ * It must also be greater than 16.
+ */
+-#define TRBS_PER_SEGMENT 256
++#define TRBS_PER_SEGMENT 64
+ /* Allow two commands + a link TRB, along with any reserved command TRBs */
+ #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
+ #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 6f1cbbf51ae8..eea9e7b6af4c 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -153,6 +153,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
+@@ -192,6 +193,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
++ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index a7019d1e3058..1e2d369df86e 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -50,6 +50,7 @@
+ #define TI_XDS100V2_PID 0xa6d0
+
+ #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
++#define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */
+
+ /* US Interface Navigator (http://www.usinterface.com/) */
+ #define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */
+@@ -363,6 +364,12 @@
+ /* Sprog II (Andrew Crosland's SprogII DCC interface) */
+ #define FTDI_SPROG_II 0xF0C8
+
++/*
++ * Two of the Tagsys RFID Readers
++ */
++#define FTDI_TAGSYS_LP101_PID 0xF0E9 /* Tagsys L-P101 RFID*/
++#define FTDI_TAGSYS_P200X_PID 0xF0EE /* Tagsys Medio P200x RFID*/
++
+ /* an infrared receiver for user access control with IR tags */
+ #define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 5c86f57e4afa..216d20affba8 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1362,7 +1362,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index c65437cfd4a2..968a40201e5f 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -139,6 +139,9 @@ static const struct usb_device_id id_table[] = {
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
+
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index 52eb91f2eb2a..147f01971c39 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -72,7 +72,8 @@ DEVICE(hp4x, HP4X_IDS);
+
+ /* Suunto ANT+ USB Driver */
+ #define SUUNTO_IDS() \
+- { USB_DEVICE(0x0fcf, 0x1008) }
++ { USB_DEVICE(0x0fcf, 0x1008) }, \
++ { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */
+ DEVICE(suunto, SUUNTO_IDS);
+
+ /* Siemens USB/MPI adapter */
+diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
+index 8470e1b114f2..1dd0604d1911 100644
+--- a/drivers/usb/storage/Kconfig
++++ b/drivers/usb/storage/Kconfig
+@@ -18,7 +18,9 @@ config USB_STORAGE
+
+ This option depends on 'SCSI' support being enabled, but you
+ probably also need 'SCSI device support: SCSI disk support'
+- (BLK_DEV_SD) for most USB storage devices.
++ (BLK_DEV_SD) for most USB storage devices. Some devices also
++ will require 'Probe all LUNs on each SCSI device'
++ (SCSI_MULTI_LUN).
+
+ To compile this driver as a module, choose M here: the
+ module will be called usb-storage.
+diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
+index 18509e6c21ab..9d38ddc8da49 100644
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -78,6 +78,8 @@ static const char* host_info(struct Scsi_Host *host)
+
+ static int slave_alloc (struct scsi_device *sdev)
+ {
++ struct us_data *us = host_to_us(sdev->host);
++
+ /*
+ * Set the INQUIRY transfer length to 36. We don't use any of
+ * the extra data and many devices choke if asked for more or
+@@ -102,6 +104,10 @@ static int slave_alloc (struct scsi_device *sdev)
+ */
+ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+
++ /* Tell the SCSI layer if we know there is more than one LUN */
++ if (us->protocol == USB_PR_BULK && us->max_lun > 0)
++ sdev->sdev_bflags |= BLIST_FORCELUN;
++
+ return 0;
+ }
+
+diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
+index 65a6a75066a8..82e8ed0324e3 100644
+--- a/drivers/usb/storage/unusual_cypress.h
++++ b/drivers/usb/storage/unusual_cypress.h
+@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
+ "Cypress ISD-300LP",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+
+-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
+ "Super Top",
+ "USB 2.0 SATA BRIDGE",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index ad06255c2ade..adbeb255616a 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1455,6 +1455,13 @@ UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
++/* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */
++UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201,
++ "Research In Motion",
++ "BlackBerry Bold 9000",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64 ),
++
+ /* Reported by Michael Stattmann <michael@stattmann.com> */
+ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
+ "Sony Ericsson",
+diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
+index f8448573d030..0b2fefbfcd10 100644
+--- a/drivers/vme/bridges/vme_ca91cx42.c
++++ b/drivers/vme/bridges/vme_ca91cx42.c
+@@ -884,7 +884,7 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
+ if (done == count)
+ goto out;
+ }
+- if ((uintptr_t)addr & 0x2) {
++ if ((uintptr_t)(addr + done) & 0x2) {
+ if ((count - done) < 2) {
+ *(u8 *)(buf + done) = ioread8(addr + done);
+ done += 1;
+@@ -938,7 +938,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
+ if (done == count)
+ goto out;
+ }
+- if ((uintptr_t)addr & 0x2) {
++ if ((uintptr_t)(addr + done) & 0x2) {
+ if ((count - done) < 2) {
+ iowrite8(*(u8 *)(buf + done), addr + done);
+ done += 1;
+diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
+index 9cf88337e4e9..7db4e6395e23 100644
+--- a/drivers/vme/bridges/vme_tsi148.c
++++ b/drivers/vme/bridges/vme_tsi148.c
+@@ -1289,7 +1289,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
+ if (done == count)
+ goto out;
+ }
+- if ((uintptr_t)addr & 0x2) {
++ if ((uintptr_t)(addr + done) & 0x2) {
+ if ((count - done) < 2) {
+ *(u8 *)(buf + done) = ioread8(addr + done);
+ done += 1;
+@@ -1371,7 +1371,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
+ if (done == count)
+ goto out;
+ }
+- if ((uintptr_t)addr & 0x2) {
++ if ((uintptr_t)(addr + done) & 0x2) {
+ if ((count - done) < 2) {
+ iowrite8(*(u8 *)(buf + done), addr + done);
+ done += 1;
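[Editorial note] Both VME bridge hunks above fix the same off-by-`done` bug: the copy loop tested the alignment of the base address rather than the current cursor (addr + done), so a transfer that became misaligned partway through took the wrong-width access. A minimal userspace sketch of the pattern, with illustrative names (not the VME driver API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Copy count bytes, using 2-byte accesses whenever the current
     * cursor is 2-byte aligned. The bug fixed above: testing src
     * instead of src + done. */
    static void aligned_copy(uint8_t *dst, const uint8_t *src, size_t count)
    {
        size_t done = 0;

        while (done < count) {
            if (((uintptr_t)(src + done) & 0x1) == 0 && count - done >= 2) {
                memcpy(dst + done, src + done, 2); /* stands in for ioread16 */
                done += 2;
            } else {
                dst[done] = src[done];             /* stands in for ioread8 */
                done += 1;
            }
        }
    }

    int main(void)
    {
        uint8_t src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }, dst[8] = { 0 };

        aligned_copy(dst, src + 1, 6); /* deliberately misaligned start */
        printf("%d..%d\n", dst[0], dst[5]);
        return 0;
    }

The drivers test & 0x2 because they step from byte through 16-bit to 32-bit accesses; the principle is the same.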
+diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
+index fc60b31453ee..6dea2b90b4d5 100644
+--- a/fs/bio-integrity.c
++++ b/fs/bio-integrity.c
+@@ -114,6 +114,14 @@ void bio_integrity_free(struct bio *bio)
+ }
+ EXPORT_SYMBOL(bio_integrity_free);
+
++static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip)
++{
++ if (bip->bip_slab == BIO_POOL_NONE)
++ return BIP_INLINE_VECS;
++
++ return bvec_nr_vecs(bip->bip_slab);
++}
++
+ /**
+ * bio_integrity_add_page - Attach integrity metadata
+ * @bio: bio to update
+@@ -129,7 +137,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
+ struct bio_integrity_payload *bip = bio->bi_integrity;
+ struct bio_vec *iv;
+
+- if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
++ if (bip->bip_vcnt >= bip_integrity_vecs(bip)) {
+ printk(KERN_ERR "%s: bip_vec full\n", __func__);
+ return 0;
+ }
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index 51f5e0ee7237..494b68349667 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -1027,15 +1027,30 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
+ __u32 secdesclen = 0;
+ struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
+ struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
++ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
++ struct cifs_tcon *tcon;
++
++ if (IS_ERR(tlink))
++ return PTR_ERR(tlink);
++ tcon = tlink_tcon(tlink);
+
+ cifs_dbg(NOISY, "set ACL from mode for %s\n", path);
+
+ /* Get the security descriptor */
+- pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);
++
++ if (tcon->ses->server->ops->get_acl == NULL) {
++ cifs_put_tlink(tlink);
++ return -EOPNOTSUPP;
++ }
++
++ pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
++ &secdesclen);
+ if (IS_ERR(pntsd)) {
+ rc = PTR_ERR(pntsd);
+ cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
+- goto out;
++ cifs_put_tlink(tlink);
++ return rc;
+ }
+
+ /*
+@@ -1048,6 +1063,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
+ pnntsd = kmalloc(secdesclen, GFP_KERNEL);
+ if (!pnntsd) {
+ kfree(pntsd);
++ cifs_put_tlink(tlink);
+ return -ENOMEM;
+ }
+
+@@ -1056,14 +1072,18 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
+
+ cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
+
++ if (tcon->ses->server->ops->set_acl == NULL)
++ rc = -EOPNOTSUPP;
++
+ if (!rc) {
+ /* Set the security descriptor */
+- rc = set_cifs_acl(pnntsd, secdesclen, inode, path, aclflag);
++ rc = tcon->ses->server->ops->set_acl(pnntsd, secdesclen, inode,
++ path, aclflag);
+ cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
+ }
++ cifs_put_tlink(tlink);
+
+ kfree(pnntsd);
+ kfree(pntsd);
+-out:
+ return rc;
+ }
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index c8e03f8d628f..db95dca335ca 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -379,6 +379,16 @@ struct smb_version_operations {
+ char * (*create_lease_buf)(u8 *, u8);
+ /* parse lease context buffer and return oplock/epoch info */
+ __u8 (*parse_lease_buf)(void *, unsigned int *);
++ ssize_t (*query_all_EAs)(const unsigned int, struct cifs_tcon *,
++ const unsigned char *, const unsigned char *, char *,
++ size_t, const struct nls_table *, int);
++ int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
++ const char *, const void *, const __u16,
++ const struct nls_table *, int);
++ struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *,
++ const char *, u32 *);
++ int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
++ int);
+ };
+
+ struct smb_version_values {
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 49719b8228e5..5f8bdff3a758 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -518,10 +518,15 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
+
+- rc = CIFSSMBQAllEAs(xid, tcon, path, "SETFILEBITS",
+- ea_value, 4 /* size of buf */, cifs_sb->local_nls,
+- cifs_sb->mnt_cifs_flags &
+- CIFS_MOUNT_MAP_SPECIAL_CHR);
++ if (tcon->ses->server->ops->query_all_EAs == NULL) {
++ cifs_put_tlink(tlink);
++ return -EOPNOTSUPP;
++ }
++
++ rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path,
++ "SETFILEBITS", ea_value, 4 /* size of buf */,
++ cifs_sb->local_nls,
++ cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ cifs_put_tlink(tlink);
+ if (rc < 0)
+ return (int)rc;
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index e50554b75463..e6ed0dc3071b 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -1003,6 +1003,14 @@ struct smb_version_operations smb1_operations = {
+ .push_mand_locks = cifs_push_mandatory_locks,
+ .query_mf_symlink = open_query_close_cifs_symlink,
+ .is_read_op = cifs_is_read_op,
++#ifdef CONFIG_CIFS_XATTR
++ .query_all_EAs = CIFSSMBQAllEAs,
++ .set_EA = CIFSSMBSetEA,
++#endif /* CIFS_XATTR */
++#ifdef CONFIG_CIFS_ACL
++ .get_acl = get_cifs_acl,
++ .set_acl = set_cifs_acl,
++#endif /* CIFS_ACL */
+ };
+
+ struct smb_version_values smb1_values = {
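[Editorial note] All the CIFS hunks in this patch carry out one refactoring: calls to SMB1-only helpers (CIFSSMBQAllEAs, CIFSSMBSetEA, get_cifs_acl, set_cifs_acl) are routed through per-dialect function pointers in smb_version_operations, and every call site NULL-checks the pointer and fails with -EOPNOTSUPP, so SMB2 mounts no longer reach SMB1 wire code. A minimal sketch of that dispatch shape (names are illustrative, not the CIFS API):

    #include <errno.h>
    #include <stdio.h>

    struct server_ops {
        /* optional: NULL means this dialect cannot do it */
        int (*set_ea)(const char *path, const char *name, const char *value);
    };

    static int smb1_set_ea(const char *path, const char *name, const char *value)
    {
        printf("SMB1 set_ea %s:%s=%s\n", path, name, value);
        return 0;
    }

    static const struct server_ops smb1_ops = { .set_ea = smb1_set_ea };
    static const struct server_ops smb2_ops = { .set_ea = NULL };

    static int do_set_ea(const struct server_ops *ops,
                         const char *path, const char *name, const char *value)
    {
        if (ops->set_ea == NULL)
            return -EOPNOTSUPP;   /* dialect lacks the operation */
        return ops->set_ea(path, name, value);
    }

    int main(void)
    {
        printf("smb1: %d\n", do_set_ea(&smb1_ops, "/f", "user.x", "1"));
        printf("smb2: %d\n", do_set_ea(&smb2_ops, "/f", "user.x", "1"));
        return 0;
    }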
+diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
+index 09afda4cc58e..5ac836a86b18 100644
+--- a/fs/cifs/xattr.c
++++ b/fs/cifs/xattr.c
+@@ -82,9 +82,11 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
+ goto remove_ea_exit;
+
+ ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
+- rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, NULL,
+- (__u16)0, cifs_sb->local_nls,
+- cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
++ if (pTcon->ses->server->ops->set_EA)
++ rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
++ full_path, ea_name, NULL, (__u16)0,
++ cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
++ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ }
+ remove_ea_exit:
+ kfree(full_path);
+@@ -149,18 +151,22 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
+ cifs_dbg(FYI, "attempt to set cifs inode metadata\n");
+
+ ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
+- rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
+- (__u16)value_size, cifs_sb->local_nls,
+- cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
++ if (pTcon->ses->server->ops->set_EA)
++ rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
++ full_path, ea_name, ea_value, (__u16)value_size,
++ cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
++ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)
+ == 0) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ goto set_ea_exit;
+
+ ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
+- rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
+- (__u16)value_size, cifs_sb->local_nls,
+- cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
++ if (pTcon->ses->server->ops->set_EA)
++ rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
++ full_path, ea_name, ea_value, (__u16)value_size,
++ cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
++ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ } else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
+ strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
+ #ifdef CONFIG_CIFS_ACL
+@@ -170,8 +176,12 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
+ rc = -ENOMEM;
+ } else {
+ memcpy(pacl, ea_value, value_size);
+- rc = set_cifs_acl(pacl, value_size,
+- direntry->d_inode, full_path, CIFS_ACL_DACL);
++ if (pTcon->ses->server->ops->set_acl)
++ rc = pTcon->ses->server->ops->set_acl(pacl,
++ value_size, direntry->d_inode,
++ full_path, CIFS_ACL_DACL);
++ else
++ rc = -EOPNOTSUPP;
+ if (rc == 0) /* force revalidate of the inode */
+ CIFS_I(direntry->d_inode)->time = 0;
+ kfree(pacl);
+@@ -272,17 +282,21 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
+ /* revalidate/getattr then populate from inode */
+ } /* BB add else when above is implemented */
+ ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
+- rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
+- buf_size, cifs_sb->local_nls,
+- cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
++ if (pTcon->ses->server->ops->query_all_EAs)
++ rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
++ full_path, ea_name, ea_value, buf_size,
++ cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
++ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ goto get_ea_exit;
+
+ ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
+- rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
+- buf_size, cifs_sb->local_nls,
+- cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
++ if (pTcon->ses->server->ops->query_all_EAs)
++ rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
++ full_path, ea_name, ea_value, buf_size,
++ cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
++ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ } else if (strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
+ strlen(POSIX_ACL_XATTR_ACCESS)) == 0) {
+ #ifdef CONFIG_CIFS_POSIX
+@@ -313,8 +327,11 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
+ u32 acllen;
+ struct cifs_ntsd *pacl;
+
+- pacl = get_cifs_acl(cifs_sb, direntry->d_inode,
+- full_path, &acllen);
++ if (pTcon->ses->server->ops->get_acl == NULL)
++ goto get_ea_exit; /* rc already EOPNOTSUPP */
++
++ pacl = pTcon->ses->server->ops->get_acl(cifs_sb,
++ direntry->d_inode, full_path, &acllen);
+ if (IS_ERR(pacl)) {
+ rc = PTR_ERR(pacl);
+ cifs_dbg(VFS, "%s: error %zd getting sec desc\n",
+@@ -400,11 +417,12 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
+ /* if proc/fs/cifs/streamstoxattr is set then
+ search server for EAs or streams to
+ returns as xattrs */
+- rc = CIFSSMBQAllEAs(xid, pTcon, full_path, NULL, data,
+- buf_size, cifs_sb->local_nls,
+- cifs_sb->mnt_cifs_flags &
+- CIFS_MOUNT_MAP_SPECIAL_CHR);
+
++ if (pTcon->ses->server->ops->query_all_EAs)
++ rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
++ full_path, NULL, data, buf_size,
++ cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
++ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ list_ea_exit:
+ kfree(full_path);
+ free_xid(xid);
+diff --git a/fs/file.c b/fs/file.c
+index 4a78f981557a..9de20265a78c 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -34,7 +34,7 @@ static void *alloc_fdmem(size_t size)
+ * vmalloc() if the allocation size will be considered "large" by the VM.
+ */
+ if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+- void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
++ void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
+ if (data != NULL)
+ return data;
+ }
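[Editorial note] alloc_fdmem() already falls back to vmalloc when the kmalloc above fails, so there is no reason for the kmalloc attempt to retry hard or trip the OOM machinery hunting for a large contiguous area; __GFP_NORETRY makes the first attempt give up cheaply. A userspace model of the try-cheap-then-fall-back shape (the gfp flags themselves only exist in-kernel):

    #include <stdio.h>
    #include <stdlib.h>

    #define SMALL_LIMIT (64 * 1024)   /* stand-in for PAGE_SIZE << COSTLY_ORDER */

    /* Prefer the cheap contiguous allocator for small sizes, but do not
     * try hard (kernel: __GFP_NOWARN|__GFP_NORETRY), because a more
     * flexible fallback (kernel: vmalloc) is always available. */
    static void *alloc_table(size_t size)
    {
        if (size <= SMALL_LIMIT) {
            void *p = malloc(size);        /* stands in for kmalloc */
            if (p)
                return p;
            /* cheap path failed: fall through without retrying */
        }
        return calloc(1, size);            /* stands in for vmalloc */
    }

    int main(void)
    {
        void *p = alloc_table(128 * 1024);
        printf("%s\n", p ? "allocated" : "failed");
        free(p);
        return 0;
    }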
+diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
+index e066a3902973..ab798a88ec1d 100644
+--- a/fs/lockd/svclock.c
++++ b/fs/lockd/svclock.c
+@@ -779,6 +779,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
+ struct nlm_file *file = block->b_file;
+ struct nlm_lock *lock = &block->b_call->a_args.lock;
+ int error;
++ loff_t fl_start, fl_end;
+
+ dprintk("lockd: grant blocked lock %p\n", block);
+
+@@ -796,9 +797,16 @@ nlmsvc_grant_blocked(struct nlm_block *block)
+ }
+
+ /* Try the lock operation again */
++ /* vfs_lock_file() can mangle fl_start and fl_end, but we need
++ * them unchanged for the GRANT_MSG
++ */
+ lock->fl.fl_flags |= FL_SLEEP;
++ fl_start = lock->fl.fl_start;
++ fl_end = lock->fl.fl_end;
+ error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
+ lock->fl.fl_flags &= ~FL_SLEEP;
++ lock->fl.fl_start = fl_start;
++ lock->fl.fl_end = fl_end;
+
+ switch (error) {
+ case 0:
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 02b0df769e2d..e81a1cae81b5 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1852,6 +1852,11 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+ GFP_KERNEL)) {
+ SetPageUptodate(page);
+ unlock_page(page);
++ /*
++ * add_to_page_cache_lru() grabs an extra page refcount.
++ * Drop it here to avoid leaking this page later.
++ */
++ page_cache_release(page);
+ } else
+ __free_page(page);
+
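[Editorial note] The nfs_symlink() hunk is a pure reference-count fix: the allocation holds one page reference and add_to_page_cache_lru() takes another, so the success path must drop one or the page can never be freed. A toy refcount model of the two paths:

    #include <stdio.h>

    struct page { int refcount; };

    static void get_page(struct page *p) { p->refcount++; }
    static void put_page(struct page *p) { p->refcount--; }

    /* stand-in for add_to_page_cache_lru(): the cache takes its own ref */
    static int cache_add(struct page *p) { get_page(p); return 0; }

    int main(void)
    {
        struct page pg = { .refcount = 1 };   /* ref from allocation */

        if (cache_add(&pg) == 0)
            put_page(&pg);   /* the fix: drop the allocation ref */
        else
            put_page(&pg);   /* stand-in for __free_page() */

        printf("refs held by cache: %d\n", pg.refcount); /* 1, not 2 */
        return 0;
    }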
+diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
+index ded429966c1f..2507fd2a1eb4 100644
+--- a/include/linux/compiler-gcc4.h
++++ b/include/linux/compiler-gcc4.h
+@@ -75,11 +75,7 @@
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+-#if GCC_VERSION <= 40801
+-# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+-#else
+-# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
+-#endif
++#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+ #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+ #if GCC_VERSION >= 40400
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 6b02370256e4..39cfa0aca91f 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -1262,8 +1262,6 @@ typedef void (*usb_complete_t)(struct urb *);
+ * @sg: scatter gather buffer list, the buffer size of each element in
+ * the list (except the last) must be divisible by the endpoint's
+ * max packet size if no_sg_constraint isn't set in 'struct usb_bus'
+- * (FIXME: scatter-gather under xHCI is broken for periodic transfers.
+- * Do not use urb->sg for interrupt endpoints for now, only bulk.)
+ * @num_mapped_sgs: (internal) number of mapped sg entries
+ * @num_sgs: number of entries in the sg list
+ * @transfer_buffer_length: How big is transfer_buffer. The transfer may
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index 192a302d6cfd..8ab8e9390297 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -274,6 +274,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
+ {
+ return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+ }
++EXPORT_SYMBOL(irq_to_desc);
+
+ static void free_desc(unsigned int irq)
+ {
+diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
+index 7a925ba456fb..a6a5bf53e86d 100644
+--- a/kernel/time/jiffies.c
++++ b/kernel/time/jiffies.c
+@@ -51,7 +51,13 @@
+ * HZ shrinks, so values greater than 8 overflow 32bits when
+ * HZ=100.
+ */
++#if HZ < 34
++#define JIFFIES_SHIFT 6
++#elif HZ < 67
++#define JIFFIES_SHIFT 7
++#else
+ #define JIFFIES_SHIFT 8
++#endif
+
+ static cycle_t jiffies_read(struct clocksource *cs)
+ {
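[Editorial note] The jiffies clocksource converts ticks to nanoseconds by multiplying NSEC_PER_JIFFY (10^9/HZ) by 2^JIFFIES_SHIFT into a 32-bit mult field; with the old fixed shift of 8, low HZ values overflow it, which is what the new #if ladder avoids. A standalone check of the arithmetic behind the thresholds in the hunk:

    #include <stdint.h>
    #include <stdio.h>

    /* Thresholds from the hunk above: HZ < 34 -> 6, HZ < 67 -> 7, else 8. */
    static unsigned int jiffies_shift(unsigned int hz)
    {
        if (hz < 34)
            return 6;
        if (hz < 67)
            return 7;
        return 8;
    }

    int main(void)
    {
        const unsigned int hz_table[] = { 24, 32, 48, 100, 250, 1000 };

        for (unsigned int i = 0; i < sizeof(hz_table) / sizeof(hz_table[0]); i++) {
            unsigned int hz = hz_table[i];
            uint64_t nsec_per_jiffy = 1000000000ull / hz;
            uint64_t mult = nsec_per_jiffy << jiffies_shift(hz);
            uint64_t mult8 = nsec_per_jiffy << 8; /* the old fixed shift */

            printf("HZ=%4u shift=%u mult=%10llu (shift 8 would %s)\n",
                   hz, jiffies_shift(hz), (unsigned long long)mult,
                   mult8 > 0xffffffffull ? "overflow" : "fit");
        }
        return 0;
    }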
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index 218bcb565fed..8a95408b1345 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -755,6 +755,7 @@ out:
+ static void tick_broadcast_clear_oneshot(int cpu)
+ {
+ cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
++ cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
+ }
+
+ static void tick_broadcast_init_next_event(struct cpumask *mask,
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index cc2f66f68dc5..0e337eedb909 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2397,6 +2397,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+ write &= RB_WRITE_MASK;
+ tail = write - length;
+
++ /*
++ * If this is the first commit on the page, then it has the same
++ * timestamp as the page itself.
++ */
++ if (!tail)
++ delta = 0;
++
+ /* See if we shot pass the end of this buffer page */
+ if (unlikely(write > BUF_PAGE_SIZE))
+ return rb_move_tail(cpu_buffer, length, tail,
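[Editorial note] In the ftrace ring buffer an event's time is reconstructed as the page timestamp plus the accumulated per-event deltas; when tail == 0 the event being reserved is itself the start of the page and shares the page timestamp, so any nonzero delta would count the gap twice. A toy model of that invariant (not the real ring-buffer layout):

    #include <stdint.h>
    #include <stdio.h>

    #define EVENTS_PER_PAGE 4

    struct page {
        uint64_t page_ts;                 /* timestamp of first event */
        uint64_t last_ts;
        uint64_t delta[EVENTS_PER_PAGE];
        unsigned int tail;
    };

    static void append(struct page *p, uint64_t now)
    {
        uint64_t delta = now - p->last_ts;

        if (p->tail == 0) {
            /* first commit on the page shares the page timestamp */
            p->page_ts = now;
            delta = 0;
        }
        p->last_ts = now;
        p->delta[p->tail++] = delta;
    }

    int main(void)
    {
        struct page p = { 0 };
        uint64_t times[] = { 100, 103, 109 }, ts;

        for (int i = 0; i < 3; i++)
            append(&p, times[i]);

        ts = p.page_ts;   /* reconstruct: ts[i] = page_ts + sum of deltas */
        for (unsigned int i = 0; i < p.tail; i++) {
            ts += p.delta[i];
            printf("event %u ts=%llu\n", i, (unsigned long long)ts);
        }
        return 0;
    }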
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 428adeedd3be..de476c2f8827 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -945,8 +945,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ * to it. Similarly, page lock is shifted.
+ */
+ if (hpage != p) {
+- put_page(hpage);
+- get_page(p);
++ if (!(flags & MF_COUNT_INCREASED)) {
++ put_page(hpage);
++ get_page(p);
++ }
+ lock_page(p);
+ unlock_page(hpage);
+ *hpagep = p;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 9903ee585561..c6d417a3885f 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -995,8 +995,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
+ IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ err = ieee80211_assign_beacon(sdata, &params->beacon);
+- if (err < 0)
++ if (err < 0) {
++ ieee80211_vif_release_channel(sdata);
+ return err;
++ }
+ changed |= err;
+
+ err = drv_start_ap(sdata->local, sdata);
+@@ -1005,6 +1007,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
+ if (old)
+ kfree_rcu(old, rcu_head);
+ RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
++ ieee80211_vif_release_channel(sdata);
+ return err;
+ }
+
+@@ -2506,6 +2509,24 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
+ INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
+ INIT_LIST_HEAD(&roc->dependents);
+
++ /*
++ * cookie is either the roc cookie (for normal roc)
++ * or the SKB (for mgmt TX)
++ */
++ if (!txskb) {
++ /* local->mtx protects this */
++ local->roc_cookie_counter++;
++ roc->cookie = local->roc_cookie_counter;
++ /* wow, you wrapped 64 bits ... more likely a bug */
++ if (WARN_ON(roc->cookie == 0)) {
++ roc->cookie = 1;
++ local->roc_cookie_counter++;
++ }
++ *cookie = roc->cookie;
++ } else {
++ *cookie = (unsigned long)txskb;
++ }
++
+ /* if there's one pending or we're scanning, queue this one */
+ if (!list_empty(&local->roc_list) ||
+ local->scanning || local->radar_detect_enabled)
+@@ -2640,24 +2661,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
+ if (!queued)
+ list_add_tail(&roc->list, &local->roc_list);
+
+- /*
+- * cookie is either the roc cookie (for normal roc)
+- * or the SKB (for mgmt TX)
+- */
+- if (!txskb) {
+- /* local->mtx protects this */
+- local->roc_cookie_counter++;
+- roc->cookie = local->roc_cookie_counter;
+- /* wow, you wrapped 64 bits ... more likely a bug */
+- if (WARN_ON(roc->cookie == 0)) {
+- roc->cookie = 1;
+- local->roc_cookie_counter++;
+- }
+- *cookie = roc->cookie;
+- } else {
+- *cookie = (unsigned long)txskb;
+- }
+-
+ return 0;
+ }
+
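[Editorial note] The cfg.c reordering is an initialize-before-publish fix: once the roc item is on local->roc_list it can be started and completed by other contexts, so its cookie must be valid before the list_add, not after. The counter also skips 0 on (theoretical) 64-bit wraparound, because 0 is reserved as "no cookie". A minimal sketch of just the cookie allocation:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t cookie_counter;

    /* Allocate a non-zero cookie; callers treat 0 as "no cookie", so
     * skip it if the 64-bit counter ever wraps (in practice: never). */
    static uint64_t alloc_cookie(void)
    {
        uint64_t cookie = ++cookie_counter;

        if (cookie == 0)
            cookie = ++cookie_counter;
        return cookie;
    }

    int main(void)
    {
        cookie_counter = UINT64_MAX;   /* force the wrap for the demo */
        printf("%llu\n", (unsigned long long)alloc_cookie()); /* 1, not 0 */
        printf("%llu\n", (unsigned long long)alloc_cookie()); /* 2 */
        return 0;
    }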
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 1eb2b78e927b..81dca92176c7 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -874,7 +874,7 @@ static int ieee80211_fragment(struct ieee80211_tx_data *tx,
+ }
+
+ /* adjust first fragment's length */
+- skb->len = hdrlen + per_fragm;
++ skb_trim(skb, hdrlen + per_fragm);
+ return 0;
+ }
+
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 626dc3b5fd8d..c2853bbf8059 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -1655,9 +1655,10 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
+ * We can then retry with the larger buffer.
+ */
+ if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
+- !skb->len &&
++ !skb->len && !state->split &&
+ cb->min_dump_alloc < 4096) {
+ cb->min_dump_alloc = 4096;
++ state->split_start = 0;
+ rtnl_unlock();
+ return 1;
+ }
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 23708636b05c..25e5cb0aaef6 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -210,8 +210,8 @@ static void do_usb_entry(void *symval,
+ range_lo < 0x9 ? "[%X-9" : "[%X",
+ range_lo);
+ sprintf(alias + strlen(alias),
+- range_hi > 0xA ? "a-%X]" : "%X]",
+- range_lo);
++ range_hi > 0xA ? "A-%X]" : "%X]",
++ range_hi);
+ }
+ }
+ if (bcdDevice_initial_digits < (sizeof(bcdDevice_lo) * 2 - 1))
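[Editorial note] This file2alias fix corrects the bcdDevice wildcard that modprobe matches against device aliases: the closing half of a hex character class spanning 9..A must be printed from range_hi with an uppercase A (the alias digits come from %X), but the old code reused range_lo and a lowercase a. A standalone reimplementation of just the digit-class emitter, using the corrected logic:

    #include <stdio.h>

    /* Emit a glob character class matching one bcdDevice hex digit in
     * [lo, hi]. Hex digits sort as 0-9 then A-F, so a class spanning
     * 9..A needs two ranges. Mirrors the fixed logic above. */
    static void emit_digit_class(char *out, unsigned int lo, unsigned int hi)
    {
        if (lo == hi) {
            sprintf(out, "%X", lo);
        } else if (lo > 0x9 || hi < 0xA) {
            sprintf(out, "[%X-%X]", lo, hi);
        } else {
            out += sprintf(out, lo < 0x9 ? "[%X-9" : "[%X", lo);
            sprintf(out, hi > 0xA ? "A-%X]" : "%X]", hi);
        }
    }

    int main(void)
    {
        char buf[16];

        emit_digit_class(buf, 0x3, 0xC);
        printf("%s\n", buf);   /* [3-9A-C] */
        emit_digit_class(buf, 0xA, 0xF);
        printf("%s\n", buf);   /* [A-F] */
        return 0;
    }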
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6195a4f8d9b8..003a7ce5791c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4057,6 +4057,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
++ SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
+ SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
+index 88b2fe3ddf42..00d86427af0f 100644
+--- a/virt/kvm/coalesced_mmio.c
++++ b/virt/kvm/coalesced_mmio.c
+@@ -154,17 +154,13 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
+ list_add_tail(&dev->list, &kvm->coalesced_zones);
+ mutex_unlock(&kvm->slots_lock);
+
+- return ret;
++ return 0;
+
+ out_free_dev:
+ mutex_unlock(&kvm->slots_lock);
+-
+ kfree(dev);
+
+- if (dev == NULL)
+- return -ENXIO;
+-
+- return 0;
++ return ret;
+ }
+
+ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
diff --git a/1013_linux-3.12.14.patch b/1013_linux-3.12.14.patch
new file mode 100644
index 00000000..4f247420
--- /dev/null
+++ b/1013_linux-3.12.14.patch
@@ -0,0 +1,5950 @@
+diff --git a/Makefile b/Makefile
+index 0a89e7d84a2d..5d38a5a79b3a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index 15f2d5bf8875..cc8a2ca1d80a 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+ static inline void __flush_icache_all(void)
+ {
+ __flush_icache_preferred();
++ dsb();
+ }
+
+ /*
+diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
+index 4f2c28060c9a..05f8066255d0 100644
+--- a/arch/arm/include/asm/spinlock.h
++++ b/arch/arm/include/asm/spinlock.h
+@@ -44,18 +44,9 @@
+
+ static inline void dsb_sev(void)
+ {
+-#if __LINUX_ARM_ARCH__ >= 7
+- __asm__ __volatile__ (
+- "dsb ishst\n"
+- SEV
+- );
+-#else
+- __asm__ __volatile__ (
+- "mcr p15, 0, %0, c7, c10, 4\n"
+- SEV
+- : : "r" (0)
+- );
+-#endif
++
++ dsb(ishst);
++ __asm__(SEV);
+ }
+
+ /*
+diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
+index 579697adaae7..a60a97898090 100644
+--- a/arch/arm/mach-omap2/gpmc.c
++++ b/arch/arm/mach-omap2/gpmc.c
+@@ -1339,7 +1339,7 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
+ of_property_read_bool(np, "gpmc,time-para-granularity");
+ }
+
+-#ifdef CONFIG_MTD_NAND
++#if IS_ENABLED(CONFIG_MTD_NAND)
+
+ static const char * const nand_ecc_opts[] = {
+ [OMAP_ECC_HAMMING_CODE_DEFAULT] = "sw",
+@@ -1409,7 +1409,7 @@ static int gpmc_probe_nand_child(struct platform_device *pdev,
+ }
+ #endif
+
+-#ifdef CONFIG_MTD_ONENAND
++#if IS_ENABLED(CONFIG_MTD_ONENAND)
+ static int gpmc_probe_onenand_child(struct platform_device *pdev,
+ struct device_node *child)
+ {
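[Editorial note] The gpmc.c change matters when the MTD drivers are modular: =m defines CONFIG_MTD_NAND_MODULE rather than CONFIG_MTD_NAND, so the plain #ifdef compiled the probe helpers out. IS_ENABLED() is true for both =y and =m; below is a self-contained copy of the preprocessor trick it relies on, paraphrased from include/linux/kconfig.h:

    #include <stdio.h>

    /* A macro defined to 1 turns into "0," via the placeholder, which
     * shifts the trailing 1 into second-argument position. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x) ___is_defined(x)

    #define IS_BUILTIN(option) __is_defined(option)
    #define IS_MODULE(option)  __is_defined(option##_MODULE)
    #define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

    #define CONFIG_MTD_NAND_MODULE 1   /* what "=m" generates */

    int main(void)
    {
    #ifdef CONFIG_MTD_NAND
        printf("#ifdef sees the option\n");   /* not printed for =m */
    #endif
        printf("IS_ENABLED = %d\n", IS_ENABLED(CONFIG_MTD_NAND)); /* 1 */
        return 0;
    }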
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 1272ed202dde..a2d0f91c5bd7 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -1325,7 +1325,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ *handle = DMA_ERROR_CODE;
+ size = PAGE_ALIGN(size);
+
+- if (gfp & GFP_ATOMIC)
++ if (!(gfp & __GFP_WAIT))
+ return __iommu_alloc_atomic(dev, size, handle);
+
+ /*
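[Editorial note] This hunk (and the later x86 pci-dma one) stops testing gfp masks against GFP_ATOMIC: on 3.12, GFP_ATOMIC is just __GFP_HIGH ("high priority"), so `gfp & GFP_ATOMIC` misclassifies masks like GFP_NOWAIT (no bits set) as sleepable. The single __GFP_WAIT bit is the one that actually means "may sleep". A toy demonstration; the flag values below are taken from 3.12-era gfp.h and are illustrative:

    #include <stdio.h>

    #define ___GFP_WAIT 0x10u
    #define ___GFP_HIGH 0x20u
    #define ___GFP_IO   0x40u
    #define ___GFP_FS   0x80u

    #define GFP_ATOMIC  (___GFP_HIGH)   /* composite name, one bit */
    #define GFP_NOWAIT  0u
    #define GFP_KERNEL  (___GFP_WAIT | ___GFP_IO | ___GFP_FS)

    static const char *old_test(unsigned int gfp)
    {
        /* buggy: tests the "high priority" bit */
        return (gfp & GFP_ATOMIC) ? "atomic" : "may sleep";
    }

    static const char *new_test(unsigned int gfp)
    {
        /* fixed: tests the one bit that actually means "can sleep" */
        return (gfp & ___GFP_WAIT) ? "may sleep" : "atomic";
    }

    int main(void)
    {
        unsigned int cases[] = { GFP_ATOMIC, GFP_KERNEL, GFP_NOWAIT };
        const char *names[]  = { "GFP_ATOMIC", "GFP_KERNEL", "GFP_NOWAIT" };

        for (int i = 0; i < 3; i++)
            printf("%-10s old=%-9s new=%s\n",
                   names[i], old_test(cases[i]), new_test(cases[i]));
        return 0;   /* note GFP_NOWAIT: old says "may sleep" */
    }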
+diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
+index 1128064fddcb..c0b44fbaad98 100644
+--- a/arch/arm/mm/proc-v6.S
++++ b/arch/arm/mm/proc-v6.S
+@@ -208,7 +208,6 @@ __v6_setup:
+ mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
+- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ #ifdef CONFIG_MMU
+ mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
+ mcr p15, 0, r0, c2, c0, 2 @ TTB control register
+@@ -218,6 +217,8 @@ __v6_setup:
+ ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
+ mcr p15, 0, r8, c2, c0, 1 @ load TTB1
+ #endif /* CONFIG_MMU */
++ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and
++ @ complete invalidations
+ adr r5, v6_crval
+ ldmia r5, {r5, r6}
+ #ifdef CONFIG_CPU_ENDIAN_BE8
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index c63d9bdee51e..ced046d9f825 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -344,7 +344,6 @@ __v7_setup:
+
+ 4: mov r10, #0
+ mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
+- dsb
+ #ifdef CONFIG_MMU
+ mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
+ v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
+@@ -353,6 +352,7 @@ __v7_setup:
+ mcr p15, 0, r5, c10, c2, 0 @ write PRRR
+ mcr p15, 0, r6, c10, c2, 1 @ write NMRR
+ #endif
++ dsb @ Complete invalidations
+ #ifndef CONFIG_ARM_THUMBEE
+ mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
+ and r0, r0, #(0xf << 12) @ ThumbEE enabled field
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index d25459ff57fc..048334bb2651 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *frame)
+
+ frame->sp = fp + 0x10;
+ frame->fp = *(unsigned long *)(fp);
+- frame->pc = *(unsigned long *)(fp + 8);
++ /*
++ * -4 here because we care about the PC at time of bl,
++ * not where the return will go.
++ */
++ frame->pc = *(unsigned long *)(fp + 8) - 4;
+
+ return 0;
+ }
+diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
+index 22fb66590dcd..dba48a5d5bb9 100644
+--- a/arch/avr32/Makefile
++++ b/arch/avr32/Makefile
+@@ -11,7 +11,7 @@ all: uImage vmlinux.elf
+
+ KBUILD_DEFCONFIG := atstk1002_defconfig
+
+-KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic
++KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__
+ KBUILD_AFLAGS += -mrelax -mno-pic
+ KBUILD_CFLAGS_MODULE += -mno-relax
+ LDFLAGS_vmlinux += --relax
+diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c
+index 9764a1a1073e..c1466a872b9c 100644
+--- a/arch/avr32/boards/mimc200/fram.c
++++ b/arch/avr32/boards/mimc200/fram.c
+@@ -11,6 +11,7 @@
+ #define FRAM_VERSION "1.0"
+
+ #include <linux/miscdevice.h>
++#include <linux/module.h>
+ #include <linux/proc_fs.h>
+ #include <linux/mm.h>
+ #include <linux/io.h>
+diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
+index 84fdf6857c31..a613d2c82fd9 100644
+--- a/arch/powerpc/include/asm/compat.h
++++ b/arch/powerpc/include/asm/compat.h
+@@ -200,10 +200,11 @@ static inline void __user *arch_compat_alloc_user_space(long len)
+
+ /*
+ * We can't access below the stack pointer in the 32bit ABI and
+- * can access 288 bytes in the 64bit ABI
++ * can access 288 bytes in the 64bit big-endian ABI,
++ * or 512 bytes with the new ELFv2 little-endian ABI.
+ */
+ if (!is_32bit_task())
+- usp -= 288;
++ usp -= USER_REDZONE_SIZE;
+
+ return (void __user *) (usp - len);
+ }
+diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
+index becc08e6a65c..279b80f3bb29 100644
+--- a/arch/powerpc/include/asm/ptrace.h
++++ b/arch/powerpc/include/asm/ptrace.h
+@@ -28,11 +28,23 @@
+
+ #ifdef __powerpc64__
+
++/*
++ * Size of redzone that userspace is allowed to use below the stack
++ * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in
++ * the new ELFv2 little-endian ABI, so we allow the larger amount.
++ *
++ * For kernel code we allow a 288-byte redzone, in order to conserve
++ * kernel stack space; gcc currently only uses 288 bytes, and will
++ * hopefully allow explicit control of the redzone size in future.
++ */
++#define USER_REDZONE_SIZE 512
++#define KERNEL_REDZONE_SIZE 288
++
+ #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
+ #define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */
+ #define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265)
+ #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \
+- STACK_FRAME_OVERHEAD + 288)
++ STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
+ #define STACK_FRAME_MARKER 12
+
+ /* Size of dummy stack frame allocated when calling signal handler. */
+@@ -41,6 +53,8 @@
+
+ #else /* __powerpc64__ */
+
++#define USER_REDZONE_SIZE 0
++#define KERNEL_REDZONE_SIZE 0
+ #define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
+ #define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */
+ #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
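[Editorial note] The red zone is scratch space the ABI lets userspace use below the stack pointer without moving it, so anything the kernel writes under usp (the compat syscall scratch above, the signal frame later in this patch) must first skip USER_REDZONE_SIZE bytes. A small model of the arch_compat_alloc_user_space() arithmetic; the 16-byte alignment here is added for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define USER_REDZONE_SIZE 512   /* ELFv2 value from the hunk above */

    /* Carve scratch space on the user stack: step over the red zone
     * first, then reserve len bytes, aligned down. */
    static uintptr_t alloc_user_space(uintptr_t usp, size_t len)
    {
        usp -= USER_REDZONE_SIZE;
        usp -= len;
        return usp & ~(uintptr_t)15;
    }

    int main(void)
    {
        uintptr_t usp = 0x7fff0000;
        uintptr_t p = alloc_user_space(usp, 100);

        printf("usp=%#lx scratch=%#lx gap=%lu\n",
               (unsigned long)usp, (unsigned long)p,
               (unsigned long)(usp - p));   /* >= 512 + 100 */
        return 0;
    }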
+diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
+index 779a78c26435..c16ceb1c8328 100644
+--- a/arch/powerpc/kernel/crash_dump.c
++++ b/arch/powerpc/kernel/crash_dump.c
+@@ -98,17 +98,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
+ {
+ void *vaddr;
++ phys_addr_t paddr;
+
+ if (!csize)
+ return 0;
+
+ csize = min_t(size_t, csize, PAGE_SIZE);
++ paddr = pfn << PAGE_SHIFT;
+
+- if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
+- vaddr = __va(pfn << PAGE_SHIFT);
++ if (memblock_is_region_memory(paddr, csize)) {
++ vaddr = __va(paddr);
+ csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+ } else {
+- vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
++ vaddr = __ioremap(paddr, PAGE_SIZE, 0);
+ csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+ iounmap(vaddr);
+ }
+diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
+index 2b0ad9845363..ace34137a501 100644
+--- a/arch/powerpc/kernel/misc_32.S
++++ b/arch/powerpc/kernel/misc_32.S
+@@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq)
+ mtlr r0
+ blr
+
++/*
++ * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
++ */
+ _GLOBAL(call_do_irq)
+ mflr r0
+ stw r0,4(r1)
+ lwz r10,THREAD+KSP_LIMIT(r2)
+- addi r11,r3,THREAD_INFO_GAP
++ addi r11,r4,THREAD_INFO_GAP
+ stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+ mr r1,r4
+ stw r10,8(r1)
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 1e7ba8817106..36b1d1daa236 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -65,8 +65,8 @@ struct rt_sigframe {
+ struct siginfo __user *pinfo;
+ void __user *puc;
+ struct siginfo info;
+- /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
+- char abigap[288];
++ /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
++ char abigap[USER_REDZONE_SIZE];
+ } __attribute__ ((aligned (16)));
+
+ static const char fmt32[] = KERN_INFO \
+diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
+index cf42e74514fa..b7eb5d4f4c89 100644
+--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
++++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
+@@ -463,8 +463,7 @@ static int ioda_eeh_bridge_reset(struct pci_controller *hose,
+ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
+ {
+ struct pci_controller *hose = pe->phb;
+- struct eeh_dev *edev;
+- struct pci_dev *dev;
++ struct pci_bus *bus;
+ int ret;
+
+ /*
+@@ -493,31 +492,11 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
+ if (pe->type & EEH_PE_PHB) {
+ ret = ioda_eeh_phb_reset(hose, option);
+ } else {
+- if (pe->type & EEH_PE_DEVICE) {
+- /*
+- * If it's device PE, we didn't refer to the parent
+- * PCI bus yet. So we have to figure it out indirectly.
+- */
+- edev = list_first_entry(&pe->edevs,
+- struct eeh_dev, list);
+- dev = eeh_dev_to_pci_dev(edev);
+- dev = dev->bus->self;
+- } else {
+- /*
+- * If it's bus PE, the parent PCI bus is already there
+- * and just pick it up.
+- */
+- dev = pe->bus->self;
+- }
+-
+- /*
+- * Do reset based on the fact that the direct upstream bridge
+- * is root bridge (port) or not.
+- */
+- if (dev->bus->number == 0)
++ bus = eeh_pe_bus_get(pe);
++ if (pci_is_root_bus(bus))
+ ret = ioda_eeh_root_reset(hose, option);
+ else
+- ret = ioda_eeh_bridge_reset(hose, dev, option);
++ ret = ioda_eeh_bridge_reset(hose, bus->self, option);
+ }
+
+ return ret;
+diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+index 82789e79e539..0ea99e3d4815 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+@@ -35,12 +35,7 @@
+ #include "offline_states.h"
+
+ /* This version can't take the spinlock, because it never returns */
+-static struct rtas_args rtas_stop_self_args = {
+- .token = RTAS_UNKNOWN_SERVICE,
+- .nargs = 0,
+- .nret = 1,
+- .rets = &rtas_stop_self_args.args[0],
+-};
++static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
+
+ static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
+ CPU_STATE_OFFLINE;
+@@ -93,15 +88,20 @@ void set_default_offline_state(int cpu)
+
+ static void rtas_stop_self(void)
+ {
+- struct rtas_args *args = &rtas_stop_self_args;
++ struct rtas_args args = {
++ .token = cpu_to_be32(rtas_stop_self_token),
++ .nargs = 0,
++ .nret = 1,
++ .rets = &args.args[0],
++ };
+
+ local_irq_disable();
+
+- BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
++ BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
+
+ printk("cpu %u (hwid %u) Ready to die...\n",
+ smp_processor_id(), hard_smp_processor_id());
+- enter_rtas(__pa(args));
++ enter_rtas(__pa(&args));
+
+ panic("Alas, I survived.\n");
+ }
+@@ -392,10 +392,10 @@ static int __init pseries_cpu_hotplug_init(void)
+ }
+ }
+
+- rtas_stop_self_args.token = rtas_token("stop-self");
++ rtas_stop_self_token = rtas_token("stop-self");
+ qcss_tok = rtas_token("query-cpu-stopped-state");
+
+- if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
++ if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
+ qcss_tok == RTAS_UNKNOWN_SERVICE) {
+ printk(KERN_INFO "CPU Hotplug not supported by firmware "
+ "- disabling.\n");
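[Editorial note] The stop-self fix has two halves: the rtas_args block moves from static storage onto the stack so concurrently dying CPUs do not share it, and the token is byte-swapped per call because the firmware interface is big-endian. One C subtlety worth noting is the self-referential initializer; taking the address of an object inside its own initializer is legal. A small demo, with htonl standing in for cpu_to_be32 and illustrative names:

    #include <arpa/inet.h>   /* htonl, userspace stand-in for cpu_to_be32 */
    #include <stdint.h>
    #include <stdio.h>

    struct rtas_args_model {
        uint32_t token;
        uint32_t nargs, nret;
        uint32_t args[4];
        uint32_t *rets;      /* points into args[] of this very struct */
    };

    static void stop_self(uint32_t token)
    {
        /* on-stack copy: safe if many CPUs run this at once */
        struct rtas_args_model args = {
            .token = htonl(token),   /* firmware wants big-endian */
            .nargs = 0,
            .nret  = 1,
            .rets  = &args.args[0],  /* legal: args is in scope here */
        };

        printf("token(be)=%#x rets==&args[0]: %s\n",
               (unsigned)args.token,
               args.rets == &args.args[0] ? "yes" : "no");
    }

    int main(void)
    {
        stop_self(0x1234);
        return 0;
    }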
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 8a87a3224121..5edd3c0b437a 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -1192,6 +1192,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
+ for (i = 0; i < cpuc->n_events; i++) {
+ if (event == cpuc->event_list[i]) {
+
++ if (i >= cpuc->n_events - cpuc->n_added)
++ --cpuc->n_added;
++
+ if (x86_pmu.put_event_constraints)
+ x86_pmu.put_event_constraints(cpuc, event);
+
+diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
+index 872079a67e4d..f7d0672481fd 100644
+--- a/arch/x86/kernel/pci-dma.c
++++ b/arch/x86/kernel/pci-dma.c
+@@ -100,8 +100,10 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+ flag |= __GFP_ZERO;
+ again:
+ page = NULL;
+- if (!(flag & GFP_ATOMIC))
++ /* CMA can be used only in the context which permits sleeping */
++ if (flag & __GFP_WAIT)
+ page = dma_alloc_from_contiguous(dev, count, get_order(size));
++ /* fallback */
+ if (!page)
+ page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+ if (!page)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 16dc55a39fa3..92af83d79c97 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6141,7 +6141,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
+ frag->len -= len;
+ }
+
+- if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
++ if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
+ vcpu->mmio_needed = 0;
+
+ /* FIXME: return into emulator if single-stepping. */
+diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
+index 917488a0ab00..f2faa58f9a43 100644
+--- a/arch/xtensa/include/asm/traps.h
++++ b/arch/xtensa/include/asm/traps.h
+@@ -22,25 +22,37 @@ extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
+
+ static inline void spill_registers(void)
+ {
+-
++#if XCHAL_NUM_AREGS > 16
+ __asm__ __volatile__ (
+- "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t"
+- "mov a12, a0\n\t"
+- "rsr a13, sar\n\t"
+- "xsr a14, ps\n\t"
+- "movi a0, _spill_registers\n\t"
+- "rsync\n\t"
+- "callx0 a0\n\t"
+- "mov a0, a12\n\t"
+- "wsr a13, sar\n\t"
+- "wsr a14, ps\n\t"
+- : :
+-#if defined(CONFIG_FRAME_POINTER)
+- : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15",
++ " call12 1f\n"
++ " _j 2f\n"
++ " retw\n"
++ " .align 4\n"
++ "1:\n"
++ " _entry a1, 48\n"
++ " addi a12, a0, 3\n"
++#if XCHAL_NUM_AREGS > 32
++ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
++ " _entry a1, 48\n"
++ " mov a12, a0\n"
++ " .endr\n"
++#endif
++ " _entry a1, 48\n"
++#if XCHAL_NUM_AREGS % 12 == 0
++ " mov a8, a8\n"
++#elif XCHAL_NUM_AREGS % 12 == 4
++ " mov a12, a12\n"
++#elif XCHAL_NUM_AREGS % 12 == 8
++ " mov a4, a4\n"
++#endif
++ " retw\n"
++ "2:\n"
++ : : : "a12", "a13", "memory");
+ #else
+- : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
++ __asm__ __volatile__ (
++ " mov a12, a12\n"
++ : : : "memory");
+ #endif
+- "memory");
+ }
+
+ #endif /* _XTENSA_TRAPS_H */
+diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
+index 21dbe6bdb8ed..b61e25146a2f 100644
+--- a/arch/xtensa/kernel/entry.S
++++ b/arch/xtensa/kernel/entry.S
+@@ -1117,6 +1117,13 @@ ENDPROC(fast_syscall_spill_registers)
+ * We basically restore WINDOWBASE and WINDOWSTART to the condition when
+ * we entered the spill routine and jump to the user exception handler.
+ *
++ * Note that we only need to restore the bits in windowstart that have not
++ * been spilled yet by the _spill_register routine. Luckily, a3 contains a
++ * rotated windowstart with only those bits set for frames that haven't been
++ * spilled yet. Because a3 is rotated such that bit 0 represents the register
++ * frame for the current windowbase - 1, we need to rotate a3 left by the
++ * value of the current windowbase + 1 and move it to windowstart.
++ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+@@ -1131,10 +1138,15 @@ ENTRY(fast_syscall_spill_registers_fixup)
+ /* We need to make sure the current registers (a0-a3) are preserved.
+ * To do this, we simply set the bit for the current window frame
+ * in WS, so that the exception handlers save them to the task stack.
++ *
++ * Note: we use a3 to set the windowbase, so we take a special care
++ * of it, saving it in the original _spill_registers frame across
++ * the exception handler call.
+ */
+
+ xsr a3, excsave1 # get spill-mask
+ slli a3, a3, 1 # shift left by one
++ addi a3, a3, 1 # set the bit for the current window frame
+
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
+@@ -1794,6 +1806,43 @@ ENTRY(system_call)
+
+ ENDPROC(system_call)
+
++/*
++ * Spill live registers on the kernel stack macro.
++ *
++ * Entry condition: ps.woe is set, ps.excm is cleared
++ * Exit condition: windowstart has single bit set
++ * May clobber: a12, a13
++ */
++ .macro spill_registers_kernel
++
++#if XCHAL_NUM_AREGS > 16
++ call12 1f
++ _j 2f
++ retw
++ .align 4
++1:
++ _entry a1, 48
++ addi a12, a0, 3
++#if XCHAL_NUM_AREGS > 32
++ .rept (XCHAL_NUM_AREGS - 32) / 12
++ _entry a1, 48
++ mov a12, a0
++ .endr
++#endif
++ _entry a1, 48
++#if XCHAL_NUM_AREGS % 12 == 0
++ mov a8, a8
++#elif XCHAL_NUM_AREGS % 12 == 4
++ mov a12, a12
++#elif XCHAL_NUM_AREGS % 12 == 8
++ mov a4, a4
++#endif
++ retw
++2:
++#else
++ mov a12, a12
++#endif
++ .endm
+
+ /*
+ * Task switch.
+@@ -1806,21 +1855,20 @@ ENTRY(_switch_to)
+
+ entry a1, 16
+
+- mov a12, a2 # preserve 'prev' (a2)
+- mov a13, a3 # and 'next' (a3)
++ mov a10, a2 # preserve 'prev' (a2)
++ mov a11, a3 # and 'next' (a3)
+
+ l32i a4, a2, TASK_THREAD_INFO
+ l32i a5, a3, TASK_THREAD_INFO
+
+- save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
++ save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
+
+- s32i a0, a12, THREAD_RA # save return address
+- s32i a1, a12, THREAD_SP # save stack pointer
++ s32i a0, a10, THREAD_RA # save return address
++ s32i a1, a10, THREAD_SP # save stack pointer
+
+ /* Disable ints while we manipulate the stack pointer. */
+
+- movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
+- xsr a14, ps
++ rsil a14, LOCKLEVEL
+ rsr a3, excsave1
+ rsync
+ s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
+@@ -1835,7 +1883,7 @@ ENTRY(_switch_to)
+
+ /* Flush register file. */
+
+- call0 _spill_registers # destroys a3, a4, and SAR
++ spill_registers_kernel
+
+ /* Set kernel stack (and leave critical section)
+ * Note: It's save to set it here. The stack will not be overwritten
+@@ -1851,13 +1899,13 @@ ENTRY(_switch_to)
+
+ /* restore context of the task 'next' */
+
+- l32i a0, a13, THREAD_RA # restore return address
+- l32i a1, a13, THREAD_SP # restore stack pointer
++ l32i a0, a11, THREAD_RA # restore return address
++ l32i a1, a11, THREAD_SP # restore stack pointer
+
+- load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
++ load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
+
+ wsr a14, ps
+- mov a2, a12 # return 'prev'
++ mov a2, a10 # return 'prev'
+ rsync
+
+ retw
+diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
+index 41c5e1b799ef..f658e0948703 100644
+--- a/drivers/acpi/pci_irq.c
++++ b/drivers/acpi/pci_irq.c
+@@ -432,6 +432,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
+ pin_name(pin));
+ }
+
++ kfree(entry);
+ return 0;
+ }
+
+diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
+index e7dd2c1fee79..5e47d7bf4745 100644
+--- a/drivers/acpi/processor_throttling.c
++++ b/drivers/acpi/processor_throttling.c
+@@ -59,6 +59,12 @@ struct throttling_tstate {
+ int target_state; /* target T-state */
+ };
+
++struct acpi_processor_throttling_arg {
++ struct acpi_processor *pr;
++ int target_state;
++ bool force;
++};
++
+ #define THROTTLING_PRECHANGE (1)
+ #define THROTTLING_POSTCHANGE (2)
+
+@@ -1063,16 +1069,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
+ return 0;
+ }
+
++static long acpi_processor_throttling_fn(void *data)
++{
++ struct acpi_processor_throttling_arg *arg = data;
++ struct acpi_processor *pr = arg->pr;
++
++ return pr->throttling.acpi_processor_set_throttling(pr,
++ arg->target_state, arg->force);
++}
++
+ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force)
+ {
+- cpumask_var_t saved_mask;
+ int ret = 0;
+ unsigned int i;
+ struct acpi_processor *match_pr;
+ struct acpi_processor_throttling *p_throttling;
++ struct acpi_processor_throttling_arg arg;
+ struct throttling_tstate t_state;
+- cpumask_var_t online_throttling_cpus;
+
+ if (!pr)
+ return -EINVAL;
+@@ -1083,14 +1097,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ if ((state < 0) || (state > (pr->throttling.state_count - 1)))
+ return -EINVAL;
+
+- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+- return -ENOMEM;
+-
+- if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
+- free_cpumask_var(saved_mask);
+- return -ENOMEM;
+- }
+-
+ if (cpu_is_offline(pr->id)) {
+ /*
+ * the cpu pointed by pr->id is offline. Unnecessary to change
+@@ -1099,17 +1105,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ return -ENODEV;
+ }
+
+- cpumask_copy(saved_mask, &current->cpus_allowed);
+ t_state.target_state = state;
+ p_throttling = &(pr->throttling);
+- cpumask_and(online_throttling_cpus, cpu_online_mask,
+- p_throttling->shared_cpu_map);
++
+ /*
+ * The throttling notifier will be called for every
+ * affected cpu in order to get one proper T-state.
+ * The notifier event is THROTTLING_PRECHANGE.
+ */
+- for_each_cpu(i, online_throttling_cpus) {
++ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
+ t_state.cpu = i;
+ acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
+ &t_state);
+@@ -1121,21 +1125,18 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ * it can be called only for the cpu pointed by pr.
+ */
+ if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
+- /* FIXME: use work_on_cpu() */
+- if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+- /* Can't migrate to the pr->id CPU. Exit */
+- ret = -ENODEV;
+- goto exit;
+- }
+- ret = p_throttling->acpi_processor_set_throttling(pr,
+- t_state.target_state, force);
++ arg.pr = pr;
++ arg.target_state = state;
++ arg.force = force;
++ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
+ } else {
+ /*
+ * When the T-state coordination is SW_ALL or HW_ALL,
+ * it is necessary to set T-state for every affected
+ * cpus.
+ */
+- for_each_cpu(i, online_throttling_cpus) {
++ for_each_cpu_and(i, cpu_online_mask,
++ p_throttling->shared_cpu_map) {
+ match_pr = per_cpu(processors, i);
+ /*
+ * If the pointer is invalid, we will report the
+@@ -1156,13 +1157,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ "on CPU %d\n", i));
+ continue;
+ }
+- t_state.cpu = i;
+- /* FIXME: use work_on_cpu() */
+- if (set_cpus_allowed_ptr(current, cpumask_of(i)))
+- continue;
+- ret = match_pr->throttling.
+- acpi_processor_set_throttling(
+- match_pr, t_state.target_state, force);
++
++ arg.pr = match_pr;
++ arg.target_state = state;
++ arg.force = force;
++ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
++ &arg);
+ }
+ }
+ /*
+@@ -1171,17 +1171,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ * affected cpu to update the T-states.
+ * The notifier event is THROTTLING_POSTCHANGE
+ */
+- for_each_cpu(i, online_throttling_cpus) {
++ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
+ t_state.cpu = i;
+ acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
+ &t_state);
+ }
+- /* restore the previous state */
+- /* FIXME: use work_on_cpu() */
+- set_cpus_allowed_ptr(current, saved_mask);
+-exit:
+- free_cpumask_var(online_throttling_cpus);
+- free_cpumask_var(saved_mask);
++
+ return ret;
+ }
+
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index f193285968f8..5708e44376fe 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -729,6 +729,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
+ union acpi_object *o;
+ struct acpi_video_device_brightness *br = NULL;
+ int result = -EINVAL;
++ u32 value;
+
+ if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
+@@ -759,7 +760,12 @@ acpi_video_init_brightness(struct acpi_video_device *device)
+ printk(KERN_ERR PREFIX "Invalid data\n");
+ continue;
+ }
+- br->levels[count] = (u32) o->integer.value;
++ value = (u32) o->integer.value;
++ /* Skip duplicate entries */
++ if (count > 2 && br->levels[count - 1] == value)
++ continue;
++
++ br->levels[count] = value;
+
+ if (br->levels[count] > max_level)
+ max_level = br->levels[count];
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index cfc6073c0487..dc11b7a64376 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -61,6 +61,7 @@ enum board_ids {
+ /* board IDs by feature in alphabetical order */
+ board_ahci,
+ board_ahci_ign_iferr,
++ board_ahci_noncq,
+ board_ahci_nosntf,
+ board_ahci_yes_fbs,
+
+@@ -119,6 +120,13 @@ static const struct ata_port_info ahci_port_info[] = {
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
++ [board_ahci_noncq] = {
++ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
+ [board_ahci_nosntf] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
+ .flags = AHCI_FLAG_COMMON,
+@@ -450,6 +458,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
+ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+
++ /*
++ * Samsung SSDs found on some macbooks. NCQ times out.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=60731
++ */
++ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
++
+ /* Enmotus */
+ { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
+
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index 20fd337a5731..7ccc084bf1df 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -447,8 +447,11 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ * otherwise. Don't try hard to recover it.
+ */
+ ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
+- } else if (vendor == 0x197b && devid == 0x2352) {
+- /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
++ } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
++ /*
++ * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
++ * 0x0325: jmicron JMB394.
++ */
+ ata_for_each_link(link, ap, EDGE) {
+ /* SRST breaks detection and disks get misclassified
+ * LPM disabled to avoid potential problems
+diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
+index d67fc351343c..b7695e804635 100644
+--- a/drivers/ata/sata_sil.c
++++ b/drivers/ata/sata_sil.c
+@@ -157,6 +157,7 @@ static const struct sil_drivelist {
+ { "ST380011ASL", SIL_QUIRK_MOD15WRITE },
+ { "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
+ { "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
++ { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
+ { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
+ { }
+ };
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index 10a4467c63f1..701212ba38b7 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -1529,6 +1529,7 @@ static int fw_pm_notify(struct notifier_block *notify_block,
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
++ case PM_RESTORE_PREPARE:
+ kill_requests_without_uevent();
+ device_cache_fw_images();
+ break;
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index 2344a9ed17f3..47b6931fd42f 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -1100,7 +1100,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
+ {
+ struct powernow_k8_data *data;
+ struct init_on_cpu init_on_cpu;
+- int rc;
++ int rc, cpu;
+
+ smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
+ if (rc)
+@@ -1169,7 +1169,9 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
+ pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
+ data->currfid, data->currvid);
+
+- per_cpu(powernow_data, pol->cpu) = data;
++ /* Point all the CPUs in this policy to the same data */
++ for_each_cpu(cpu, pol->cpus)
++ per_cpu(powernow_data, cpu) = data;
+
+ return 0;
+
+@@ -1184,6 +1186,7 @@ err_out:
+ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
+ {
+ struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
++ int cpu;
+
+ if (!data)
+ return -EINVAL;
+@@ -1194,7 +1197,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
+
+ kfree(data->powernow_table);
+ kfree(data);
+- per_cpu(powernow_data, pol->cpu) = NULL;
++ for_each_cpu(cpu, pol->cpus)
++ per_cpu(powernow_data, cpu) = NULL;
+
+ return 0;
+ }
+diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
+index 5ff6fc1819dc..a6d117728224 100644
+--- a/drivers/dma/ioat/dma.c
++++ b/drivers/dma/ioat/dma.c
+@@ -77,7 +77,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+ attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
+ for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
+ chan = ioat_chan_by_index(instance, bit);
+- tasklet_schedule(&chan->cleanup_task);
++ if (test_bit(IOAT_RUN, &chan->state))
++ tasklet_schedule(&chan->cleanup_task);
+ }
+
+ writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+@@ -93,7 +94,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+ {
+ struct ioat_chan_common *chan = data;
+
+- tasklet_schedule(&chan->cleanup_task);
++ if (test_bit(IOAT_RUN, &chan->state))
++ tasklet_schedule(&chan->cleanup_task);
+
+ return IRQ_HANDLED;
+ }
+@@ -116,7 +118,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
+ chan->timer.function = device->timer_fn;
+ chan->timer.data = data;
+ tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
+- tasklet_disable(&chan->cleanup_task);
+ }
+
+ /**
+@@ -354,13 +355,49 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
+ writel(((u64) chan->completion_dma) >> 32,
+ chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+- tasklet_enable(&chan->cleanup_task);
++ set_bit(IOAT_RUN, &chan->state);
+ ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
+ dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
+ __func__, ioat->desccount);
+ return ioat->desccount;
+ }
+
++void ioat_stop(struct ioat_chan_common *chan)
++{
++ struct ioatdma_device *device = chan->device;
++ struct pci_dev *pdev = device->pdev;
++ int chan_id = chan_num(chan);
++ struct msix_entry *msix;
++
++ /* 1/ stop irq from firing tasklets
++ * 2/ stop the tasklet from re-arming irqs
++ */
++ clear_bit(IOAT_RUN, &chan->state);
++
++ /* flush inflight interrupts */
++ switch (device->irq_mode) {
++ case IOAT_MSIX:
++ msix = &device->msix_entries[chan_id];
++ synchronize_irq(msix->vector);
++ break;
++ case IOAT_MSI:
++ case IOAT_INTX:
++ synchronize_irq(pdev->irq);
++ break;
++ default:
++ break;
++ }
++
++ /* flush inflight timers */
++ del_timer_sync(&chan->timer);
++
++ /* flush inflight tasklet runs */
++ tasklet_kill(&chan->cleanup_task);
++
++ /* final cleanup now that everything is quiesced and can't re-arm */
++ device->cleanup_fn((unsigned long) &chan->common);
++}
++
+ /**
+ * ioat1_dma_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+@@ -379,9 +416,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
+ if (ioat->desccount == 0)
+ return;
+
+- tasklet_disable(&chan->cleanup_task);
+- del_timer_sync(&chan->timer);
+- ioat1_cleanup(ioat);
++ ioat_stop(chan);
+
+ /* Delay 100ms after reset to allow internal DMA logic to quiesce
+ * before removing DMA descriptor resources.
+@@ -526,8 +561,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
+ static void ioat1_cleanup_event(unsigned long data)
+ {
+ struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
++ struct ioat_chan_common *chan = &ioat->base;
+
+ ioat1_cleanup(ioat);
++ if (!test_bit(IOAT_RUN, &chan->state))
++ return;
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+ }
+
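[Editor's note] ioat_stop() above is a clean example of teardown ordering for an irq -> timer -> tasklet pipeline: first flip the state bit so nothing re-arms, then flush each asynchronous context in turn, and only then run the final cleanup. Reduced to its generic shape — struct my_chan, MY_CHAN_RUN and the field names are hypothetical:

    #include <linux/interrupt.h>
    #include <linux/timer.h>

    struct my_chan {
            unsigned long state;            /* MY_CHAN_RUN lives here */
            int irq;
            struct timer_list timer;
            struct tasklet_struct cleanup_task;
    };

    static void my_chan_stop(struct my_chan *chan)
    {
            /* 1/ the irq handler stops scheduling the tasklet,
             * 2/ the tasklet stops re-arming the hardware */
            clear_bit(MY_CHAN_RUN, &chan->state);

            synchronize_irq(chan->irq);        /* flush in-flight irq */
            del_timer_sync(&chan->timer);      /* flush pending timer */
            tasklet_kill(&chan->cleanup_task); /* flush tasklet runs */

            /* quiesced and unable to re-arm: safe to clean up */
    }
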
+diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
+index 54fb7b9ff9aa..a1d78847e5a5 100644
+--- a/drivers/dma/ioat/dma.h
++++ b/drivers/dma/ioat/dma.h
+@@ -370,6 +370,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+ void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
+ void ioat_kobject_del(struct ioatdma_device *device);
+ int ioat_dma_setup_interrupts(struct ioatdma_device *device);
++void ioat_stop(struct ioat_chan_common *chan);
+ extern const struct sysfs_ops ioat_sysfs_ops;
+ extern struct ioat_sysfs_entry ioat_version_attr;
+ extern struct ioat_sysfs_entry ioat_cap_attr;
+diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
+index b925e1b1d139..1cd761026d84 100644
+--- a/drivers/dma/ioat/dma_v2.c
++++ b/drivers/dma/ioat/dma_v2.c
+@@ -190,8 +190,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
+ void ioat2_cleanup_event(unsigned long data)
+ {
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
++ struct ioat_chan_common *chan = &ioat->base;
+
+ ioat2_cleanup(ioat);
++ if (!test_bit(IOAT_RUN, &chan->state))
++ return;
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+ }
+
+@@ -553,10 +556,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
+ ioat->issued = 0;
+ ioat->tail = 0;
+ ioat->alloc_order = order;
++ set_bit(IOAT_RUN, &chan->state);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+
+- tasklet_enable(&chan->cleanup_task);
+ ioat2_start_null_desc(ioat);
+
+ /* check that we got off the ground */
+@@ -566,7 +569,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
+ } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
+
+ if (is_ioat_active(status) || is_ioat_idle(status)) {
+- set_bit(IOAT_RUN, &chan->state);
+ return 1 << ioat->alloc_order;
+ } else {
+ u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+@@ -809,11 +811,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
+ if (!ioat->ring)
+ return;
+
+- tasklet_disable(&chan->cleanup_task);
+- del_timer_sync(&chan->timer);
+- device->cleanup_fn((unsigned long) c);
++ ioat_stop(chan);
+ device->reset_hw(chan);
+- clear_bit(IOAT_RUN, &chan->state);
+
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
+diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
+index 3efc4dcf2095..476017f7ea02 100644
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -627,8 +627,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
+ static void ioat3_cleanup_event(unsigned long data)
+ {
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
++ struct ioat_chan_common *chan = &ioat->base;
+
+ ioat3_cleanup(ioat);
++ if (!test_bit(IOAT_RUN, &chan->state))
++ return;
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+ }
+
+diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
+index 82d2b97ad942..20f22ecb712e 100644
+--- a/drivers/dma/ste_dma40.c
++++ b/drivers/dma/ste_dma40.c
+@@ -1640,6 +1640,7 @@ static void dma_tasklet(unsigned long data)
+ struct d40_chan *d40c = (struct d40_chan *) data;
+ struct d40_desc *d40d;
+ unsigned long flags;
++ bool callback_active;
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+@@ -1667,6 +1668,7 @@ static void dma_tasklet(unsigned long data)
+ }
+
+ /* Callback to client */
++ callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
+ callback = d40d->txd.callback;
+ callback_param = d40d->txd.callback_param;
+
+@@ -1689,7 +1691,7 @@ static void dma_tasklet(unsigned long data)
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+- if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
++ if (callback_active && callback)
+ callback(callback_param);
+
+ return;
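[Editor's note] The ste_dma40 fix above is a use-after-free repair: d40d->txd.flags was tested after the channel lock was dropped, by which point the descriptor can already have been recycled. The cure is to snapshot everything the callback needs while the lock is still held — a fragment of the general pattern, with hypothetical chan/desc names:

    unsigned long flags;
    bool active;
    dma_async_tx_callback cb;
    void *cb_param;

    spin_lock_irqsave(&chan->lock, flags);
    /* copy out all descriptor state needed after unlock; the
     * descriptor may be freed/reused once the lock is released */
    active   = !!(desc->txd.flags & DMA_PREP_INTERRUPT);
    cb       = desc->txd.callback;
    cb_param = desc->txd.callback_param;
    /* ... move desc back to the free list ... */
    spin_unlock_irqrestore(&chan->lock, flags);

    if (active && cb)
            cb(cb_param);
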
+diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
+index 9004c64b169e..841eee387478 100644
+--- a/drivers/edac/i7300_edac.c
++++ b/drivers/edac/i7300_edac.c
+@@ -943,33 +943,35 @@ static int i7300_get_devices(struct mem_ctl_info *mci)
+
+ /* Attempt to 'get' the MCH register we want */
+ pdev = NULL;
+- while (!pvt->pci_dev_16_1_fsb_addr_map ||
+- !pvt->pci_dev_16_2_fsb_err_regs) {
+- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
+- if (!pdev) {
+- /* End of list, leave */
+- i7300_printk(KERN_ERR,
+- "'system address,Process Bus' "
+- "device not found:"
+- "vendor 0x%x device 0x%x ERR funcs "
+- "(broken BIOS?)\n",
+- PCI_VENDOR_ID_INTEL,
+- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
+- goto error;
+- }
+-
++ while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
++ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
++ pdev))) {
+ /* Store device 16 funcs 1 and 2 */
+ switch (PCI_FUNC(pdev->devfn)) {
+ case 1:
+- pvt->pci_dev_16_1_fsb_addr_map = pdev;
++ if (!pvt->pci_dev_16_1_fsb_addr_map)
++ pvt->pci_dev_16_1_fsb_addr_map =
++ pci_dev_get(pdev);
+ break;
+ case 2:
+- pvt->pci_dev_16_2_fsb_err_regs = pdev;
++ if (!pvt->pci_dev_16_2_fsb_err_regs)
++ pvt->pci_dev_16_2_fsb_err_regs =
++ pci_dev_get(pdev);
+ break;
+ }
+ }
+
++ if (!pvt->pci_dev_16_1_fsb_addr_map ||
++ !pvt->pci_dev_16_2_fsb_err_regs) {
++ /* At least one device was not found */
++ i7300_printk(KERN_ERR,
++ "'system address,Process Bus' device not found:"
++ "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
++ PCI_VENDOR_ID_INTEL,
++ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
++ goto error;
++ }
++
+ edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->pci_dev_16_0_fsb_ctlr),
+ pvt->pci_dev_16_0_fsb_ctlr->vendor,
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index 80a963d64e58..c67fb4d707d3 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1334,14 +1334,19 @@ static int i7core_get_onedevice(struct pci_dev **prev,
+ * is at addr 8086:2c40, instead of 8086:2c41. So, we need
+ * to probe for the alternate address in case of failure
+ */
+- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
++ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
++ pci_dev_get(*prev); /* pci_get_device will put it */
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
++ }
+
+- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
++ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
++ !pdev) {
++ pci_dev_get(*prev); /* pci_get_device will put it */
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
+ *prev);
++ }
+
+ if (!pdev) {
+ if (*prev) {
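[Editor's note] Both EDAC hunks hinge on pci_get_device() reference semantics: it drops the reference on the device passed in as the starting point and returns a new reference on the next match. Hence the added pci_dev_get() calls — one to keep a device cached beyond the loop, one to compensate when *prev is fed into a second lookup. In miniature, with is_wanted() as a hypothetical predicate:

    struct pci_dev *pdev = NULL, *cached = NULL;

    while ((pdev = pci_get_device(vendor, device, pdev))) {
            /* the next pci_get_device() call puts `pdev`, so take
             * our own reference before caching it past the loop */
            if (!cached && is_wanted(pdev))
                    cached = pci_dev_get(pdev);
    }
    /* loop exit already released the iterator's last reference;
     * `cached` now holds the only reference we own */
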
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index dd2d542e4651..9dcf34f9a22d 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7946,6 +7946,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
+ if (ring->id == RCS)
+ len += 6;
+
++ /*
++ * BSpec MI_DISPLAY_FLIP for IVB:
++ * "The full packet must be contained within the same cache line."
++ *
++ * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
++ * cacheline, if we ever start emitting more commands before
++ * the MI_DISPLAY_FLIP we may need to first emit everything else,
++ * then do the cacheline alignment, and finally emit the
++ * MI_DISPLAY_FLIP.
++ */
++ ret = intel_ring_cacheline_align(ring);
++ if (ret)
++ goto err_unpin;
++
+ ret = intel_ring_begin(ring, len);
+ if (ret)
+ goto err_unpin;
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 5a97f7356843..b0191f25cd55 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -450,6 +450,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
+ uint8_t msg[20];
+ int msg_bytes;
+ uint8_t ack;
++ int retry;
+
+ intel_dp_check_edp(intel_dp);
+ if (send_bytes > 16)
+@@ -460,18 +461,20 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
+ msg[3] = send_bytes - 1;
+ memcpy(&msg[4], send, send_bytes);
+ msg_bytes = send_bytes + 4;
+- for (;;) {
++ for (retry = 0; retry < 7; retry++) {
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
+ if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+- break;
++ return send_bytes;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+- udelay(100);
++ usleep_range(400, 500);
+ else
+ return -EIO;
+ }
+- return send_bytes;
++
++ DRM_ERROR("too many retries, giving up\n");
++ return -EIO;
+ }
+
+ /* Write a single byte to the aux channel in native mode */
+@@ -493,6 +496,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
+ int reply_bytes;
+ uint8_t ack;
+ int ret;
++ int retry;
+
+ intel_dp_check_edp(intel_dp);
+ msg[0] = AUX_NATIVE_READ << 4;
+@@ -503,7 +507,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
+ msg_bytes = 4;
+ reply_bytes = recv_bytes + 1;
+
+- for (;;) {
++ for (retry = 0; retry < 7; retry++) {
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret == 0)
+@@ -516,10 +520,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
+ return ret - 1;
+ }
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+- udelay(100);
++ usleep_range(400, 500);
+ else
+ return -EIO;
+ }
++
++ DRM_ERROR("too many retries, giving up\n");
++ return -EIO;
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 43719bbb2595..7507fe036b6e 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -1545,6 +1545,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
+ return 0;
+ }
+
++/* Align the ring tail to a cacheline boundary */
++int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
++{
++ int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
++ int ret;
++
++ if (num_dwords == 0)
++ return 0;
++
++ ret = intel_ring_begin(ring, num_dwords);
++ if (ret)
++ return ret;
++
++ while (num_dwords--)
++ intel_ring_emit(ring, MI_NOOP);
++
++ intel_ring_advance(ring);
++
++ return 0;
++}
++
+ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+ {
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
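[Editor's note] One arithmetic quirk in the helper above: (64 - (ring->tail & 63)) is always between 1 and 64, so num_dwords lands in 1..16 and the num_dwords == 0 early-return can never trigger — a tail that is already cache-line aligned still gets a full line of MI_NOOPs. That is harmless for the stated goal (the flip packet still ends up within one line) but slightly wasteful; a variant that pads only when needed would be:

    int rem = ring->tail & 63;          /* bytes into the cache line */
    int num_dwords = rem ? (64 - rem) / sizeof(uint32_t) : 0;

    if (num_dwords == 0)
            return 0;                   /* already aligned */
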
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
+index 68b1ca974d59..8eecbd28074a 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
+@@ -231,6 +231,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
+ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+
+ int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
++int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
+ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+ u32 data)
+ {
+diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+index c168ae3eaa97..355e9fdedc5f 100644
+--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
++++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+@@ -1112,7 +1112,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
+ if (conf != ~0) {
+ if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
+ u32 soff = (ffs(outp.or) - 1) * 0x08;
+- u32 ctrl = nv_rd32(priv, 0x610798 + soff);
++ u32 ctrl = nv_rd32(priv, 0x610794 + soff);
+ u32 datarate;
+
+ switch ((ctrl & 0x000f0000) >> 16) {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index 32c6b0a60fb3..95aae9110405 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -382,6 +382,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
+ if (ret)
+ goto fail_device;
+
++ dev->irq_enabled = true;
++
+ /* workaround an odd issue on nvc1 by disabling the device's
+ * nosnoop capability. hopefully won't cause issues until a
+ * better fix is found - assuming there is one...
+@@ -481,6 +483,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_object *device;
+
++ dev->irq_enabled = false;
+ device = drm->client.base.device;
+ drm_put_dev(dev);
+
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 0ee2cf5cf76e..dcb652a6f924 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1744,6 +1744,20 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ return ATOM_PPLL1;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
++ } else if (ASIC_IS_DCE41(rdev)) {
++ /* Don't share PLLs on DCE4.1 chips */
++ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
++ if (rdev->clock.dp_extclk)
++ /* skip PPLL programming if using ext clock */
++ return ATOM_PPLL_INVALID;
++ }
++ pll_in_use = radeon_get_pll_use_mask(crtc);
++ if (!(pll_in_use & (1 << ATOM_PPLL1)))
++ return ATOM_PPLL1;
++ if (!(pll_in_use & (1 << ATOM_PPLL2)))
++ return ATOM_PPLL2;
++ DRM_ERROR("unable to allocate a PPLL\n");
++ return ATOM_PPLL_INVALID;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+ * depending on the asic:
+@@ -1771,7 +1785,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ }
+- } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
++ } else {
+ /* use the same PPLL for all monitors with the same clock */
+ pll = radeon_get_shared_nondp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
+index 86ee09783925..2a2879e53bd5 100644
+--- a/drivers/gpu/drm/radeon/dce6_afmt.c
++++ b/drivers/gpu/drm/radeon/dce6_afmt.c
+@@ -231,7 +231,7 @@ static void dce6_audio_enable(struct radeon_device *rdev,
+ bool enable)
+ {
+ WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
+- AUDIO_ENABLED);
++ enable ? AUDIO_ENABLED : 0);
+ DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
+ }
+
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index ffb36c1ee005..5f07d1bfbd76 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -5369,9 +5369,9 @@ void evergreen_fini(struct radeon_device *rdev)
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+- evergreen_pcie_gart_fini(rdev);
+ uvd_v1_0_fini(rdev);
+ radeon_uvd_fini(rdev);
++ evergreen_pcie_gart_fini(rdev);
+ r600_vram_scratch_fini(rdev);
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
+index f26339028154..db0fa617e2f5 100644
+--- a/drivers/gpu/drm/radeon/ni_dpm.c
++++ b/drivers/gpu/drm/radeon/ni_dpm.c
+@@ -2592,7 +2592,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
+ if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+ enable_sq_ramping = false;
+
+- if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
++ if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ enable_sq_ramping = false;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index d7e7c25feaaf..b8db0d7b5089 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -216,7 +216,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
+ memcpy(&output, info->buffer.pointer, size);
+
+ /* TODO: check version? */
+- printk("ATPX version %u\n", output.version);
++ printk("ATPX version %u, functions 0x%08x\n",
++ output.version, output.function_bits);
+
+ radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index 4a3b3c55a568..7456ce186f29 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -500,6 +500,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+
+ radeon_vm_init(rdev, &fpriv->vm);
+
++ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
++ if (r)
++ return r;
++
+ /* map the ib pool buffer read only into
+ * virtual address space */
+ bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+@@ -507,6 +511,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+ r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+ RADEON_VM_PAGE_READABLE |
+ RADEON_VM_PAGE_SNOOPED);
++
++ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+ if (r) {
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index 6d916fc93116..83936473f8e4 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -170,6 +170,8 @@ void radeon_uvd_fini(struct radeon_device *rdev)
+
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
++ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
++
+ release_firmware(rdev->uvd_fw);
+ }
+
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index 9f5846743c9e..99dd9d8fcf72 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -1921,9 +1921,9 @@ void rv770_fini(struct radeon_device *rdev)
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+- rv770_pcie_gart_fini(rdev);
+ uvd_v1_0_fini(rdev);
+ radeon_uvd_fini(rdev);
++ rv770_pcie_gart_fini(rdev);
+ r600_vram_scratch_fini(rdev);
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
+index a7626358c95d..029b65e6c589 100644
+--- a/drivers/hwmon/max1668.c
++++ b/drivers/hwmon/max1668.c
+@@ -243,7 +243,7 @@ static ssize_t set_temp_min(struct device *dev,
+ data->temp_min[index] = clamp_val(temp/1000, -128, 127);
+ if (i2c_smbus_write_byte_data(client,
+ MAX1668_REG_LIML_WR(index),
+- data->temp_max[index]))
++ data->temp_min[index]))
+ count = -EIO;
+ mutex_unlock(&data->update_lock);
+
+diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
+index 41c64a43bcab..ac2d69e34c8c 100644
+--- a/drivers/iio/gyro/Kconfig
++++ b/drivers/iio/gyro/Kconfig
+@@ -70,7 +70,7 @@ config IIO_ST_GYRO_3AXIS
+ select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
+ help
+ Say yes here to build support for STMicroelectronics gyroscopes:
+- L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330.
++ L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330.
+
+ This driver can also be built as a module. If so, these modules
+ will be created:
+diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
+index f8f2bf84a5a2..c197360c450b 100644
+--- a/drivers/iio/gyro/st_gyro.h
++++ b/drivers/iio/gyro/st_gyro.h
+@@ -19,7 +19,6 @@
+ #define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
+ #define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
+ #define L3GD20_GYRO_DEV_NAME "l3gd20"
+-#define L3GD20H_GYRO_DEV_NAME "l3gd20h"
+ #define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
+ #define LSM330_GYRO_DEV_NAME "lsm330_gyro"
+
+diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
+index e13c2b0bf3d1..46cb5ec2b066 100644
+--- a/drivers/iio/gyro/st_gyro_core.c
++++ b/drivers/iio/gyro/st_gyro_core.c
+@@ -167,11 +167,10 @@ static const struct st_sensors st_gyro_sensors[] = {
+ .wai = ST_GYRO_2_WAI_EXP,
+ .sensors_supported = {
+ [0] = L3GD20_GYRO_DEV_NAME,
+- [1] = L3GD20H_GYRO_DEV_NAME,
+- [2] = LSM330D_GYRO_DEV_NAME,
+- [3] = LSM330DLC_GYRO_DEV_NAME,
+- [4] = L3G4IS_GYRO_DEV_NAME,
+- [5] = LSM330_GYRO_DEV_NAME,
++ [1] = LSM330D_GYRO_DEV_NAME,
++ [2] = LSM330DLC_GYRO_DEV_NAME,
++ [3] = L3G4IS_GYRO_DEV_NAME,
++ [4] = LSM330_GYRO_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
+ .odr = {
+diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
+index 16b8b8d70bf1..23c12f361b05 100644
+--- a/drivers/iio/gyro/st_gyro_i2c.c
++++ b/drivers/iio/gyro/st_gyro_i2c.c
+@@ -55,7 +55,6 @@ static const struct i2c_device_id st_gyro_id_table[] = {
+ { LSM330DL_GYRO_DEV_NAME },
+ { LSM330DLC_GYRO_DEV_NAME },
+ { L3GD20_GYRO_DEV_NAME },
+- { L3GD20H_GYRO_DEV_NAME },
+ { L3G4IS_GYRO_DEV_NAME },
+ { LSM330_GYRO_DEV_NAME },
+ {},
+diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
+index 94763e25caf9..b4ad3be26687 100644
+--- a/drivers/iio/gyro/st_gyro_spi.c
++++ b/drivers/iio/gyro/st_gyro_spi.c
+@@ -54,7 +54,6 @@ static const struct spi_device_id st_gyro_id_table[] = {
+ { LSM330DL_GYRO_DEV_NAME },
+ { LSM330DLC_GYRO_DEV_NAME },
+ { L3GD20_GYRO_DEV_NAME },
+- { L3GD20H_GYRO_DEV_NAME },
+ { L3G4IS_GYRO_DEV_NAME },
+ { LSM330_GYRO_DEV_NAME },
+ {},
+diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
+index 7a04f54ef961..e7e12a5f5c2d 100644
+--- a/drivers/input/misc/arizona-haptics.c
++++ b/drivers/input/misc/arizona-haptics.c
+@@ -77,16 +77,14 @@ static void arizona_haptics_work(struct work_struct *work)
+ return;
+ }
+
++ mutex_unlock(dapm_mutex);
++
+ ret = snd_soc_dapm_sync(arizona->dapm);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
+ ret);
+- mutex_unlock(dapm_mutex);
+ return;
+ }
+-
+- mutex_unlock(dapm_mutex);
+-
+ } else {
+ /* This disable sequence will be a noop if already enabled */
+ mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+@@ -99,16 +97,15 @@ static void arizona_haptics_work(struct work_struct *work)
+ return;
+ }
+
++ mutex_unlock(dapm_mutex);
++
+ ret = snd_soc_dapm_sync(arizona->dapm);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
+ ret);
+- mutex_unlock(dapm_mutex);
+ return;
+ }
+
+- mutex_unlock(dapm_mutex);
+-
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_HAPTICS_CONTROL_1,
+ ARIZONA_HAP_CTRL_MASK,
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index 0046a619527d..24a60b9979ca 100644
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -190,6 +190,9 @@
+ #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
+ #define CBAR_VMID_SHIFT 0
+ #define CBAR_VMID_MASK 0xff
++#define CBAR_S1_BPSHCFG_SHIFT 8
++#define CBAR_S1_BPSHCFG_MASK 3
++#define CBAR_S1_BPSHCFG_NSH 3
+ #define CBAR_S1_MEMATTR_SHIFT 12
+ #define CBAR_S1_MEMATTR_MASK 0xf
+ #define CBAR_S1_MEMATTR_WB 0xf
+@@ -392,7 +395,7 @@ struct arm_smmu_domain {
+ struct arm_smmu_cfg root_cfg;
+ phys_addr_t output_mask;
+
+- struct mutex lock;
++ spinlock_t lock;
+ };
+
+ static DEFINE_SPINLOCK(arm_smmu_devices_lock);
+@@ -646,11 +649,16 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+ if (smmu->version == 1)
+ reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
+
+- /* Use the weakest memory type, so it is overridden by the pte */
+- if (stage1)
+- reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+- else
++ /*
++ * Use the weakest shareability/memory types, so they are
++ * overridden by the ttbcr/pte.
++ */
++ if (stage1) {
++ reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
++ (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
++ } else {
+ reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
++ }
+ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
+
+ if (smmu->version > 1) {
+@@ -897,7 +905,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
+ goto out_free_domain;
+ smmu_domain->root_cfg.pgd = pgd;
+
+- mutex_init(&smmu_domain->lock);
++ spin_lock_init(&smmu_domain->lock);
+ domain->priv = smmu_domain;
+ return 0;
+
+@@ -1134,7 +1142,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ * Sanity check the domain. We don't currently support domains
+ * that cross between different SMMU chains.
+ */
+- mutex_lock(&smmu_domain->lock);
++ spin_lock(&smmu_domain->lock);
+ if (!smmu_domain->leaf_smmu) {
+ /* Now that we have a master, we can finalise the domain */
+ ret = arm_smmu_init_domain_context(domain, dev);
+@@ -1149,7 +1157,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ dev_name(device_smmu->dev));
+ goto err_unlock;
+ }
+- mutex_unlock(&smmu_domain->lock);
++ spin_unlock(&smmu_domain->lock);
+
+ /* Looks ok, so add the device to the domain */
+ master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
+@@ -1159,7 +1167,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ return arm_smmu_domain_add_master(smmu_domain, master);
+
+ err_unlock:
+- mutex_unlock(&smmu_domain->lock);
++ spin_unlock(&smmu_domain->lock);
+ return ret;
+ }
+
+@@ -1206,7 +1214,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
+
+ if (pmd_none(*pmd)) {
+ /* Allocate a new set of tables */
+- pgtable_t table = alloc_page(PGALLOC_GFP);
++ pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
+ if (!table)
+ return -ENOMEM;
+
+@@ -1308,9 +1316,14 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
+
+ #ifndef __PAGETABLE_PMD_FOLDED
+ if (pud_none(*pud)) {
+- pmd = pmd_alloc_one(NULL, addr);
++ pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
+ if (!pmd)
+ return -ENOMEM;
++
++ pud_populate(NULL, pud, pmd);
++ arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
++
++ pmd += pmd_index(addr);
+ } else
+ #endif
+ pmd = pmd_offset(pud, addr);
+@@ -1319,8 +1332,6 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
+ next = pmd_addr_end(addr, end);
+ ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
+ flags, stage);
+- pud_populate(NULL, pud, pmd);
+- arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
+ phys += next - addr;
+ } while (pmd++, addr = next, addr < end);
+
+@@ -1337,9 +1348,14 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
+
+ #ifndef __PAGETABLE_PUD_FOLDED
+ if (pgd_none(*pgd)) {
+- pud = pud_alloc_one(NULL, addr);
++ pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
+ if (!pud)
+ return -ENOMEM;
++
++ pgd_populate(NULL, pgd, pud);
++ arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
++
++ pud += pud_index(addr);
+ } else
+ #endif
+ pud = pud_offset(pgd, addr);
+@@ -1348,8 +1364,6 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
+ next = pud_addr_end(addr, end);
+ ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
+ flags, stage);
+- pgd_populate(NULL, pud, pgd);
+- arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
+ phys += next - addr;
+ } while (pud++, addr = next, addr < end);
+
+@@ -1388,7 +1402,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
+ if (paddr & ~output_mask)
+ return -ERANGE;
+
+- mutex_lock(&smmu_domain->lock);
++ spin_lock(&smmu_domain->lock);
+ pgd += pgd_index(iova);
+ end = iova + size;
+ do {
+@@ -1404,7 +1418,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
+ } while (pgd++, iova != end);
+
+ out_unlock:
+- mutex_unlock(&smmu_domain->lock);
++ spin_unlock(&smmu_domain->lock);
+
+ /* Ensure new page tables are visible to the hardware walker */
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
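[Editor's note] The arm-smmu locking conversion above carries a rule with it: once the domain mutex becomes a spinlock, nothing under the lock may sleep, which is why alloc_page(PGALLOC_GFP) — PGALLOC_GFP includes GFP_KERNEL — turns into GFP_ATOMIC|__GFP_ZERO, and the sleeping pmd/pud_alloc_one() calls become get_zeroed_page(GFP_ATOMIC). The rule in miniature:

    spin_lock(&smmu_domain->lock);
    /* atomic context: no GFP_KERNEL, no mutexes, nothing that
     * can schedule */
    table = alloc_page(GFP_ATOMIC | __GFP_ZERO);
    if (!table) {
            spin_unlock(&smmu_domain->lock);
            return -ENOMEM;
    }
    /* ... install table ... */
    spin_unlock(&smmu_domain->lock);
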
+diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
+index 92c41ab4dbfd..2cb474ad8809 100644
+--- a/drivers/irqchip/irq-metag-ext.c
++++ b/drivers/irqchip/irq-metag-ext.c
+@@ -515,7 +515,7 @@ static int meta_intc_set_affinity(struct irq_data *data,
+ * one cpu (the interrupt code doesn't support it), so we just
+ * pick the first cpu we find in 'cpumask'.
+ */
+- cpu = cpumask_any(cpumask);
++ cpu = cpumask_any_and(cpumask, cpu_online_mask);
+ thread = cpu_2_hwthread_id[cpu];
+
+ metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
+diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
+index 8e94d7a3b20d..c16c186d97d3 100644
+--- a/drivers/irqchip/irq-metag.c
++++ b/drivers/irqchip/irq-metag.c
+@@ -201,7 +201,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data,
+ * one cpu (the interrupt code doesn't support it), so we just
+ * pick the first cpu we find in 'cpumask'.
+ */
+- cpu = cpumask_any(cpumask);
++ cpu = cpumask_any_and(cpumask, cpu_online_mask);
+ thread = cpu_2_hwthread_id[cpu];
+
+ metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
+diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
+index e51d40031884..8e41be62812e 100644
+--- a/drivers/irqchip/irq-orion.c
++++ b/drivers/irqchip/irq-orion.c
+@@ -111,7 +111,8 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
+ static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
+ {
+ struct irq_domain *d = irq_get_handler_data(irq);
+- struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, irq);
++
++ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
+ u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
+ gc->mask_cache;
+
+@@ -123,6 +124,19 @@ static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
+ }
+ }
+
++/*
++ * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
++ * To avoid interrupt events on stale irqs, we clear them before unmask.
++ */
++static unsigned int orion_bridge_irq_startup(struct irq_data *d)
++{
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
++ ct->chip.irq_ack(d);
++ ct->chip.irq_unmask(d);
++ return 0;
++}
++
+ static int __init orion_bridge_irq_init(struct device_node *np,
+ struct device_node *parent)
+ {
+@@ -143,7 +157,7 @@ static int __init orion_bridge_irq_init(struct device_node *np,
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
+- handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
++ handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("%s: unable to alloc irq domain gc\n", np->name);
+ return ret;
+@@ -176,12 +190,14 @@ static int __init orion_bridge_irq_init(struct device_node *np,
+
+ gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
+ gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
++ gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
+ gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+
+- /* mask all interrupts */
++ /* mask and clear all interrupts */
+ writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
++ writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
+
+ irq_set_handler_data(irq, domain);
+ irq_set_chained_handler(irq, orion_bridge_irq_handler);
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 799e479db93b..709ce1b2582e 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -1623,8 +1623,11 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+- if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
+- r = scsi_verify_blk_ioctl(NULL, cmd);
++ if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
++ int err = scsi_verify_blk_ioctl(NULL, cmd);
++ if (err)
++ r = err;
++ }
+
+ if (r == -ENOTCONN && !fatal_signal_pending(current))
+ queue_work(kmultipathd, &m->process_queued_ios);
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 7da347665552..3bb4506582a9 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1489,6 +1489,23 @@ bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
+ return r;
+ }
+
++bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
++{
++ bool r = false;
++ struct dm_thin_device *td, *tmp;
++
++ down_read(&pmd->root_lock);
++ list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
++ if (td->changed) {
++ r = td->changed;
++ break;
++ }
++ }
++ up_read(&pmd->root_lock);
++
++ return r;
++}
++
+ bool dm_thin_aborted_changes(struct dm_thin_device *td)
+ {
+ bool r;
+diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
+index 2edf5dbac76a..c6d123bb768a 100644
+--- a/drivers/md/dm-thin-metadata.h
++++ b/drivers/md/dm-thin-metadata.h
+@@ -161,6 +161,8 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
+ */
+ bool dm_thin_changed_this_transaction(struct dm_thin_device *td);
+
++bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd);
++
+ bool dm_thin_aborted_changes(struct dm_thin_device *td);
+
+ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index bc0c97d7921e..e9587101b04f 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -1354,7 +1354,8 @@ static void process_deferred_bios(struct pool *pool)
+ bio_list_init(&pool->deferred_flush_bios);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+- if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
++ if (bio_list_empty(&bios) &&
++ !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
+ return;
+
+ if (commit(pool)) {
+@@ -2847,6 +2848,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
+
+ if (get_pool_mode(tc->pool) == PM_FAIL) {
+ ti->error = "Couldn't open thin device, Pool is in fail mode";
++ r = -EINVAL;
+ goto bad_thin_open;
+ }
+
+@@ -2858,7 +2860,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
+
+ r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
+ if (r)
+- goto bad_thin_open;
++ goto bad_target_max_io_len;
+
+ ti->num_flush_bios = 1;
+ ti->flush_supported = true;
+@@ -2879,6 +2881,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
+
+ return 0;
+
++bad_target_max_io_len:
++ dm_pool_close_thin_device(tc->td);
+ bad_thin_open:
+ __pool_dec(tc->pool);
+ bad_pool_lookup:
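[Editor's note] Two small but typical fixes in the thin_ctr hunk: an error path that forgot to set r (the PM_FAIL branch now returns -EINVAL rather than whatever r happened to hold), and a goto ladder extended so that a failure after dm_pool_open_thin_device() also closes the device. The ladder invariant — each label undoes exactly what had succeeded by the time of the jump — in its simplest form, with hypothetical open_a()/open_b():

    r = open_a();
    if (r)
            return r;
    r = open_b();
    if (r)
            goto err_close_a;       /* a succeeded; undo it on the way out */
    return 0;

    err_close_a:
            close_a();
            return r;
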
+diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
+index 13af7e50021e..8103e4362132 100644
+--- a/drivers/mfd/da9055-i2c.c
++++ b/drivers/mfd/da9055-i2c.c
+@@ -53,17 +53,25 @@ static int da9055_i2c_remove(struct i2c_client *i2c)
+ return 0;
+ }
+
++/*
++ * DO NOT change the device Ids. The naming is intentionally specific as both
++ * the PMIC and CODEC parts of this chip are instantiated separately as I2C
++ * devices (both have configurable I2C addresses, and are to all intents and
++ * purposes separate). As a result there are specific DA9055 ids for PMIC
++ * and CODEC, which must be different to operate together.
++ */
+ static struct i2c_device_id da9055_i2c_id[] = {
+- {"da9055", 0},
++ {"da9055-pmic", 0},
+ { }
+ };
++MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
+
+ static struct i2c_driver da9055_i2c_driver = {
+ .probe = da9055_i2c_probe,
+ .remove = da9055_i2c_remove,
+ .id_table = da9055_i2c_id,
+ .driver = {
+- .name = "da9055",
++ .name = "da9055-pmic",
+ .owner = THIS_MODULE,
+ },
+ };
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index 8c33f943abbf..b66cec93ebb3 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -660,7 +660,6 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
+ goto err;
+
+ cb->fop_type = MEI_FOP_READ;
+- cl->read_cb = cb;
+ if (dev->hbuf_is_ready) {
+ dev->hbuf_is_ready = false;
+ if (mei_hbm_cl_flow_control_req(dev, cl)) {
+@@ -671,6 +670,9 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
+ } else {
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ }
++
++ cl->read_cb = cb;
++
+ return rets;
+ err:
+ mei_io_cb_free(cb);
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index 0d8f427ade93..b3c22527b938 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -1861,8 +1861,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
+ BOND_AD_INFO(bond).agg_select_timer = timeout;
+ }
+
+-static u16 aggregator_identifier;
+-
+ /**
+ * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
+ * @bond: bonding struct to work on
+@@ -1876,7 +1874,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+ if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
+ bond->dev->dev_addr)) {
+
+- aggregator_identifier = 0;
++ BOND_AD_INFO(bond).aggregator_identifier = 0;
+
+ BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
+ BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
+@@ -1947,7 +1945,7 @@ int bond_3ad_bind_slave(struct slave *slave)
+ ad_initialize_agg(aggregator);
+
+ aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
+- aggregator->aggregator_identifier = (++aggregator_identifier);
++ aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
+ aggregator->slave = slave;
+ aggregator->is_active = 0;
+ aggregator->num_of_ports = 0;
+diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
+index 5d91ad0cc041..1f081c89753f 100644
+--- a/drivers/net/bonding/bond_3ad.h
++++ b/drivers/net/bonding/bond_3ad.h
+@@ -253,6 +253,7 @@ struct ad_system {
+ struct ad_bond_info {
+ struct ad_system system; /* 802.3ad system structure */
+ u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
++ u16 aggregator_identifier;
+ };
+
+ struct ad_slave_info {
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 1870c4731a57..539239d8e9ab 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -324,19 +324,10 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ }
+
+ if (!priv->echo_skb[idx]) {
+- struct sock *srcsk = skb->sk;
+
+- if (atomic_read(&skb->users) != 1) {
+- struct sk_buff *old_skb = skb;
+-
+- skb = skb_clone(old_skb, GFP_ATOMIC);
+- kfree_skb(old_skb);
+- if (!skb)
+- return;
+- } else
+- skb_orphan(skb);
+-
+- skb->sk = srcsk;
++ skb = can_create_echo_skb(skb);
++ if (!skb)
++ return;
+
+ /* make settings for echo to reduce code in irq context */
+ skb->protocol = htons(ETH_P_CAN);
+diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
+index 36bd6fa1c7f3..db2ff0340388 100644
+--- a/drivers/net/can/janz-ican3.c
++++ b/drivers/net/can/janz-ican3.c
+@@ -19,6 +19,7 @@
+ #include <linux/netdevice.h>
+ #include <linux/can.h>
+ #include <linux/can/dev.h>
++#include <linux/can/skb.h>
+ #include <linux/can/error.h>
+
+ #include <linux/mfd/janz.h>
+@@ -1134,20 +1135,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
+ */
+ static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
+ {
+- struct sock *srcsk = skb->sk;
+-
+- if (atomic_read(&skb->users) != 1) {
+- struct sk_buff *old_skb = skb;
+-
+- skb = skb_clone(old_skb, GFP_ATOMIC);
+- kfree_skb(old_skb);
+- if (!skb)
+- return;
+- } else {
+- skb_orphan(skb);
+- }
+-
+- skb->sk = srcsk;
++ skb = can_create_echo_skb(skb);
++ if (!skb)
++ return;
+
+ /* save this skb for tx interrupt echo handling */
+ skb_queue_tail(&mod->echoq, skb);
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index 4b2d5ed62b11..cc3df8aebb87 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -474,6 +474,8 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
+ return err;
+
+ dev->nchannels = msg.u.cardinfo.nchannels;
++ if (dev->nchannels > MAX_NET_DEVICES)
++ return -EINVAL;
+
+ return 0;
+ }
+diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
+index 0a2a5ee79a17..4e94057ef5cf 100644
+--- a/drivers/net/can/vcan.c
++++ b/drivers/net/can/vcan.c
+@@ -46,6 +46,7 @@
+ #include <linux/if_ether.h>
+ #include <linux/can.h>
+ #include <linux/can/dev.h>
++#include <linux/can/skb.h>
+ #include <linux/slab.h>
+ #include <net/rtnetlink.h>
+
+@@ -109,25 +110,23 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
+ stats->rx_packets++;
+ stats->rx_bytes += cfd->len;
+ }
+- kfree_skb(skb);
++ consume_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* perform standard echo handling for CAN network interfaces */
+
+ if (loop) {
+- struct sock *srcsk = skb->sk;
+
+- skb = skb_share_check(skb, GFP_ATOMIC);
++ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return NETDEV_TX_OK;
+
+ /* receive with packet counting */
+- skb->sk = srcsk;
+ vcan_rx(skb, dev);
+ } else {
+ /* no looped packets => no counting */
+- kfree_skb(skb);
++ consume_skb(skb);
+ }
+ return NETDEV_TX_OK;
+ }
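[Editor's note] Three drivers in this series — can/dev.c, janz-ican3 and vcan above — carried the same open-coded clone-or-orphan sequence for echo skbs; the patch replaces all of them with a shared can_create_echo_skb() helper. Its definition is not part of these hunks, but judging from the code being removed it is presumably equivalent to:

    /* sketch reconstructed from the removed open-coded versions */
    static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
    {
            struct sock *srcsk = skb->sk;

            if (atomic_read(&skb->users) != 1) {
                    struct sk_buff *old_skb = skb;

                    skb = skb_clone(old_skb, GFP_ATOMIC);
                    kfree_skb(old_skb);
                    if (!skb)
                            return NULL;
            } else {
                    skb_orphan(skb);
            }

            skb->sk = srcsk;        /* keep the originating socket for echo */
            return skb;
    }
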
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 3ff1f272c6c8..c0acf98d1ea5 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -13956,12 +13956,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+
+ tg3_netif_stop(tp);
+
++ tg3_set_mtu(dev, tp, new_mtu);
++
+ tg3_full_lock(tp, 1);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+
+- tg3_set_mtu(dev, tp, new_mtu);
+-
+ /* Reset PHY, otherwise the read DMA engine will be in a mode that
+ * breaks all requests to 256 bytes.
+ */
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 386a3df53678..20643833f0e6 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -918,7 +918,8 @@ static const struct driver_info ax88178_info = {
+ .status = asix_status,
+ .link_reset = ax88178_link_reset,
+ .reset = ax88178_reset,
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
++ FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup_common,
+ .tx_fixup = asix_tx_fixup,
+ };
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 846cc19c04f2..5e2bac650bd8 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1120,6 +1120,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ u16 hdr_off;
+ u32 *pkt_hdr;
+
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len)
++ return 0;
++
+ skb_trim(skb, skb->len - 4);
+ memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
+ le32_to_cpus(&rx_hdr);
+diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
+index a7e3f4e55bf3..82ab61d62804 100644
+--- a/drivers/net/usb/gl620a.c
++++ b/drivers/net/usb/gl620a.c
+@@ -86,6 +86,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ u32 size;
+ u32 count;
+
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len)
++ return 0;
++
+ header = (struct gl_header *) skb->data;
+
+ // get the packet count of the received skb
+diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
+index 03832d3780aa..9237c45883cd 100644
+--- a/drivers/net/usb/mcs7830.c
++++ b/drivers/net/usb/mcs7830.c
+@@ -529,8 +529,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ {
+ u8 status;
+
+- if (skb->len == 0) {
+- dev_err(&dev->udev->dev, "unexpected empty rx frame\n");
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len) {
++ dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
+ return 0;
+ }
+
+diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
+index 93e0716a118c..7f4a3a41c4f8 100644
+--- a/drivers/net/usb/net1080.c
++++ b/drivers/net/usb/net1080.c
+@@ -366,6 +366,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ struct nc_trailer *trailer;
+ u16 hdr_len, packet_len;
+
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len)
++ return 0;
++
+ if (!(skb->len & 0x01)) {
+ netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
+ skb->len, dev->net->hard_header_len, dev->hard_mtu,
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 818ce90185b5..558469fda3b7 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -80,10 +80,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ {
+ __be16 proto;
+
+- /* usbnet rx_complete guarantees that skb->len is at least
+- * hard_header_len, so we can inspect the dest address without
+- * checking skb->len
+- */
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len)
++ return 0;
++
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ proto = htons(ETH_P_IP);
+@@ -710,6 +710,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
+ {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
++ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
+ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
+ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
+diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
+index cc49aac70224..691fca4e4c2d 100644
+--- a/drivers/net/usb/rndis_host.c
++++ b/drivers/net/usb/rndis_host.c
+@@ -494,6 +494,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
+ */
+ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ {
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len)
++ return 0;
++
+ /* peripheral may have batched packets to us... */
+ while (likely(skb->len)) {
+ struct rndis_data_hdr *hdr = (void *)skb->data;
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 66ebbacf066f..12afae0451e6 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -2108,6 +2108,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
+
+ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ {
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len)
++ return 0;
++
+ while (skb->len > 0) {
+ u32 rx_cmd_a, rx_cmd_b, align_count, size;
+ struct sk_buff *ax_skb;
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 3f38ba868f61..9375b8c6410b 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -1725,6 +1725,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
+
+ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ {
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len)
++ return 0;
++
+ while (skb->len > 0) {
+ u32 header, align_count;
+ struct sk_buff *ax_skb;
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index aba04f561760..a91fa49b81c3 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -543,17 +543,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
+ }
+ // else network stack removes extra byte if we forced a short packet
+
+- if (skb->len) {
+- /* all data was already cloned from skb inside the driver */
+- if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+- dev_kfree_skb_any(skb);
+- else
+- usbnet_skb_return(dev, skb);
++ /* all data was already cloned from skb inside the driver */
++ if (dev->driver_info->flags & FLAG_MULTI_PACKET)
++ goto done;
++
++ if (skb->len < ETH_HLEN) {
++ dev->net->stats.rx_errors++;
++ dev->net->stats.rx_length_errors++;
++ netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
++ } else {
++ usbnet_skb_return(dev, skb);
+ return;
+ }
+
+- netif_dbg(dev, rx_err, dev->net, "drop\n");
+- dev->net->stats.rx_errors++;
+ done:
+ skb_queue_tail(&dev->done, skb);
+ }
+@@ -575,13 +577,6 @@ static void rx_complete (struct urb *urb)
+ switch (urb_status) {
+ /* success */
+ case 0:
+- if (skb->len < dev->net->hard_header_len) {
+- state = rx_cleanup;
+- dev->net->stats.rx_errors++;
+- dev->net->stats.rx_length_errors++;
+- netif_dbg(dev, rx_err, dev->net,
+- "rx length %d\n", skb->len);
+- }
+ break;
+
+ /* stalls need manual reset. this is rare ... except that
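[Editor's note] The usbnet core hunk directly above explains all the rx_fixup() guards added earlier in this patch: the core used to reject any URB shorter than hard_header_len before the minidriver ever saw it, which broke FLAG_MULTI_PACKET drivers whose URBs carry their own framing. The length check therefore moves out of rx_complete() and into each non-multi-packet fixup, in this shape (my_rx_fixup is a hypothetical example):

    static int my_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
    {
            /* This check is no longer done by usbnet */
            if (skb->len < dev->net->hard_header_len)
                    return 0;               /* 0: drop this skb */

            /* ... driver-specific deframing ... */
            return 1;                       /* nonzero: keep processing */
    }
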
+diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+index 56aee067f324..a6ad79f61bf9 100644
+--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
++++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+@@ -15,6 +15,8 @@
+ #ifndef RTL8187_H
+ #define RTL8187_H
+
++#include <linux/cache.h>
++
+ #include "rtl818x.h"
+ #include "leds.h"
+
+@@ -139,7 +141,10 @@ struct rtl8187_priv {
+ u8 aifsn[4];
+ u8 rfkill_mask;
+ struct {
+- __le64 buf;
++ union {
++ __le64 buf;
++ u8 dummy1[L1_CACHE_BYTES];
++ } ____cacheline_aligned;
+ struct sk_buff_head queue;
+ } b_tx_status; /* This queue is used by both -b and non-b devices */
+ struct mutex io_mutex;
+@@ -147,7 +152,8 @@ struct rtl8187_priv {
+ u8 bits8;
+ __le16 bits16;
+ __le32 bits32;
+- } *io_dmabuf;
++ u8 dummy2[L1_CACHE_BYTES];
++ } *io_dmabuf ____cacheline_aligned;
+ bool rfkill_off;
+ u16 seqno;
+ };
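[Editor's note] The rtl8187 change pads both USB transfer buffers to a full cache line: ____cacheline_aligned starts the member on a line boundary, and the dummy byte array stretches it to L1_CACHE_BYTES so nothing else shares the line. On platforms without coherent DMA the line is invalidated around the transfer, so any neighbouring field on the same line would be silently corrupted. The shape of the trick, with a hypothetical struct:

    #include <linux/cache.h>
    #include <linux/types.h>

    struct dev_priv {
            int cpu_state;                  /* must not share a line with buf */
            union {
                    __le64 buf;             /* DMA target */
                    u8 pad[L1_CACHE_BYTES]; /* own the entire line */
            } ____cacheline_aligned;        /* start on a line boundary */
    };
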
+diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
+index 0d81f766fd0f..a56e9b3c96c2 100644
+--- a/drivers/net/wireless/rtlwifi/ps.c
++++ b/drivers/net/wireless/rtlwifi/ps.c
+@@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
+
+ /*<2> Enable Adapter */
+ if (rtlpriv->cfg->ops->hw_init(hw))
+- return 1;
++ return false;
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+ /*<3> Enable Interrupt */
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+index a82b30a1996c..2eb0b38384dd 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+@@ -937,14 +937,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
+ bool is92c;
+ int err;
+ u8 tmp_u1b;
++ unsigned long flags;
+
+ rtlpci->being_init_adapter = true;
++
++ /* Since this function can take a very long time (up to 350 ms)
++ * and can be called with irqs disabled, reenable the irqs
++ * to let the other devices continue being serviced.
++ *
++ * It is safe doing so since our own interrupts will only be enabled
++ * in a subsequent step.
++ */
++ local_save_flags(flags);
++ local_irq_enable();
++
+ rtlpriv->intf_ops->disable_aspm(hw);
+ rtstatus = _rtl92ce_init_mac(hw);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+ err = 1;
+- return err;
++ goto exit;
+ }
+
+ err = rtl92c_download_fw(hw);
+@@ -952,7 +964,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
+ err = 1;
+- return err;
++ goto exit;
+ }
+
+ rtlhal->last_hmeboxnum = 0;
+@@ -1032,6 +1044,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
+ }
+ rtl92c_dm_init(hw);
++exit:
++ local_irq_restore(flags);
+ rtlpci->being_init_adapter = false;
+ return err;
+ }
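Annotation: besides re-enabling interrupts across the long (up to 350 ms) initialisation, the rtl92ce hunk converts each early "return err" into "goto exit" so that local_irq_restore() is reached on every path. A stub sketch of that single-exit shape (stand-in names only):

    static void restore_stub(void) { /* local_irq_restore(flags) */ }

    static int hw_init_sketch(int mac_ok, int fw_ok)
    {
            int err = 0;
            /* local_save_flags(flags); local_irq_enable(); */
            if (!mac_ok) { err = 1; goto exit; }
            if (!fw_ok)  { err = 1; goto exit; }
            /* ... remainder of the init sequence ... */
    exit:
            restore_stub(); /* runs on success and on every error */
            return err;
    }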
+diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
+index 729d5a101d62..1953c1680986 100644
+--- a/drivers/pci/host/pci-mvebu.c
++++ b/drivers/pci/host/pci-mvebu.c
+@@ -56,14 +56,6 @@
+ #define PCIE_DEBUG_CTRL 0x1a60
+ #define PCIE_DEBUG_SOFT_RESET BIT(20)
+
+-/*
+- * This product ID is registered by Marvell, and used when the Marvell
+- * SoC is not the root complex, but an endpoint on the PCIe bus. It is
+- * therefore safe to re-use this PCI ID for our emulated PCI-to-PCI
+- * bridge.
+- */
+-#define MARVELL_EMULATED_PCI_PCI_BRIDGE_ID 0x7846
+-
+ /* PCI configuration space of a PCI-to-PCI bridge */
+ struct mvebu_sw_pci_bridge {
+ u16 vendor;
+@@ -357,7 +349,8 @@ static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
+
+ bridge->class = PCI_CLASS_BRIDGE_PCI;
+ bridge->vendor = PCI_VENDOR_ID_MARVELL;
+- bridge->device = MARVELL_EMULATED_PCI_PCI_BRIDGE_ID;
++ bridge->device = readl(port->base + PCIE_DEV_ID_OFF) >> 16;
++ bridge->revision = readl(port->base + PCIE_DEV_REV_OFF) & 0xff;
+ bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
+ bridge->cache_line_size = 0x10;
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index bdd64b1b4817..863bc4bb4806 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1120,6 +1120,8 @@ EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
+ static int do_pci_enable_device(struct pci_dev *dev, int bars)
+ {
+ int err;
++ u16 cmd;
++ u8 pin;
+
+ err = pci_set_power_state(dev, PCI_D0);
+ if (err < 0 && err != -EIO)
+@@ -1129,6 +1131,14 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
+ return err;
+ pci_fixup_device(pci_fixup_enable, dev);
+
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++ if (pin) {
++ pci_read_config_word(dev, PCI_COMMAND, &cmd);
++ if (cmd & PCI_COMMAND_INTX_DISABLE)
++ pci_write_config_word(dev, PCI_COMMAND,
++ cmd & ~PCI_COMMAND_INTX_DISABLE);
++ }
++
+ return 0;
+ }
+
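Annotation: the do_pci_enable_device() hunk touches the command register only when the function routes a legacy interrupt (PCI_INTERRUPT_PIN != 0) and the INTx-disable bit is actually set, so quirked-off devices are left alone and the common case costs no config write. A standalone model of that decision (0x400 is bit 10 of the command register, per the PCI spec):

    #include <stdbool.h>
    #include <stdint.h>

    #define PCI_COMMAND_INTX_DISABLE 0x400 /* command reg bit 10 */

    /* Returns the command word to write back; *do_write is set
     * only when a config-space write is actually needed. */
    static uint16_t intx_enable(uint8_t pin, uint16_t cmd, bool *do_write)
    {
            *do_write = pin && (cmd & PCI_COMMAND_INTX_DISABLE);
            if (*do_write)
                    cmd &= (uint16_t)~PCI_COMMAND_INTX_DISABLE;
            return cmd;
    }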
+diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
+index b9f2653e4ef9..b5b11a545e14 100644
+--- a/drivers/regulator/da9063-regulator.c
++++ b/drivers/regulator/da9063-regulator.c
+@@ -1,3 +1,4 @@
++
+ /*
+ * Regulator driver for DA9063 PMIC series
+ *
+@@ -60,7 +61,8 @@ struct da9063_regulator_info {
+ .desc.ops = &da9063_ldo_ops, \
+ .desc.min_uV = (min_mV) * 1000, \
+ .desc.uV_step = (step_mV) * 1000, \
+- .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1), \
++ .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \
++ + (DA9063_V##regl_name##_BIAS)), \
+ .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
+ .desc.enable_mask = DA9063_LDO_EN, \
+ .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 3bb0a1d1622a..e4fa6fb7e72a 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -3186,7 +3186,8 @@ restart:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
+ "SRR cmd %p (se_cmd %p, tag %d, op %x), "
+ "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
+- se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
++ se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
++ cmd->sg_cnt, cmd->offset);
+
+ qlt_handle_srr(vha, sctio, imm);
+
+diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
+index 98ac020bf912..69fd236345cb 100644
+--- a/drivers/staging/android/binder.c
++++ b/drivers/staging/android/binder.c
+@@ -2903,7 +2903,7 @@ static int binder_node_release(struct binder_node *node, int refs)
+ refs++;
+
+ if (!ref->death)
+- goto out;
++ continue;
+
+ death++;
+
+@@ -2916,7 +2916,6 @@ static int binder_node_release(struct binder_node *node, int refs)
+ BUG();
+ }
+
+-out:
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "node %d now dead, refs %d, death %d\n",
+ node->debug_id, refs, death);
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index 606d6f059972..85f692ddd992 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -55,6 +55,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+ /****** 8188EUS ********/
+ {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
++ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
+ {} /* Terminating entry */
+ };
+
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 94c26acfd5a4..938426ae30de 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -106,7 +106,7 @@ static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
+
+ do {
+ /* flush any pending transfer */
+- hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n));
++ hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
+ while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
+ cpu_relax();
+ } while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
+@@ -206,7 +206,7 @@ static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
+ if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
+ return -EAGAIN;
+
+- hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n));
++ hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
+
+ while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
+ cpu_relax();
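Annotation: in this driver hw_write(ci, reg, mask, data) performs a read-modify-write confined to the mask bits, but ENDPTPRIME and ENDPTFLUSH are write-one-to-trigger registers whose bits read back as 1 while an operation is pending. With mask == BIT(n) the RMW copies other endpoints' pending bits back into the register, re-triggering them; with mask == ~0 the call degenerates into a plain write of BIT(n) alone. The arithmetic, runnable in userspace:

    #include <stdint.h>

    /* (reg & ~mask) | (data & mask): the RMW that hw_write() does */
    static uint32_t hw_write_model(uint32_t reg, uint32_t mask,
                                   uint32_t data)
    {
            return (reg & ~mask) | (data & mask);
    }

    int main(void)
    {
            uint32_t pending = 0x5; /* endpoints 0 and 2 mid-flush */
            uint32_t narrow = hw_write_model(pending, 1u << 1, 1u << 1);
            uint32_t full   = hw_write_model(pending, ~0u,     1u << 1);
            /* narrow == 0x7: endpoints 0 and 2 re-triggered;
             * full   == 0x2: only the requested endpoint is hit. */
            return (narrow == 0x7 && full == 0x2) ? 0 : 1;
    }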
+diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c
+index c58fcf1ebe41..95d163cfb626 100644
+--- a/drivers/usb/gadget/bcm63xx_udc.c
++++ b/drivers/usb/gadget/bcm63xx_udc.c
+@@ -361,24 +361,30 @@ static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
+ bcm_writel(val, udc->iudma_regs + off);
+ }
+
+-static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off)
++static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
+ {
+- return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
++ return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
++ (ENETDMA_CHAN_WIDTH * chan));
+ }
+
+-static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
++static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
++ int chan)
+ {
+- bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
++ bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
++ (ENETDMA_CHAN_WIDTH * chan));
+ }
+
+-static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off)
++static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
+ {
+- return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
++ return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
++ (ENETDMA_CHAN_WIDTH * chan));
+ }
+
+-static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
++static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
++ int chan)
+ {
+- bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
++ bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
++ (ENETDMA_CHAN_WIDTH * chan));
+ }
+
+ static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
+@@ -639,7 +645,7 @@ static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
+ } while (!last_bd);
+
+ usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
+- ENETDMAC_CHANCFG_REG(iudma->ch_idx));
++ ENETDMAC_CHANCFG_REG, iudma->ch_idx);
+ }
+
+ /**
+@@ -695,9 +701,9 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
+ bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
+
+ /* stop DMA, then wait for the hardware to wrap up */
+- usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx));
++ usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
+
+- while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) &
++ while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
+ ENETDMAC_CHANCFG_EN_MASK) {
+ udelay(1);
+
+@@ -714,10 +720,10 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
+ dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
+ ch_idx);
+ usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
+- ENETDMAC_CHANCFG_REG(ch_idx));
++ ENETDMAC_CHANCFG_REG, ch_idx);
+ }
+ }
+- usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx));
++ usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
+
+ /* don't leave "live" HW-owned entries for the next guy to step on */
+ for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
+@@ -729,11 +735,11 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
+
+ /* set up IRQs, UBUS burst size, and BD base for this channel */
+ usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
+- ENETDMAC_IRMASK_REG(ch_idx));
+- usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx));
++ ENETDMAC_IRMASK_REG, ch_idx);
++ usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
+
+- usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx));
+- usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx));
++ usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
++ usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
+ }
+
+ /**
+@@ -2036,7 +2042,7 @@ static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
+ spin_lock(&udc->lock);
+
+ usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
+- ENETDMAC_IR_REG(iudma->ch_idx));
++ ENETDMAC_IR_REG, iudma->ch_idx);
+ bep = iudma->bep;
+ rc = iudma_read(udc, iudma);
+
+@@ -2176,18 +2182,18 @@ static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
+ seq_printf(s, " [ep%d]:\n",
+ max_t(int, iudma_defaults[ch_idx].ep_num, 0));
+ seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
+- usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)),
+- usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)),
+- usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)),
+- usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx)));
++ usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
++ usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
++ usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
++ usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
+
+- sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx));
+- sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx));
++ sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
++ sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
+ seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
+- usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)),
++ usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
+ sram2 >> 16, sram2 & 0xffff,
+ sram3 >> 16, sram3 & 0xffff,
+- usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx)));
++ usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
+ seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
+ iudma->n_bds);
+
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 86ab9fd9fe9e..784f6242b70e 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -682,8 +682,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
+ u32 status, masked_status, pcd_status = 0, cmd;
+ int bh;
++ unsigned long flags;
+
+- spin_lock (&ehci->lock);
++ /*
++ * For threadirqs option we use spin_lock_irqsave() variant to prevent
++ * deadlock with ehci hrtimer callback, because hrtimer callbacks run
++ * in interrupt context even when threadirqs is specified. We can go
++ * back to spin_lock() variant when hrtimer callbacks become threaded.
++ */
++ spin_lock_irqsave(&ehci->lock, flags);
+
+ status = ehci_readl(ehci, &ehci->regs->status);
+
+@@ -701,7 +708,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+
+ /* Shared IRQ? */
+ if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
+- spin_unlock(&ehci->lock);
++ spin_unlock_irqrestore(&ehci->lock, flags);
+ return IRQ_NONE;
+ }
+
+@@ -819,7 +826,7 @@ dead:
+
+ if (bh)
+ ehci_work (ehci);
+- spin_unlock (&ehci->lock);
++ spin_unlock_irqrestore(&ehci->lock, flags);
+ if (pcd_status)
+ usb_hcd_poll_rh_status(hcd);
+ return IRQ_HANDLED;
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 835fc0844a66..1bb85bee2625 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -238,6 +238,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
+ int port;
+ int mask;
+ int changed;
++ bool fs_idle_delay;
+
+ ehci_dbg(ehci, "suspend root hub\n");
+
+@@ -272,6 +273,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
+ ehci->bus_suspended = 0;
+ ehci->owned_ports = 0;
+ changed = 0;
++ fs_idle_delay = false;
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *reg = &ehci->regs->port_status [port];
+@@ -300,16 +302,32 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
+ }
+
+ if (t1 != t2) {
++ /*
++ * On some controllers, Wake-On-Disconnect will
++ * generate false wakeup signals until the bus
++ * switches over to full-speed idle. For their
++ * sake, add a delay if we need one.
++ */
++ if ((t2 & PORT_WKDISC_E) &&
++ ehci_port_speed(ehci, t2) ==
++ USB_PORT_STAT_HIGH_SPEED)
++ fs_idle_delay = true;
+ ehci_writel(ehci, t2, reg);
+ changed = 1;
+ }
+ }
++ spin_unlock_irq(&ehci->lock);
++
++ if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
++ /*
++ * Wait for HCD to enter low-power mode or for the bus
++ * to switch to full-speed idle.
++ */
++ usleep_range(5000, 5500);
++ }
+
+ if (changed && ehci->has_tdi_phy_lpm) {
+- spin_unlock_irq(&ehci->lock);
+- msleep(5); /* 5 ms for HCD to enter low-power mode */
+ spin_lock_irq(&ehci->lock);
+-
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
+@@ -322,8 +340,8 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
+ port, (t3 & HOSTPC_PHCD) ?
+ "succeeded" : "failed");
+ }
++ spin_unlock_irq(&ehci->lock);
+ }
+- spin_unlock_irq(&ehci->lock);
+
+ /* Apparently some devices need a >= 1-uframe delay here */
+ if (ehci->bus_suspended)
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index eea9e7b6af4c..b7f715fead15 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -908,6 +908,8 @@ static struct usb_device_id id_table_combined [] = {
+ /* Crucible Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
++ /* Cressi Devices */
++ { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
+ { } /* Terminating entry */
+ };
+
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 1e2d369df86e..e599fbfcde5f 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1320,3 +1320,9 @@
+ * Manufacturer: Smart GSM Team
+ */
+ #define FTDI_Z3X_PID 0x0011
++
++/*
++ * Product: Cressi PC Interface
++ * Manufacturer: Cressi
++ */
++#define FTDI_CRESSI_PID 0x87d0
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 216d20affba8..68fc9fe65936 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1526,7 +1526,8 @@ static const struct usb_device_id option_ids[] = {
+ /* Cinterion */
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 831eb4fd197d..b12176f2013c 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -70,7 +70,12 @@ enum {
+ };
+
+ struct vhost_net_ubuf_ref {
+- struct kref kref;
++ /* refcount follows semantics similar to kref:
++ * 0: object is released
++ * 1: no outstanding ubufs
++ * >1: outstanding ubufs
++ */
++ atomic_t refcount;
+ wait_queue_head_t wait;
+ struct vhost_virtqueue *vq;
+ };
+@@ -116,14 +121,6 @@ static void vhost_net_enable_zcopy(int vq)
+ vhost_net_zcopy_mask |= 0x1 << vq;
+ }
+
+-static void vhost_net_zerocopy_done_signal(struct kref *kref)
+-{
+- struct vhost_net_ubuf_ref *ubufs;
+-
+- ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
+- wake_up(&ubufs->wait);
+-}
+-
+ static struct vhost_net_ubuf_ref *
+ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
+ {
+@@ -134,21 +131,24 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
+ ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
+ if (!ubufs)
+ return ERR_PTR(-ENOMEM);
+- kref_init(&ubufs->kref);
++ atomic_set(&ubufs->refcount, 1);
+ init_waitqueue_head(&ubufs->wait);
+ ubufs->vq = vq;
+ return ubufs;
+ }
+
+-static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
++static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+ {
+- kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
++ int r = atomic_sub_return(1, &ubufs->refcount);
++ if (unlikely(!r))
++ wake_up(&ubufs->wait);
++ return r;
+ }
+
+ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
+ {
+- kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
+- wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
++ vhost_net_ubuf_put(ubufs);
++ wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
+ }
+
+ static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
+@@ -306,22 +306,21 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
+ {
+ struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
+ struct vhost_virtqueue *vq = ubufs->vq;
+- int cnt = atomic_read(&ubufs->kref.refcount);
++ int cnt;
+
+ /* set len to mark this desc buffers done DMA */
+ vq->heads[ubuf->desc].len = success ?
+ VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
+- vhost_net_ubuf_put(ubufs);
++ cnt = vhost_net_ubuf_put(ubufs);
+
+ /*
+ * Trigger polling thread if guest stopped submitting new buffers:
+- * in this case, the refcount after decrement will eventually reach 1
+- * so here it is 2.
++ * in this case, the refcount after decrement will eventually reach 1.
+ * We also trigger polling periodically after each 16 packets
+ * (the value 16 here is more or less arbitrary, it's tuned to trigger
+ * less than 10% of times).
+ */
+- if (cnt <= 2 || !(cnt % 16))
++ if (cnt <= 1 || !(cnt % 16))
+ vhost_poll_queue(&vq->poll);
+ }
+
+@@ -420,7 +419,7 @@ static void handle_tx(struct vhost_net *net)
+ msg.msg_control = ubuf;
+ msg.msg_controllen = sizeof(ubuf);
+ ubufs = nvq->ubufs;
+- kref_get(&ubufs->kref);
++ atomic_inc(&ubufs->refcount);
+ nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
+ } else {
+ msg.msg_control = NULL;
+@@ -785,7 +784,7 @@ static void vhost_net_flush(struct vhost_net *n)
+ vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
+ mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
+ n->tx_flush = false;
+- kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
++ atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
+ mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
+ }
+ }
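Annotation: the vhost-net conversion replaces struct kref, for which zero is not a legal resting state, with a bare atomic whose meaning the new comment spells out: 1 means no ubufs outstanding, 0 means released, and the putter wakes waiters exactly when the count hits zero, which is also why the polling threshold drops from cnt <= 2 to cnt <= 1. A userspace C11 analogue (the wake is stubbed with a flag):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct ubuf_ref {
            atomic_int refcount; /* 0 released, 1 idle, >1 in flight */
            bool woken;          /* stands in for wait_queue_head_t  */
    };

    static void ubuf_get(struct ubuf_ref *u)
    {
            atomic_fetch_add(&u->refcount, 1); /* one zerocopy tx */
    }

    static int ubuf_put(struct ubuf_ref *u)
    {
            int r = atomic_fetch_sub(&u->refcount, 1) - 1;

            if (r == 0)
                    u->woken = true; /* wake_up(&ubufs->wait) */
            return r;                /* gates vhost_poll_queue() */
    }

    int main(void)
    {
            struct ubuf_ref u = { .refcount = 1, .woken = false };

            ubuf_get(&u); /* tx submitted                */
            ubuf_put(&u); /* DMA completed: back to 1    */
            ubuf_put(&u); /* flush: 1 -> 0, waiters wake */
            return u.woken ? 0 : 1;
    }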
+diff --git a/fs/attr.c b/fs/attr.c
+index 1449adb14ef6..8dd5825ec708 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -182,11 +182,6 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
+ return -EPERM;
+ }
+
+- if ((ia_valid & ATTR_SIZE) && IS_I_VERSION(inode)) {
+- if (attr->ia_size != inode->i_size)
+- inode_inc_iversion(inode);
+- }
+-
+ if ((ia_valid & ATTR_MODE)) {
+ umode_t amode = attr->ia_mode;
+ /* Flag setting protected by i_mutex */
+diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
+index 6dea2b90b4d5..76273c1d26a6 100644
+--- a/fs/bio-integrity.c
++++ b/fs/bio-integrity.c
+@@ -458,7 +458,7 @@ static int bio_integrity_verify(struct bio *bio)
+ bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
+ bix.sector_size = bi->sector_size;
+
+- bio_for_each_segment(bv, bio, i) {
++ bio_for_each_segment_all(bv, bio, i) {
+ void *kaddr = kmap_atomic(bv->bv_page);
+ bix.data_buf = kaddr + bv->bv_offset;
+ bix.data_size = bv->bv_len;
+diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
+index 6aad98cb343f..6e9ff8fac75a 100644
+--- a/fs/btrfs/compression.c
++++ b/fs/btrfs/compression.c
+@@ -1012,6 +1012,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
+ bytes = min(bytes, working_bytes);
+ kaddr = kmap_atomic(page_out);
+ memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
++ if (*pg_index == (vcnt - 1) && *pg_offset == 0)
++ memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
+ kunmap_atomic(kaddr);
+ flush_dcache_page(page_out);
+
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index b544a44d696e..c1123ecde6c9 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -39,7 +39,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
+ struct extent_buffer *src_buf);
+ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
+ int level, int slot);
+-static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
++static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb);
+ static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
+
+@@ -475,6 +475,8 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
+ * the index is the shifted logical of the *new* root node for root replace
+ * operations, or the shifted logical of the affected block for all other
+ * operations.
++ *
++ * Note: must be called with write lock (tree_mod_log_write_lock).
+ */
+ static noinline int
+ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
+@@ -483,24 +485,9 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
+ struct rb_node **new;
+ struct rb_node *parent = NULL;
+ struct tree_mod_elem *cur;
+- int ret = 0;
+
+ BUG_ON(!tm);
+
+- tree_mod_log_write_lock(fs_info);
+- if (list_empty(&fs_info->tree_mod_seq_list)) {
+- tree_mod_log_write_unlock(fs_info);
+- /*
+- * Ok we no longer care about logging modifications, free up tm
+- * and return 0. Any callers shouldn't be using tm after
+- * calling tree_mod_log_insert, but if they do we can just
+- * change this to return a special error code to let the callers
+- * do their own thing.
+- */
+- kfree(tm);
+- return 0;
+- }
+-
+ spin_lock(&fs_info->tree_mod_seq_lock);
+ tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
+ spin_unlock(&fs_info->tree_mod_seq_lock);
+@@ -518,18 +505,13 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
+ new = &((*new)->rb_left);
+ else if (cur->seq > tm->seq)
+ new = &((*new)->rb_right);
+- else {
+- ret = -EEXIST;
+- kfree(tm);
+- goto out;
+- }
++ else
++ return -EEXIST;
+ }
+
+ rb_link_node(&tm->node, parent, new);
+ rb_insert_color(&tm->node, tm_root);
+-out:
+- tree_mod_log_write_unlock(fs_info);
+- return ret;
++ return 0;
+ }
+
+ /*
+@@ -545,19 +527,38 @@ static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
+ return 1;
+ if (eb && btrfs_header_level(eb) == 0)
+ return 1;
++
++ tree_mod_log_write_lock(fs_info);
++ if (list_empty(&(fs_info)->tree_mod_seq_list)) {
++ tree_mod_log_write_unlock(fs_info);
++ return 1;
++ }
++
+ return 0;
+ }
+
+-static inline int
+-__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
+- struct extent_buffer *eb, int slot,
+- enum mod_log_op op, gfp_t flags)
++/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
++static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
++ struct extent_buffer *eb)
++{
++ smp_mb();
++ if (list_empty(&(fs_info)->tree_mod_seq_list))
++ return 0;
++ if (eb && btrfs_header_level(eb) == 0)
++ return 0;
++
++ return 1;
++}
++
++static struct tree_mod_elem *
++alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
++ enum mod_log_op op, gfp_t flags)
+ {
+ struct tree_mod_elem *tm;
+
+ tm = kzalloc(sizeof(*tm), flags);
+ if (!tm)
+- return -ENOMEM;
++ return NULL;
+
+ tm->index = eb->start >> PAGE_CACHE_SHIFT;
+ if (op != MOD_LOG_KEY_ADD) {
+@@ -567,8 +568,9 @@ __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
+ tm->op = op;
+ tm->slot = slot;
+ tm->generation = btrfs_node_ptr_generation(eb, slot);
++ RB_CLEAR_NODE(&tm->node);
+
+- return __tree_mod_log_insert(fs_info, tm);
++ return tm;
+ }
+
+ static noinline int
+@@ -576,10 +578,27 @@ tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb, int slot,
+ enum mod_log_op op, gfp_t flags)
+ {
+- if (tree_mod_dont_log(fs_info, eb))
++ struct tree_mod_elem *tm;
++ int ret;
++
++ if (!tree_mod_need_log(fs_info, eb))
++ return 0;
++
++ tm = alloc_tree_mod_elem(eb, slot, op, flags);
++ if (!tm)
++ return -ENOMEM;
++
++ if (tree_mod_dont_log(fs_info, eb)) {
++ kfree(tm);
+ return 0;
++ }
++
++ ret = __tree_mod_log_insert(fs_info, tm);
++ tree_mod_log_write_unlock(fs_info);
++ if (ret)
++ kfree(tm);
+
+- return __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
++ return ret;
+ }
+
+ static noinline int
+@@ -587,53 +606,95 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb, int dst_slot, int src_slot,
+ int nr_items, gfp_t flags)
+ {
+- struct tree_mod_elem *tm;
+- int ret;
++ struct tree_mod_elem *tm = NULL;
++ struct tree_mod_elem **tm_list = NULL;
++ int ret = 0;
+ int i;
++ int locked = 0;
+
+- if (tree_mod_dont_log(fs_info, eb))
++ if (!tree_mod_need_log(fs_info, eb))
+ return 0;
+
++ tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
++ if (!tm_list)
++ return -ENOMEM;
++
++ tm = kzalloc(sizeof(*tm), flags);
++ if (!tm) {
++ ret = -ENOMEM;
++ goto free_tms;
++ }
++
++ tm->index = eb->start >> PAGE_CACHE_SHIFT;
++ tm->slot = src_slot;
++ tm->move.dst_slot = dst_slot;
++ tm->move.nr_items = nr_items;
++ tm->op = MOD_LOG_MOVE_KEYS;
++
++ for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
++ tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
++ MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
++ if (!tm_list[i]) {
++ ret = -ENOMEM;
++ goto free_tms;
++ }
++ }
++
++ if (tree_mod_dont_log(fs_info, eb))
++ goto free_tms;
++ locked = 1;
++
+ /*
+ * When we override something during the move, we log these removals.
+ * This can only happen when we move towards the beginning of the
+ * buffer, i.e. dst_slot < src_slot.
+ */
+ for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
+- ret = __tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
+- MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
+- BUG_ON(ret < 0);
++ ret = __tree_mod_log_insert(fs_info, tm_list[i]);
++ if (ret)
++ goto free_tms;
+ }
+
+- tm = kzalloc(sizeof(*tm), flags);
+- if (!tm)
+- return -ENOMEM;
++ ret = __tree_mod_log_insert(fs_info, tm);
++ if (ret)
++ goto free_tms;
++ tree_mod_log_write_unlock(fs_info);
++ kfree(tm_list);
+
+- tm->index = eb->start >> PAGE_CACHE_SHIFT;
+- tm->slot = src_slot;
+- tm->move.dst_slot = dst_slot;
+- tm->move.nr_items = nr_items;
+- tm->op = MOD_LOG_MOVE_KEYS;
++ return 0;
++free_tms:
++ for (i = 0; i < nr_items; i++) {
++ if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
++ rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
++ kfree(tm_list[i]);
++ }
++ if (locked)
++ tree_mod_log_write_unlock(fs_info);
++ kfree(tm_list);
++ kfree(tm);
+
+- return __tree_mod_log_insert(fs_info, tm);
++ return ret;
+ }
+
+-static inline void
+-__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
++static inline int
++__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
++ struct tree_mod_elem **tm_list,
++ int nritems)
+ {
+- int i;
+- u32 nritems;
++ int i, j;
+ int ret;
+
+- if (btrfs_header_level(eb) == 0)
+- return;
+-
+- nritems = btrfs_header_nritems(eb);
+ for (i = nritems - 1; i >= 0; i--) {
+- ret = __tree_mod_log_insert_key(fs_info, eb, i,
+- MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
+- BUG_ON(ret < 0);
++ ret = __tree_mod_log_insert(fs_info, tm_list[i]);
++ if (ret) {
++ for (j = nritems - 1; j > i; j--)
++ rb_erase(&tm_list[j]->node,
++ &fs_info->tree_mod_log);
++ return ret;
++ }
+ }
++
++ return 0;
+ }
+
+ static noinline int
+@@ -642,17 +703,38 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *new_root, gfp_t flags,
+ int log_removal)
+ {
+- struct tree_mod_elem *tm;
++ struct tree_mod_elem *tm = NULL;
++ struct tree_mod_elem **tm_list = NULL;
++ int nritems = 0;
++ int ret = 0;
++ int i;
+
+- if (tree_mod_dont_log(fs_info, NULL))
++ if (!tree_mod_need_log(fs_info, NULL))
+ return 0;
+
+- if (log_removal)
+- __tree_mod_log_free_eb(fs_info, old_root);
++ if (log_removal && btrfs_header_level(old_root) > 0) {
++ nritems = btrfs_header_nritems(old_root);
++ tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
++ flags);
++ if (!tm_list) {
++ ret = -ENOMEM;
++ goto free_tms;
++ }
++ for (i = 0; i < nritems; i++) {
++ tm_list[i] = alloc_tree_mod_elem(old_root, i,
++ MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
++ if (!tm_list[i]) {
++ ret = -ENOMEM;
++ goto free_tms;
++ }
++ }
++ }
+
+ tm = kzalloc(sizeof(*tm), flags);
+- if (!tm)
+- return -ENOMEM;
++ if (!tm) {
++ ret = -ENOMEM;
++ goto free_tms;
++ }
+
+ tm->index = new_root->start >> PAGE_CACHE_SHIFT;
+ tm->old_root.logical = old_root->start;
+@@ -660,7 +742,30 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
+ tm->generation = btrfs_header_generation(old_root);
+ tm->op = MOD_LOG_ROOT_REPLACE;
+
+- return __tree_mod_log_insert(fs_info, tm);
++ if (tree_mod_dont_log(fs_info, NULL))
++ goto free_tms;
++
++ if (tm_list)
++ ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
++ if (!ret)
++ ret = __tree_mod_log_insert(fs_info, tm);
++
++ tree_mod_log_write_unlock(fs_info);
++ if (ret)
++ goto free_tms;
++ kfree(tm_list);
++
++ return ret;
++
++free_tms:
++ if (tm_list) {
++ for (i = 0; i < nritems; i++)
++ kfree(tm_list[i]);
++ kfree(tm_list);
++ }
++ kfree(tm);
++
++ return ret;
+ }
+
+ static struct tree_mod_elem *
+@@ -729,31 +834,75 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
+ return __tree_mod_log_search(fs_info, start, min_seq, 0);
+ }
+
+-static noinline void
++static noinline int
+ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
+ struct extent_buffer *src, unsigned long dst_offset,
+ unsigned long src_offset, int nr_items)
+ {
+- int ret;
++ int ret = 0;
++ struct tree_mod_elem **tm_list = NULL;
++ struct tree_mod_elem **tm_list_add, **tm_list_rem;
+ int i;
++ int locked = 0;
+
+- if (tree_mod_dont_log(fs_info, NULL))
+- return;
++ if (!tree_mod_need_log(fs_info, NULL))
++ return 0;
+
+ if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
+- return;
++ return 0;
+
++ tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
++ GFP_NOFS);
++ if (!tm_list)
++ return -ENOMEM;
++
++ tm_list_add = tm_list;
++ tm_list_rem = tm_list + nr_items;
+ for (i = 0; i < nr_items; i++) {
+- ret = __tree_mod_log_insert_key(fs_info, src,
+- i + src_offset,
+- MOD_LOG_KEY_REMOVE, GFP_NOFS);
+- BUG_ON(ret < 0);
+- ret = __tree_mod_log_insert_key(fs_info, dst,
+- i + dst_offset,
+- MOD_LOG_KEY_ADD,
+- GFP_NOFS);
+- BUG_ON(ret < 0);
++ tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
++ MOD_LOG_KEY_REMOVE, GFP_NOFS);
++ if (!tm_list_rem[i]) {
++ ret = -ENOMEM;
++ goto free_tms;
++ }
++
++ tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
++ MOD_LOG_KEY_ADD, GFP_NOFS);
++ if (!tm_list_add[i]) {
++ ret = -ENOMEM;
++ goto free_tms;
++ }
++ }
++
++ if (tree_mod_dont_log(fs_info, NULL))
++ goto free_tms;
++ locked = 1;
++
++ for (i = 0; i < nr_items; i++) {
++ ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
++ if (ret)
++ goto free_tms;
++ ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
++ if (ret)
++ goto free_tms;
++ }
++
++ tree_mod_log_write_unlock(fs_info);
++ kfree(tm_list);
++
++ return 0;
++
++free_tms:
++ for (i = 0; i < nr_items * 2; i++) {
++ if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
++ rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
++ kfree(tm_list[i]);
+ }
++ if (locked)
++ tree_mod_log_write_unlock(fs_info);
++ kfree(tm_list);
++
++ return ret;
+ }
+
+ static inline void
+@@ -772,18 +921,58 @@ tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
+ {
+ int ret;
+
+- ret = __tree_mod_log_insert_key(fs_info, eb, slot,
++ ret = tree_mod_log_insert_key(fs_info, eb, slot,
+ MOD_LOG_KEY_REPLACE,
+ atomic ? GFP_ATOMIC : GFP_NOFS);
+ BUG_ON(ret < 0);
+ }
+
+-static noinline void
++static noinline int
+ tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
+ {
++ struct tree_mod_elem **tm_list = NULL;
++ int nritems = 0;
++ int i;
++ int ret = 0;
++
++ if (btrfs_header_level(eb) == 0)
++ return 0;
++
++ if (!tree_mod_need_log(fs_info, NULL))
++ return 0;
++
++ nritems = btrfs_header_nritems(eb);
++ tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
++ GFP_NOFS);
++ if (!tm_list)
++ return -ENOMEM;
++
++ for (i = 0; i < nritems; i++) {
++ tm_list[i] = alloc_tree_mod_elem(eb, i,
++ MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
++ if (!tm_list[i]) {
++ ret = -ENOMEM;
++ goto free_tms;
++ }
++ }
++
+ if (tree_mod_dont_log(fs_info, eb))
+- return;
+- __tree_mod_log_free_eb(fs_info, eb);
++ goto free_tms;
++
++ ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
++ tree_mod_log_write_unlock(fs_info);
++ if (ret)
++ goto free_tms;
++ kfree(tm_list);
++
++ return 0;
++
++free_tms:
++ for (i = 0; i < nritems; i++)
++ kfree(tm_list[i]);
++ kfree(tm_list);
++
++ return ret;
+ }
+
+ static noinline void
+@@ -1041,8 +1230,13 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ btrfs_set_node_ptr_generation(parent, parent_slot,
+ trans->transid);
+ btrfs_mark_buffer_dirty(parent);
+- if (last_ref)
+- tree_mod_log_free_eb(root->fs_info, buf);
++ if (last_ref) {
++ ret = tree_mod_log_free_eb(root->fs_info, buf);
++ if (ret) {
++ btrfs_abort_transaction(trans, root, ret);
++ return ret;
++ }
++ }
+ btrfs_free_tree_block(trans, root, buf, parent_start,
+ last_ref);
+ }
+@@ -3023,8 +3217,12 @@ static int push_node_left(struct btrfs_trans_handle *trans,
+ } else
+ push_items = min(src_nritems - 8, push_items);
+
+- tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
+- push_items);
++ ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
++ push_items);
++ if (ret) {
++ btrfs_abort_transaction(trans, root, ret);
++ return ret;
++ }
+ copy_extent_buffer(dst, src,
+ btrfs_node_key_ptr_offset(dst_nritems),
+ btrfs_node_key_ptr_offset(0),
+@@ -3094,8 +3292,12 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
+ (dst_nritems) *
+ sizeof(struct btrfs_key_ptr));
+
+- tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
+- src_nritems - push_items, push_items);
++ ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
++ src_nritems - push_items, push_items);
++ if (ret) {
++ btrfs_abort_transaction(trans, root, ret);
++ return ret;
++ }
+ copy_extent_buffer(dst, src,
+ btrfs_node_key_ptr_offset(0),
+ btrfs_node_key_ptr_offset(src_nritems - push_items),
+@@ -3296,7 +3498,12 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
+ btrfs_header_chunk_tree_uuid(split),
+ BTRFS_UUID_SIZE);
+
+- tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
++ ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
++ mid, c_nritems - mid);
++ if (ret) {
++ btrfs_abort_transaction(trans, root, ret);
++ return ret;
++ }
+ copy_extent_buffer(split, c,
+ btrfs_node_key_ptr_offset(0),
+ btrfs_node_key_ptr_offset(mid),
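Annotation: all of the ctree.c hunks above share one shape: preallocate every tree_mod element outside the write lock (where sleeping GFP flags are safe), take the lock once via tree_mod_dont_log(), insert all-or-nothing, and on failure rb_erase whatever already went in before freeing, so the old BUG_ON(ret < 0) on -ENOMEM becomes an error the callers pass to btrfs_abort_transaction(). The control flow reduced to a skeleton (pthread stand-ins, nothing btrfs-specific):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

    struct elem { int payload; };
    static int  insert_locked(struct elem *e) { (void)e; return 0; }
    static void erase_locked(struct elem *e)  { (void)e; }

    static int log_items(int n)
    {
            struct elem **list = calloc((size_t)n, sizeof(*list));
            int i;

            if (!list)
                    return -1;
            for (i = 0; i < n; i++) { /* allocate before locking */
                    list[i] = malloc(sizeof(**list));
                    if (!list[i])
                            goto free_all;
            }

            pthread_mutex_lock(&log_lock);
            for (i = 0; i < n; i++) {
                    if (insert_locked(list[i])) {
                            while (i--)            /* unwind */
                                    erase_locked(list[i]);
                            pthread_mutex_unlock(&log_lock);
                            goto free_all;
                    }
            }
            pthread_mutex_unlock(&log_lock);
            free(list); /* elements now owned by the log */
            return 0;

    free_all:
            for (i = 0; i < n; i++)
                    free(list[i]); /* free(NULL) is a no-op */
            free(list);
            return -1;
    }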
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 1b63d29e44b7..3d03d2e0849c 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4354,8 +4354,12 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
+ * these flags set. For all other operations the VFS set these flags
+ * explicitly if it wants a timestamp update.
+ */
+- if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
+- inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
++ if (newsize != oldsize) {
++ inode_inc_iversion(inode);
++ if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
++ inode->i_ctime = inode->i_mtime =
++ current_fs_time(inode->i_sb);
++ }
+
+ if (newsize > oldsize) {
+ truncate_pagecache(inode, newsize);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 7ddddf2e2504..81476e18a789 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -2381,7 +2381,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
+ unsigned long nr_segs, loff_t *poffset)
+ {
+ unsigned long nr_pages, i;
+- size_t copied, len, cur_len;
++ size_t bytes, copied, len, cur_len;
+ ssize_t total_written = 0;
+ loff_t offset;
+ struct iov_iter it;
+@@ -2436,14 +2436,45 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
+
+ save_len = cur_len;
+ for (i = 0; i < nr_pages; i++) {
+- copied = min_t(const size_t, cur_len, PAGE_SIZE);
++ bytes = min_t(const size_t, cur_len, PAGE_SIZE);
+ copied = iov_iter_copy_from_user(wdata->pages[i], &it,
+- 0, copied);
++ 0, bytes);
+ cur_len -= copied;
+ iov_iter_advance(&it, copied);
++ /*
++ * If we didn't copy as much as we expected, then that
++ * may mean we trod into an unmapped area. Stop copying
++ * at that point. On the next pass through the big
++ * loop, we'll likely end up getting a zero-length
++ * write and bailing out of it.
++ */
++ if (copied < bytes)
++ break;
+ }
+ cur_len = save_len - cur_len;
+
++ /*
++ * If we have no data to send, then that probably means that
++ * the copy above failed altogether. That's most likely because
++ * the address in the iovec was bogus. Set the rc to -EFAULT,
++ * free anything we allocated and bail out.
++ */
++ if (!cur_len) {
++ for (i = 0; i < nr_pages; i++)
++ put_page(wdata->pages[i]);
++ kfree(wdata);
++ rc = -EFAULT;
++ break;
++ }
++
++ /*
++ * i + 1 now represents the number of pages we actually used in
++ * the copy phase above. Bring nr_pages down to that, and free
++ * any pages that we didn't use.
++ */
++ for ( ; nr_pages > i + 1; nr_pages--)
++ put_page(wdata->pages[nr_pages - 1]);
++
+ wdata->sync_mode = WB_SYNC_ALL;
+ wdata->nr_pages = nr_pages;
+ wdata->offset = (__u64)offset;
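Annotation: the cifs_iovec_write() fix separates the bytes requested from iov_iter_copy_from_user() from the bytes it actually returned, stops filling pages at the first short copy (a faulting user address), releases the pages that were never used, and returns -EFAULT only when nothing was copied at all. The loop shape in isolation (copy_step() is a stub standing in for the iov_iter helper):

    #include <stddef.h>

    /* Stub: copies up to 'want' bytes but faults once 'budget'
     * bytes have been consumed overall. */
    static size_t copy_step(size_t want, size_t budget)
    {
            return want < budget ? want : budget;
    }

    /* Returns bytes gathered; 0 tells the caller to bail -EFAULT. */
    static size_t fill_pages(size_t len, size_t page_size,
                             size_t fault_budget)
    {
            size_t done = 0;

            while (len > 0) {
                    size_t bytes  = len < page_size ? len : page_size;
                    size_t copied = copy_step(bytes, fault_budget - done);

                    done += copied;
                    len  -= copied;
                    if (copied < bytes)
                            break; /* unmapped area: stop copying */
            }
            return done; /* caller trims the unused tail pages */
    }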
+diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
+index c38350851b08..bc0bb9c34f72 100644
+--- a/fs/cifs/smb2glob.h
++++ b/fs/cifs/smb2glob.h
+@@ -57,4 +57,7 @@
+ #define SMB2_CMACAES_SIZE (16)
+ #define SMB3_SIGNKEY_SIZE (16)
+
++/* Maximum buffer size value we can send with 1 credit */
++#define SMB2_MAX_BUFFER_SIZE 65536
++
+ #endif /* _SMB2_GLOB_H */
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 861b33214144..027a0c6f7ca0 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -182,11 +182,8 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
+ /* start with specified wsize, or default */
+ wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
+ wsize = min_t(unsigned int, wsize, server->max_write);
+- /*
+- * limit write size to 2 ** 16, because we don't support multicredit
+- * requests now.
+- */
+- wsize = min_t(unsigned int, wsize, 2 << 15);
++ /* set it to the maximum buffer size value we can send with 1 credit */
++ wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
+
+ return wsize;
+ }
+@@ -200,11 +197,8 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
+ /* start with specified rsize, or default */
+ rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
+ rsize = min_t(unsigned int, rsize, server->max_read);
+- /*
+- * limit write size to 2 ** 16, because we don't support multicredit
+- * requests now.
+- */
+- rsize = min_t(unsigned int, rsize, 2 << 15);
++ /* set it to the maximum buffer size value we can send with 1 credit */
++ rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
+
+ return rsize;
+ }
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index edccb5252462..06d29e3f5d10 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -413,7 +413,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+
+ /* SMB2 only has an extended negflavor */
+ server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
+- server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
++ /* set it to the maximum buffer size value we can send with 1 credit */
++ server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
++ SMB2_MAX_BUFFER_SIZE);
+ server->max_read = le32_to_cpu(rsp->MaxReadSize);
+ server->max_write = le32_to_cpu(rsp->MaxWriteSize);
+ /* BB Do we need to validate the SecurityMode? */
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 745faaa7ef95..a9d2bf941066 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -770,6 +770,8 @@ do { \
+ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
+ (einode)->xtime.tv_sec = \
+ (signed)le32_to_cpu((raw_inode)->xtime); \
++ else \
++ (einode)->xtime.tv_sec = 0; \
+ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
+ ext4_decode_extra_time(&(einode)->xtime, \
+ raw_inode->xtime ## _extra); \
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index f76027fe58ae..e678549ec994 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3937,6 +3937,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
+ } else
+ err = ret;
+ map->m_flags |= EXT4_MAP_MAPPED;
++ map->m_pblk = newblock;
+ if (allocated > map->m_len)
+ allocated = map->m_len;
+ map->m_len = allocated;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 1ddee3dfabe3..55fe412b2410 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4582,6 +4582,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+ if (attr->ia_size > sbi->s_bitmap_maxbytes)
+ return -EFBIG;
+ }
++
++ if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
++ inode_inc_iversion(inode);
++
+ if (S_ISREG(inode->i_mode) &&
+ (attr->ia_size < inode->i_size)) {
+ if (ext4_should_order_data(inode)) {
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index a569d335f804..d011b69ae8ae 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -144,7 +144,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
+ handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
+ if (IS_ERR(handle)) {
+ err = -EINVAL;
+- goto swap_boot_out;
++ goto journal_err_out;
+ }
+
+ /* Protect extent tree against block allocations via delalloc */
+@@ -202,6 +202,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
+
+ ext4_double_up_write_data_sem(inode, inode_bl);
+
++journal_err_out:
+ ext4_inode_resume_unlocked_dio(inode);
+ ext4_inode_resume_unlocked_dio(inode_bl);
+
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index c5adbb318a90..f3b84cd9de56 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -243,6 +243,7 @@ static int ext4_alloc_group_tables(struct super_block *sb,
+ ext4_group_t group;
+ ext4_group_t last_group;
+ unsigned overhead;
++ __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
+
+ BUG_ON(flex_gd->count == 0 || group_data == NULL);
+
+@@ -266,7 +267,7 @@ next_group:
+ src_group++;
+ for (; src_group <= last_group; src_group++) {
+ overhead = ext4_group_overhead_blocks(sb, src_group);
+- if (overhead != 0)
++ if (overhead == 0)
+ last_blk += group_data[src_group - group].blocks_count;
+ else
+ break;
+@@ -280,8 +281,7 @@ next_group:
+ group = ext4_get_group_number(sb, start_blk - 1);
+ group -= group_data[0].group;
+ group_data[group].free_blocks_count--;
+- if (flexbg_size > 1)
+- flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
++ flex_gd->bg_flags[group] &= uninit_mask;
+ }
+
+ /* Allocate inode bitmaps */
+@@ -292,22 +292,30 @@ next_group:
+ group = ext4_get_group_number(sb, start_blk - 1);
+ group -= group_data[0].group;
+ group_data[group].free_blocks_count--;
+- if (flexbg_size > 1)
+- flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
++ flex_gd->bg_flags[group] &= uninit_mask;
+ }
+
+ /* Allocate inode tables */
+ for (; it_index < flex_gd->count; it_index++) {
+- if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk)
++ unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
++ ext4_fsblk_t next_group_start;
++
++ if (start_blk + itb > last_blk)
+ goto next_group;
+ group_data[it_index].inode_table = start_blk;
+- group = ext4_get_group_number(sb, start_blk - 1);
++ group = ext4_get_group_number(sb, start_blk);
++ next_group_start = ext4_group_first_block_no(sb, group + 1);
+ group -= group_data[0].group;
+- group_data[group].free_blocks_count -=
+- EXT4_SB(sb)->s_itb_per_group;
+- if (flexbg_size > 1)
+- flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+
++ if (start_blk + itb > next_group_start) {
++ flex_gd->bg_flags[group + 1] &= uninit_mask;
++ overhead = start_blk + itb - next_group_start;
++ group_data[group + 1].free_blocks_count -= overhead;
++ itb -= overhead;
++ }
++
++ group_data[group].free_blocks_count -= itb;
++ flex_gd->bg_flags[group] &= uninit_mask;
+ start_blk += EXT4_SB(sb)->s_itb_per_group;
+ }
+
+@@ -401,7 +409,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
+ start = ext4_group_first_block_no(sb, group);
+ group -= flex_gd->groups[0].group;
+
+- count2 = sb->s_blocksize * 8 - (block - start);
++ count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
+ if (count2 > count)
+ count2 = count;
+
+@@ -620,7 +628,7 @@ handle_ib:
+ if (err)
+ goto out;
+ count = group_table_count[j];
+- start = group_data[i].block_bitmap;
++ start = (&group_data[i].block_bitmap)[j];
+ block = start;
+ }
+
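Annotation: the subtle case in the resize.c hunk is an inode table that starts near the end of one block group and runs into the next. The blocks past next_group_start must be charged to group + 1 (whose BLOCK_UNINIT flag is cleared as well), and only the remainder to the current group. Worked with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
            /* assumed geometry: 32768 blocks per group */
            unsigned long next_group_start = 65536; /* group 2 starts */
            unsigned long start_blk        = 65530; /* table start    */
            unsigned long itb              = 512;   /* table blocks   */

            if (start_blk + itb > next_group_start) {
                    unsigned long overhead =
                            start_blk + itb - next_group_start;
                    /* 506 blocks land in the next group, 6 here */
                    printf("next group: %lu, this group: %lu\n",
                           overhead, itb - overhead);
            }
            return 0;
    }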
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index b947e0af9956..d9711dc42164 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3667,16 +3667,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ for (i = 0; i < 4; i++)
+ sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
+ sbi->s_def_hash_version = es->s_def_hash_version;
+- i = le32_to_cpu(es->s_flags);
+- if (i & EXT2_FLAGS_UNSIGNED_HASH)
+- sbi->s_hash_unsigned = 3;
+- else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
++ if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
++ i = le32_to_cpu(es->s_flags);
++ if (i & EXT2_FLAGS_UNSIGNED_HASH)
++ sbi->s_hash_unsigned = 3;
++ else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
+ #ifdef __CHAR_UNSIGNED__
+- es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
+- sbi->s_hash_unsigned = 3;
++ if (!(sb->s_flags & MS_RDONLY))
++ es->s_flags |=
++ cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
++ sbi->s_hash_unsigned = 3;
+ #else
+- es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
++ if (!(sb->s_flags & MS_RDONLY))
++ es->s_flags |=
++ cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
+ #endif
++ }
+ }
+
+ /* Handle clustersize */
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index b0b74e58697b..7272cc6977ec 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -514,11 +514,13 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
+ * similarly constrained call sites
+ */
+ ret = start_this_handle(journal, handle, GFP_NOFS);
+- if (ret < 0)
++ if (ret < 0) {
+ jbd2_journal_free_reserved(handle);
++ return ret;
++ }
+ handle->h_type = type;
+ handle->h_line_no = line_no;
+- return ret;
++ return 0;
+ }
+ EXPORT_SYMBOL(jbd2_journal_start_reserved);
+
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index eda8879171c4..0ee22ab9ef97 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -164,17 +164,16 @@ static void nfs_zap_caches_locked(struct inode *inode)
+ if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
+ nfs_fscache_invalidate(inode);
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+- | NFS_INO_INVALID_LABEL
+ | NFS_INO_INVALID_DATA
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_PAGECACHE;
+ } else
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+- | NFS_INO_INVALID_LABEL
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_PAGECACHE;
++ nfs_zap_label_cache_locked(nfsi);
+ }
+
+ void nfs_zap_caches(struct inode *inode)
+@@ -266,6 +265,13 @@ nfs_init_locked(struct inode *inode, void *opaque)
+ }
+
+ #ifdef CONFIG_NFS_V4_SECURITY_LABEL
++static void nfs_clear_label_invalid(struct inode *inode)
++{
++ spin_lock(&inode->i_lock);
++ NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_LABEL;
++ spin_unlock(&inode->i_lock);
++}
++
+ void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
+ struct nfs4_label *label)
+ {
+@@ -289,6 +295,7 @@ void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
+ __func__,
+ (char *)label->label,
+ label->len, error);
++ nfs_clear_label_invalid(inode);
+ }
+ }
+
+@@ -1599,7 +1606,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ inode->i_blocks = fattr->du.nfs2.blocks;
+
+ /* Update attrtimeo value if we're out of the unstable period */
+- if (invalid & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL)) {
++ if (invalid & NFS_INO_INVALID_ATTR) {
+ nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
+ nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+ nfsi->attrtimeo_timestamp = now;
+@@ -1612,7 +1619,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ }
+ }
+ invalid &= ~NFS_INO_INVALID_ATTR;
+- invalid &= ~NFS_INO_INVALID_LABEL;
+ /* Don't invalidate the data if we were to blame */
+ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
+ || S_ISLNK(inode->i_mode)))
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 38da8c2b81ac..a84dbf238512 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -266,6 +266,18 @@ extern const u32 nfs41_maxgetdevinfo_overhead;
+ extern struct rpc_procinfo nfs4_procedures[];
+ #endif
+
++#ifdef CONFIG_NFS_V4_SECURITY_LABEL
++static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
++{
++ if (nfs_server_capable(&nfsi->vfs_inode, NFS_CAP_SECURITY_LABEL))
++ nfsi->cache_validity |= NFS_INO_INVALID_LABEL;
++}
++#else
++static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
++{
++}
++#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
++
+ /* proc.c */
+ void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
+ extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index ebced8d71157..26c07f9efdb3 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1015,8 +1015,11 @@ int nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
+ if (ret == -EIO)
+ /* A lost lock - don't even consider delegations */
+ goto out;
+- if (nfs4_copy_delegation_stateid(dst, state->inode, fmode))
++ /* returns true if delegation stateid found and copied */
++ if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) {
++ ret = 0;
+ goto out;
++ }
+ if (ret != -ENOENT)
+ /* nfs4_copy_delegation_stateid() didn't over-write
+ * dst, so it still has the lock stateid which we now
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 831d49a4111f..cfc8dcc16043 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -581,9 +581,17 @@ int dquot_scan_active(struct super_block *sb,
+ dqstats_inc(DQST_LOOKUPS);
+ dqput(old_dquot);
+ old_dquot = dquot;
+- ret = fn(dquot, priv);
+- if (ret < 0)
+- goto out;
++ /*
++ * ->release_dquot() can be racing with us. Our reference
++ * protects us from new calls to it so just wait for any
++ * outstanding call and recheck the DQ_ACTIVE_B after that.
++ */
++ wait_on_dquot(dquot);
++ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
++ ret = fn(dquot, priv);
++ if (ret < 0)
++ goto out;
++ }
+ spin_lock(&dq_list_lock);
+ /* We are safe to continue now because our dquot could not
+ * be moved out of the inuse list while we hold the reference */
+diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
+index 2f0543f7510c..f9bbbb472663 100644
+--- a/include/linux/can/skb.h
++++ b/include/linux/can/skb.h
+@@ -11,7 +11,9 @@
+ #define CAN_SKB_H
+
+ #include <linux/types.h>
++#include <linux/skbuff.h>
+ #include <linux/can.h>
++#include <net/sock.h>
+
+ /*
+ * The struct can_skb_priv is used to transport additional information along
+@@ -42,4 +44,40 @@ static inline void can_skb_reserve(struct sk_buff *skb)
+ skb_reserve(skb, sizeof(struct can_skb_priv));
+ }
+
++static inline void can_skb_destructor(struct sk_buff *skb)
++{
++ sock_put(skb->sk);
++}
++
++static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
++{
++ if (sk) {
++ sock_hold(sk);
++ skb->destructor = can_skb_destructor;
++ skb->sk = sk;
++ }
++}
++
++/*
++ * returns an unshared skb owned by the original sock to be echo'ed back
++ */
++static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
++{
++ if (skb_shared(skb)) {
++ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
++
++ if (likely(nskb)) {
++ can_skb_set_owner(nskb, skb->sk);
++ consume_skb(skb);
++ return nskb;
++ } else {
++ kfree_skb(skb);
++ return NULL;
++ }
++ }
++
++ /* we can assume to have an unshared skb with proper owner */
++ return skb;
++}
++
+ #endif /* CAN_SKB_H */
+diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
+index f6c82de12541..d6ad91f26038 100644
+--- a/include/linux/ipc_namespace.h
++++ b/include/linux/ipc_namespace.h
+@@ -119,9 +119,7 @@ extern int mq_init_ns(struct ipc_namespace *ns);
+ * the new maximum will handle anyone else. I may have to revisit this
+ * in the future.
+ */
+-#define MIN_QUEUESMAX 1
+ #define DFLT_QUEUESMAX 256
+-#define HARD_QUEUESMAX 1024
+ #define MIN_MSGMAX 1
+ #define DFLT_MSG 10U
+ #define DFLT_MSGMAX 10
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 21eae43348fb..9f2a0cbc7d06 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2917,7 +2917,12 @@ void netdev_change_features(struct net_device *dev);
+ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
+ struct net_device *dev);
+
+-netdev_features_t netif_skb_features(struct sk_buff *skb);
++netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
++ const struct net_device *dev);
++static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
++{
++ return netif_skb_dev_features(skb, skb->dev);
++}
+
+ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
+ {
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index efa1649a822a..9995165ff3d0 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2393,6 +2393,8 @@ extern void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+ extern struct sk_buff *skb_segment(struct sk_buff *skb,
+ netdev_features_t features);
+
++unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
++
+ static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *buffer)
+ {
+@@ -2816,5 +2818,22 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
+ {
+ return !skb->head_frag || skb_cloned(skb);
+ }
++
++/**
++ * skb_gso_network_seglen - Return length of individual segments of a gso packet
++ *
++ * @skb: GSO skb
++ *
++ * skb_gso_network_seglen is used to determine the real size of the
++ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
++ *
++ * The MAC/L2 header is not accounted for.
++ */
++static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
++{
++ unsigned int hdr_len = skb_transport_header(skb) -
++ skb_network_header(skb);
++ return hdr_len + skb_gso_transport_seglen(skb);
++}
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_SKBUFF_H */
+diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
+index 383d638340b8..5bb8bfe67149 100644
+--- a/ipc/mq_sysctl.c
++++ b/ipc/mq_sysctl.c
+@@ -22,6 +22,16 @@ static void *get_mq(ctl_table *table)
+ return which;
+ }
+
++static int proc_mq_dointvec(ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ struct ctl_table mq_table;
++ memcpy(&mq_table, table, sizeof(mq_table));
++ mq_table.data = get_mq(table);
++
++ return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
++}
++
+ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+@@ -33,12 +43,10 @@ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
+ lenp, ppos);
+ }
+ #else
++#define proc_mq_dointvec NULL
+ #define proc_mq_dointvec_minmax NULL
+ #endif
+
+-static int msg_queues_limit_min = MIN_QUEUESMAX;
+-static int msg_queues_limit_max = HARD_QUEUESMAX;
+-
+ static int msg_max_limit_min = MIN_MSGMAX;
+ static int msg_max_limit_max = HARD_MSGMAX;
+
+@@ -51,9 +59,7 @@ static ctl_table mq_sysctls[] = {
+ .data = &init_ipc_ns.mq_queues_max,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_mq_dointvec_minmax,
+- .extra1 = &msg_queues_limit_min,
+- .extra2 = &msg_queues_limit_max,
++ .proc_handler = proc_mq_dointvec,
+ },
+ {
+ .procname = "msg_max",
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index ae1996d3c539..bb0248fc5187 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -433,9 +433,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry,
+ error = -EACCES;
+ goto out_unlock;
+ }
+- if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
+- (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
+- !capable(CAP_SYS_RESOURCE))) {
++
++ if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
++ !capable(CAP_SYS_RESOURCE)) {
+ error = -ENOSPC;
+ goto out_unlock;
+ }
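[Editor's note: after the two mqueue hunks above, /proc/sys/fs/mqueue/queues_max
is a plain integer sysctl with no hard clamp; the limit is enforced only at
queue-creation time, and CAP_SYS_RESOURCE may exceed it. A hedged user-space
probe of the knob, with minimal error handling:]

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/fs/mqueue/queues_max", "r");
	unsigned int max;

	if (!f || fscanf(f, "%u", &max) != 1) {
		perror("queues_max");
		return 1;
	}
	fclose(f);

	/* With the patch, a privileged writer can set values above the
	 * old HARD_QUEUESMAX (1024); an unprivileged mq_open() still
	 * fails with ENOSPC once the per-namespace count reaches the
	 * configured maximum. */
	printf("mqueue queues_max = %u\n", max);
	return 0;
}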
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index b6fd78344c53..c4f8bc79d075 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -1612,10 +1612,10 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
+ mutex_lock(&cgroup_mutex);
+ mutex_lock(&cgroup_root_mutex);
+
+- root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
+- 0, 1, GFP_KERNEL);
+- if (root_cgrp->id < 0)
++ ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
++ if (ret < 0)
+ goto unlock_drop;
++ root_cgrp->id = ret;
+
+ /* Check for name clashes with existing mounts */
+ ret = -EBUSY;
+@@ -2877,10 +2877,7 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
+ */
+ update_before = cgroup_serial_nr_next;
+
+- mutex_unlock(&cgroup_mutex);
+-
+ /* add/rm files for all cgroups created before */
+- rcu_read_lock();
+ css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
+ struct cgroup *cgrp = css->cgroup;
+
+@@ -2889,23 +2886,19 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
+
+ inode = cgrp->dentry->d_inode;
+ dget(cgrp->dentry);
+- rcu_read_unlock();
+-
+ dput(prev);
+ prev = cgrp->dentry;
+
++ mutex_unlock(&cgroup_mutex);
+ mutex_lock(&inode->i_mutex);
+ mutex_lock(&cgroup_mutex);
+ if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
+ ret = cgroup_addrm_files(cgrp, cfts, is_add);
+- mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&inode->i_mutex);
+-
+- rcu_read_lock();
+ if (ret)
+ break;
+ }
+- rcu_read_unlock();
++ mutex_unlock(&cgroup_mutex);
+ dput(prev);
+ deactivate_super(sb);
+ return ret;
+@@ -3024,9 +3017,14 @@ static void cgroup_enable_task_cg_lists(void)
+ * We should check if the process is exiting, otherwise
+ * it will race with cgroup_exit() in that the list
+ * entry won't be deleted even though the process has exited.
++ * Do it while holding siglock so that we don't end up
++ * racing against cgroup_exit().
+ */
++ spin_lock_irq(&p->sighand->siglock);
+ if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
+ list_add(&p->cg_list, &task_css_set(p)->tasks);
++ spin_unlock_irq(&p->sighand->siglock);
++
+ task_unlock(p);
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+@@ -4395,7 +4393,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
+ struct cgroup *cgrp;
+ struct cgroup_name *name;
+ struct cgroupfs_root *root = parent->root;
+- int err = 0;
++ int err;
+ struct cgroup_subsys *ss;
+ struct super_block *sb = root->sb;
+
+@@ -4405,8 +4403,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
+ return -ENOMEM;
+
+ name = cgroup_alloc_name(dentry);
+- if (!name)
++ if (!name) {
++ err = -ENOMEM;
+ goto err_free_cgrp;
++ }
+ rcu_assign_pointer(cgrp->name, name);
+
+ /*
+@@ -4414,8 +4414,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
+ * a half-baked cgroup.
+ */
+ cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+- if (cgrp->id < 0)
++ if (cgrp->id < 0) {
++ err = -ENOMEM;
+ goto err_free_name;
++ }
+
+ /*
+ * Only live parents can have children. Note that the liveliness
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 953c14348375..fea4f6cf7e90 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -193,7 +193,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+ {
+- int ret = proc_dointvec(table, write, buffer, lenp, ppos);
++ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (ret || !write)
+ return ret;
+@@ -7788,14 +7788,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
+ static void __perf_event_exit_context(void *__info)
+ {
+ struct perf_event_context *ctx = __info;
+- struct perf_event *event, *tmp;
++ struct perf_event *event;
+
+ perf_pmu_rotate_stop(ctx->pmu);
+
+- list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
+- __perf_remove_from_context(event);
+- list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
++ rcu_read_lock();
++ list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
+ __perf_remove_from_context(event);
++ rcu_read_unlock();
+ }
+
+ static void perf_event_exit_cpu_context(int cpu)
+@@ -7819,11 +7819,11 @@ static void perf_event_exit_cpu(int cpu)
+ {
+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
+
++ perf_event_exit_cpu_context(cpu);
++
+ mutex_lock(&swhash->hlist_mutex);
+ swevent_hlist_release(swhash);
+ mutex_unlock(&swhash->hlist_mutex);
+-
+- perf_event_exit_cpu_context(cpu);
+ }
+ #else
+ static inline void perf_event_exit_cpu(int cpu) { }
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index b2f06f3c6a3f..2a9db916c3f5 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1049,6 +1049,7 @@ static struct ctl_table kern_table[] = {
+ .maxlen = sizeof(sysctl_perf_event_sample_rate),
+ .mode = 0644,
+ .proc_handler = perf_proc_update_handler,
++ .extra1 = &one,
+ },
+ {
+ .procname = "perf_cpu_time_max_percent",
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 93c265286e8c..60fee69c37be 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1843,6 +1843,12 @@ static void destroy_worker(struct worker *worker)
+ if (worker->flags & WORKER_IDLE)
+ pool->nr_idle--;
+
++ /*
++ * Once WORKER_DIE is set, the kworker may destroy itself at any
++ * point. Pin to ensure the task stays until we're done with it.
++ */
++ get_task_struct(worker->task);
++
+ list_del_init(&worker->entry);
+ worker->flags |= WORKER_DIE;
+
+@@ -1851,6 +1857,7 @@ static void destroy_worker(struct worker *worker)
+ spin_unlock_irq(&pool->lock);
+
+ kthread_stop(worker->task);
++ put_task_struct(worker->task);
+ kfree(worker);
+
+ spin_lock_irq(&pool->lock);
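[Editor's note: the two added lines above are a classic pin-before-stop pattern:
once WORKER_DIE is visible, the kworker may free itself, so destroy_worker()
takes a task reference first and drops it only after kthread_stop() returns.
A user-space analogue of the same idea; all names below are invented for
illustration:]

#include <stdatomic.h>
#include <stdlib.h>

struct task {
	atomic_int refs; /* the last put frees the struct */
};

static struct task *task_get(struct task *t)
{
	atomic_fetch_add(&t->refs, 1);
	return t;
}

static void task_put(struct task *t)
{
	if (atomic_fetch_sub(&t->refs, 1) == 1)
		free(t);
}

static void destroy_worker_sketch(struct task *t, atomic_int *die)
{
	task_get(t);            /* pin: t outlives the flag below */
	atomic_store(die, 1);   /* worker may now exit and drop its ref */
	/* ... wait for the worker here (kthread_stop() in the kernel) */
	task_put(t);            /* safe: our pin kept t valid */
}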
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 292a266e0d42..dd7789ce7572 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1160,8 +1160,10 @@ alloc:
+ } else {
+ ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
+ pmd, orig_pmd, page, haddr);
+- if (ret & VM_FAULT_OOM)
++ if (ret & VM_FAULT_OOM) {
+ split_huge_page(page);
++ ret |= VM_FAULT_FALLBACK;
++ }
+ put_page(page);
+ }
+ count_vm_event(THP_FAULT_FALLBACK);
+@@ -1173,9 +1175,10 @@ alloc:
+ if (page) {
+ split_huge_page(page);
+ put_page(page);
+- }
++ } else
++ split_huge_page_pmd(vma, address, pmd);
++ ret |= VM_FAULT_FALLBACK;
+ count_vm_event(THP_FAULT_FALLBACK);
+- ret |= VM_FAULT_OOM;
+ goto out;
+ }
+
+diff --git a/mm/memory.c b/mm/memory.c
+index d176154c243f..22e67a2c955b 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3756,7 +3756,6 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ return hugetlb_fault(mm, vma, address, flags);
+
+-retry:
+ pgd = pgd_offset(mm, address);
+ pud = pud_alloc(mm, pgd, address);
+ if (!pud)
+@@ -3794,20 +3793,13 @@ retry:
+ if (dirty && !pmd_write(orig_pmd)) {
+ ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
+ orig_pmd);
+- /*
+- * If COW results in an oom, the huge pmd will
+- * have been split, so retry the fault on the
+- * pte for a smaller charge.
+- */
+- if (unlikely(ret & VM_FAULT_OOM))
+- goto retry;
+- return ret;
++ if (!(ret & VM_FAULT_FALLBACK))
++ return ret;
+ } else {
+ huge_pmd_set_accessed(mm, vma, address, pmd,
+ orig_pmd, dirty);
++ return 0;
+ }
+-
+- return 0;
+ }
+ }
+
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
+index 990afab2be1b..c76a4388a5d7 100644
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -340,7 +340,10 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
+ int count = nr_pages;
+ while (nr_pages) {
+ s = rest_of_page(data);
+- pages[index++] = kmap_to_page(data);
++ if (is_vmalloc_addr(data))
++ pages[index++] = vmalloc_to_page(data);
++ else
++ pages[index++] = kmap_to_page(data);
+ data += s;
+ nr_pages--;
+ }
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index ca04163635da..0d3400167ef3 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -219,6 +219,33 @@ static void br_netpoll_cleanup(struct net_device *dev)
+ br_netpoll_disable(p);
+ }
+
++static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
++{
++ struct netpoll *np;
++ int err;
++
++ np = kzalloc(sizeof(*p->np), gfp);
++ if (!np)
++ return -ENOMEM;
++
++ err = __netpoll_setup(np, p->dev, gfp);
++ if (err) {
++ kfree(np);
++ return err;
++ }
++
++ p->np = np;
++ return err;
++}
++
++int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
++{
++ if (!p->br->dev->npinfo)
++ return 0;
++
++ return __br_netpoll_enable(p, gfp);
++}
++
+ static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
+ gfp_t gfp)
+ {
+@@ -229,7 +256,7 @@ static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
+ list_for_each_entry(p, &br->port_list, list) {
+ if (!p->dev)
+ continue;
+- err = br_netpoll_enable(p, gfp);
++ err = __br_netpoll_enable(p, gfp);
+ if (err)
+ goto fail;
+ }
+@@ -242,28 +269,6 @@ fail:
+ goto out;
+ }
+
+-int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+-{
+- struct netpoll *np;
+- int err;
+-
+- if (!p->br->dev->npinfo)
+- return 0;
+-
+- np = kzalloc(sizeof(*p->np), gfp);
+- if (!np)
+- return -ENOMEM;
+-
+- err = __netpoll_setup(np, p->dev, gfp);
+- if (err) {
+- kfree(np);
+- return err;
+- }
+-
+- p->np = np;
+- return err;
+-}
+-
+ void br_netpoll_disable(struct net_bridge_port *p)
+ {
+ struct netpoll *np = p->np;
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 3ab8dd2e1282..ae3f07eb6cd7 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -57,6 +57,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/can.h>
+ #include <linux/can/core.h>
++#include <linux/can/skb.h>
+ #include <linux/ratelimit.h>
+ #include <net/net_namespace.h>
+ #include <net/sock.h>
+@@ -290,7 +291,7 @@ int can_send(struct sk_buff *skb, int loop)
+ return -ENOMEM;
+ }
+
+- newskb->sk = skb->sk;
++ can_skb_set_owner(newskb, skb->sk);
+ newskb->ip_summed = CHECKSUM_UNNECESSARY;
+ newskb->pkt_type = PACKET_BROADCAST;
+ }
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 46f20bfafc0e..b57452a65fb9 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -268,7 +268,7 @@ static void bcm_can_tx(struct bcm_op *op)
+
+ /* send with loopback */
+ skb->dev = dev;
+- skb->sk = op->sk;
++ can_skb_set_owner(skb, op->sk);
+ can_send(skb, 1);
+
+ /* update statistics */
+@@ -1223,7 +1223,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
+
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ skb->dev = dev;
+- skb->sk = sk;
++ can_skb_set_owner(skb, sk);
+ err = can_send(skb, 1); /* send with loopback */
+ dev_put(dev);
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3d1387461279..b32797590b40 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2398,7 +2398,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
+ * 2. No high memory really exists on this machine.
+ */
+
+-static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
++static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+ {
+ #ifdef CONFIG_HIGHMEM
+ int i;
+@@ -2478,34 +2478,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
+ }
+
+ static netdev_features_t harmonize_features(struct sk_buff *skb,
+- netdev_features_t features)
++ const struct net_device *dev,
++ netdev_features_t features)
+ {
+ if (skb->ip_summed != CHECKSUM_NONE &&
+ !can_checksum_protocol(features, skb_network_protocol(skb))) {
+ features &= ~NETIF_F_ALL_CSUM;
+- } else if (illegal_highdma(skb->dev, skb)) {
++ } else if (illegal_highdma(dev, skb)) {
+ features &= ~NETIF_F_SG;
+ }
+
+ return features;
+ }
+
+-netdev_features_t netif_skb_features(struct sk_buff *skb)
++netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
++ const struct net_device *dev)
+ {
+ __be16 protocol = skb->protocol;
+- netdev_features_t features = skb->dev->features;
++ netdev_features_t features = dev->features;
+
+- if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
++ if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+ features &= ~NETIF_F_GSO_MASK;
+
+ if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+ } else if (!vlan_tx_tag_present(skb)) {
+- return harmonize_features(skb, features);
++ return harmonize_features(skb, dev, features);
+ }
+
+- features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
++ features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
+
+ if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
+@@ -2513,9 +2515,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
+ NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+
+- return harmonize_features(skb, features);
++ return harmonize_features(skb, dev, features);
+ }
+-EXPORT_SYMBOL(netif_skb_features);
++EXPORT_SYMBOL(netif_skb_dev_features);
+
+ /*
+ * Returns true if either:
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index f409e0bd35c0..185c341fafbd 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -745,6 +745,13 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
+ attach_rules(&ops->rules_list, dev);
+ break;
+
++ case NETDEV_CHANGENAME:
++ list_for_each_entry(ops, &net->rules_ops, list) {
++ detach_rules(&ops->rules_list, dev);
++ attach_rules(&ops->rules_list, dev);
++ }
++ break;
++
+ case NETDEV_UNREGISTER:
+ list_for_each_entry(ops, &net->rules_ops, list)
+ detach_rules(&ops->rules_list, dev);
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 0c1482c6ff98..462cdc97fad8 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -943,6 +943,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
+ {
+ char *cur=opt, *delim;
+ int ipv6;
++ bool ipversion_set = false;
+
+ if (*cur != '@') {
+ if ((delim = strchr(cur, '@')) == NULL)
+@@ -955,6 +956,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
+ cur++;
+
+ if (*cur != '/') {
++ ipversion_set = true;
+ if ((delim = strchr(cur, '/')) == NULL)
+ goto parse_failed;
+ *delim = 0;
+@@ -997,7 +999,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
+ ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
+ if (ipv6 < 0)
+ goto parse_failed;
+- else if (np->ipv6 != (bool)ipv6)
++ else if (ipversion_set && np->ipv6 != (bool)ipv6)
+ goto parse_failed;
+ else
+ np->ipv6 = (bool)ipv6;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 2c7baa809913..21571dc4f2df 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -47,6 +47,8 @@
+ #include <linux/in.h>
+ #include <linux/inet.h>
+ #include <linux/slab.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
+ #include <linux/netdevice.h>
+ #ifdef CONFIG_NET_CLS_ACT
+ #include <net/pkt_sched.h>
+@@ -3519,3 +3521,26 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ nf_reset_trace(skb);
+ }
+ EXPORT_SYMBOL_GPL(skb_scrub_packet);
++
++/**
++ * skb_gso_transport_seglen - Return length of individual segments of a gso packet
++ *
++ * @skb: GSO skb
++ *
++ * skb_gso_transport_seglen is used to determine the real size of the
++ * individual segments, including Layer 4 (TCP/UDP) headers.
++ *
++ * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
++ */
++unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
++{
++ const struct skb_shared_info *shinfo = skb_shinfo(skb);
++ unsigned int hdr_len;
++
++ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
++ hdr_len = tcp_hdrlen(skb);
++ else
++ hdr_len = sizeof(struct udphdr);
++ return hdr_len + shinfo->gso_size;
++}
++EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 5cec994ee2f3..831a0d0af49f 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1795,7 +1795,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+ while (order) {
+ if (npages >= 1 << order) {
+ page = alloc_pages(sk->sk_allocation |
+- __GFP_COMP | __GFP_NOWARN,
++ __GFP_COMP |
++ __GFP_NOWARN |
++ __GFP_NORETRY,
+ order);
+ if (page)
+ goto fill_page;
+@@ -1857,7 +1859,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+ gfp_t gfp = sk->sk_allocation;
+
+ if (order)
+- gfp |= __GFP_COMP | __GFP_NOWARN;
++ gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
+ pfrag->page = alloc_pages(gfp, order);
+ if (likely(pfrag->page)) {
+ pfrag->offset = 0;
+diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
+index 008f33703a33..ceabe6f13216 100644
+--- a/net/ieee802154/6lowpan.c
++++ b/net/ieee802154/6lowpan.c
+@@ -1261,7 +1261,27 @@ static struct header_ops lowpan_header_ops = {
+ .create = lowpan_header_create,
+ };
+
++static struct lock_class_key lowpan_tx_busylock;
++static struct lock_class_key lowpan_netdev_xmit_lock_key;
++
++static void lowpan_set_lockdep_class_one(struct net_device *dev,
++ struct netdev_queue *txq,
++ void *_unused)
++{
++ lockdep_set_class(&txq->_xmit_lock,
++ &lowpan_netdev_xmit_lock_key);
++}
++
++
++static int lowpan_dev_init(struct net_device *dev)
++{
++ netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
++ dev->qdisc_tx_busylock = &lowpan_tx_busylock;
++ return 0;
++}
++
+ static const struct net_device_ops lowpan_netdev_ops = {
++ .ndo_init = lowpan_dev_init,
+ .ndo_start_xmit = lowpan_xmit,
+ .ndo_set_mac_address = lowpan_set_address,
+ };
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index a1b5bcbd04ae..f4b34d8f92fe 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1435,7 +1435,8 @@ static size_t inet_nlmsg_size(void)
+ + nla_total_size(4) /* IFA_ADDRESS */
+ + nla_total_size(4) /* IFA_LOCAL */
+ + nla_total_size(4) /* IFA_BROADCAST */
+- + nla_total_size(IFNAMSIZ); /* IFA_LABEL */
++ + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
++ + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
+ }
+
+ static inline u32 cstamp_delta(unsigned long cstamp)
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index 694de3b7aebf..98d7e53d2afd 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -39,6 +39,71 @@
+ #include <net/route.h>
+ #include <net/xfrm.h>
+
++static bool ip_may_fragment(const struct sk_buff *skb)
++{
++ return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
++ !skb->local_df;
++}
++
++static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
++{
++ if (skb->len <= mtu || skb->local_df)
++ return false;
++
++ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
++ return false;
++
++ return true;
++}
++
++static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
++{
++ unsigned int mtu;
++
++ if (skb->local_df || !skb_is_gso(skb))
++ return false;
++
++ mtu = dst_mtu(skb_dst(skb));
++
++ /* if seglen > mtu, do software segmentation for IP fragmentation on
++ * output. The DF bit cannot be set, since ip_forward would already
++ * have sent an ICMP error.
++ */
++ return skb_gso_network_seglen(skb) > mtu;
++}
++
++/* called if GSO skb needs to be fragmented on forward */
++static int ip_forward_finish_gso(struct sk_buff *skb)
++{
++ struct dst_entry *dst = skb_dst(skb);
++ netdev_features_t features;
++ struct sk_buff *segs;
++ int ret = 0;
++
++ features = netif_skb_dev_features(skb, dst->dev);
++ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
++ if (IS_ERR(segs)) {
++ kfree_skb(skb);
++ return -ENOMEM;
++ }
++
++ consume_skb(skb);
++
++ do {
++ struct sk_buff *nskb = segs->next;
++ int err;
++
++ segs->next = NULL;
++ err = dst_output(segs);
++
++ if (err && ret == 0)
++ ret = err;
++ segs = nskb;
++ } while (segs);
++
++ return ret;
++}
++
+ static int ip_forward_finish(struct sk_buff *skb)
+ {
+ struct ip_options *opt = &(IPCB(skb)->opt);
+@@ -49,6 +114,9 @@ static int ip_forward_finish(struct sk_buff *skb)
+ if (unlikely(opt->optlen))
+ ip_forward_options(skb);
+
++ if (ip_gso_exceeds_dst_mtu(skb))
++ return ip_forward_finish_gso(skb);
++
+ return dst_output(skb);
+ }
+
+@@ -88,8 +156,7 @@ int ip_forward(struct sk_buff *skb)
+ if (opt->is_strictroute && rt->rt_uses_gateway)
+ goto sr_failed;
+
+- if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
+- (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
++ if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, dst_mtu(&rt->dst))) {
+ IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(dst_mtu(&rt->dst)));
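[Editor's note: the rewritten check factors the old one-line conditional into
two predicates. Restated in stand-alone user-space C for readability — a direct
transliteration of the patch's ip_may_fragment()/ip_exceeds_mtu() logic, not
the kernel code itself:]

#include <stdbool.h>

/* DF clear, or local_df set: the stack may fragment this packet. */
static bool may_fragment(bool df_set, bool local_df)
{
	return !df_set || local_df;
}

/* Too big only if the packet exceeds the MTU, local_df does not allow
 * ignoring that, and it is not a GSO packet whose individual segments
 * would each fit. */
static bool exceeds_mtu(unsigned int len, bool local_df, bool gso,
			unsigned int net_seglen, unsigned int mtu)
{
	if (len <= mtu || local_df)
		return false;
	if (gso && net_seglen <= mtu)
		return false;
	return true;
}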
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 62290b5124c8..2557b9a52373 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1596,6 +1596,7 @@ static int __mkroute_input(struct sk_buff *skb,
+ rth->rt_gateway = 0;
+ rth->rt_uses_gateway = 0;
+ INIT_LIST_HEAD(&rth->rt_uncached);
++ RT_CACHE_STAT_INC(in_slow_tot);
+
+ rth->dst.input = ip_forward;
+ rth->dst.output = ip_output;
+@@ -1697,8 +1698,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ if (err != 0)
+ goto no_route;
+
+- RT_CACHE_STAT_INC(in_slow_tot);
+-
+ if (res.type == RTN_BROADCAST)
+ goto brd_input;
+
+@@ -1767,6 +1766,7 @@ local_input:
+ rth->rt_gateway = 0;
+ rth->rt_uses_gateway = 0;
+ INIT_LIST_HEAD(&rth->rt_uncached);
++ RT_CACHE_STAT_INC(in_slow_tot);
+ if (res.type == RTN_UNREACHABLE) {
+ rth->dst.input= ip_error;
+ rth->dst.error= -err;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index e912634b2f05..fb8227a8c004 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -696,7 +696,8 @@ static void tcp_tsq_handler(struct sock *sk)
+ if ((1 << sk->sk_state) &
+ (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
+ TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
+- tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
++ tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
++ 0, GFP_ATOMIC);
+ }
+ /*
+ * One tasklet per cpu tries to send more skbs.
+@@ -1884,7 +1885,15 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+
+ if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+ set_bit(TSQ_THROTTLED, &tp->tsq_flags);
+- break;
++ /* It is possible TX completion already happened
++ * before we set TSQ_THROTTLED, so we must
++ * test again the condition.
++ * We abuse smp_mb__after_clear_bit() because
++ * there is no smp_mb__after_set_bit() yet
++ */
++ smp_mb__after_clear_bit();
++ if (atomic_read(&sk->sk_wmem_alloc) > limit)
++ break;
+ }
+
+ limit = mss_now;
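[Editor's note: the second hunk above is a set-flag/barrier/re-test race
breaker: a TX completion landing between the first wmem test and the
TSQ_THROTTLED store must not be lost. A minimal C11 sketch of the same
ordering pattern; names are invented, and the kernel reuses
smp_mb__after_clear_bit() only because no set-bit variant exists yet:]

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint wmem_alloc; /* stand-in for sk->sk_wmem_alloc */
static atomic_bool throttled;  /* stand-in for TSQ_THROTTLED */

/* Returns true if the sender should stop and wait for completions. */
static bool tsq_should_throttle(unsigned int limit)
{
	if (atomic_load(&wmem_alloc) <= limit)
		return false;

	atomic_store(&throttled, true);
	atomic_thread_fence(memory_order_seq_cst); /* smp_mb() analogue */

	/* Re-test: a completion may have freed memory between the first
	 * load and the flag store; if so, keep transmitting. */
	return atomic_load(&wmem_alloc) > limit;
}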
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index b6fa35e7425c..68fd4918315c 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -321,6 +321,20 @@ static inline int ip6_forward_finish(struct sk_buff *skb)
+ return dst_output(skb);
+ }
+
++static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
++{
++ if (skb->len <= mtu || skb->local_df)
++ return false;
++
++ if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
++ return true;
++
++ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
++ return false;
++
++ return true;
++}
++
+ int ip6_forward(struct sk_buff *skb)
+ {
+ struct dst_entry *dst = skb_dst(skb);
+@@ -443,8 +457,7 @@ int ip6_forward(struct sk_buff *skb)
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+
+- if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
+- (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
++ if (ip6_pkt_too_big(skb, mtu)) {
+ /* Again, force OUTPUT device used as source address */
+ skb->dev = dst->dev;
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index 6ddda282f9c7..fecd35af1935 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -21,7 +21,6 @@
+ #include <net/netlink.h>
+ #include <net/sch_generic.h>
+ #include <net/pkt_sched.h>
+-#include <net/tcp.h>
+
+
+ /* Simple Token Bucket Filter.
+@@ -122,16 +121,10 @@ struct tbf_sched_data {
+ * Return length of individual segments of a gso packet,
+ * including all headers (MAC, IP, TCP/UDP)
+ */
+-static unsigned int skb_gso_seglen(const struct sk_buff *skb)
++static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
+ {
+ unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+- const struct skb_shared_info *shinfo = skb_shinfo(skb);
+-
+- if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+- hdr_len += tcp_hdrlen(skb);
+- else
+- hdr_len += sizeof(struct udphdr);
+- return hdr_len + shinfo->gso_size;
++ return hdr_len + skb_gso_transport_seglen(skb);
+ }
+
+ /* GSO packet is too big, segment it so that tbf can transmit
+@@ -176,7 +169,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ int ret;
+
+ if (qdisc_pkt_len(skb) > q->max_size) {
+- if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
++ if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
+ return tbf_segment(skb, sch);
+ return qdisc_reshape_fail(skb, sch);
+ }
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 911b71b26b0e..14c801528da8 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -65,6 +65,7 @@
+ #include <linux/crypto.h>
+ #include <linux/slab.h>
+ #include <linux/file.h>
++#include <linux/compat.h>
+
+ #include <net/ip.h>
+ #include <net/icmp.h>
+@@ -1369,11 +1370,19 @@ static int sctp_setsockopt_connectx(struct sock* sk,
+ /*
+ * New (hopefully final) interface for the API.
+ * We use the sctp_getaddrs_old structure so that the user-space library
+- * can avoid any unnecessary allocations. The only defferent part
++ * can avoid any unnecessary allocations. The only different part
+ * is that we store the actual length of the address buffer into the
+- * addrs_num structure member. That way we can re-use the existing
++ * addrs_num structure member. That way we can re-use the existing
+ * code.
+ */
++#ifdef CONFIG_COMPAT
++struct compat_sctp_getaddrs_old {
++ sctp_assoc_t assoc_id;
++ s32 addr_num;
++ compat_uptr_t addrs; /* struct sockaddr * */
++};
++#endif
++
+ static int sctp_getsockopt_connectx3(struct sock* sk, int len,
+ char __user *optval,
+ int __user *optlen)
+@@ -1382,16 +1391,30 @@ static int sctp_getsockopt_connectx3(struct sock* sk, int len,
+ sctp_assoc_t assoc_id = 0;
+ int err = 0;
+
+- if (len < sizeof(param))
+- return -EINVAL;
++#ifdef CONFIG_COMPAT
++ if (is_compat_task()) {
++ struct compat_sctp_getaddrs_old param32;
+
+- if (copy_from_user(&param, optval, sizeof(param)))
+- return -EFAULT;
++ if (len < sizeof(param32))
++ return -EINVAL;
++ if (copy_from_user(&param32, optval, sizeof(param32)))
++ return -EFAULT;
+
+- err = __sctp_setsockopt_connectx(sk,
+- (struct sockaddr __user *)param.addrs,
+- param.addr_num, &assoc_id);
++ param.assoc_id = param32.assoc_id;
++ param.addr_num = param32.addr_num;
++ param.addrs = compat_ptr(param32.addrs);
++ } else
++#endif
++ {
++ if (len < sizeof(param))
++ return -EINVAL;
++ if (copy_from_user(&param, optval, sizeof(param)))
++ return -EFAULT;
++ }
+
++ err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
++ param.addrs, param.addr_num,
++ &assoc_id);
+ if (err == 0 || err == -EINPROGRESS) {
+ if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
+ return -EFAULT;
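[Editor's note: the compat path above exists because the 32-bit and 64-bit
layouts of sctp_getaddrs_old differ: with a native pointer the struct is
16 bytes, while the 32-bit ABI packs it into 12, so a 64-bit kernel must
re-read the fields with fixed-width types, as the patch does via
compat_uptr_t. A hedged stand-alone illustration (C11, build as LP64 such
as x86-64; field names mirror the patch, struct names are invented):]

#include <assert.h>
#include <stdint.h>

struct getaddrs_old_64 {        /* native layout on a 64-bit kernel */
	int32_t assoc_id;
	int32_t addr_num;
	void *addrs;            /* 8 bytes, 8-byte aligned */
};

struct compat_getaddrs_old {    /* what a 32-bit task actually passed */
	int32_t assoc_id;
	int32_t addr_num;
	uint32_t addrs;         /* user pointer as a 32-bit value */
};

static_assert(sizeof(struct compat_getaddrs_old) == 12,
	      "32-bit ABI layout");
static_assert(sizeof(struct getaddrs_old_64) == 16,
	      "64-bit layout pads before the pointer");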
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index cc24323d3045..a7f9821d1760 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -108,6 +108,7 @@ struct gss_auth {
+ static DEFINE_SPINLOCK(pipe_version_lock);
+ static struct rpc_wait_queue pipe_version_rpc_waitqueue;
+ static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
++static void gss_put_auth(struct gss_auth *gss_auth);
+
+ static void gss_free_ctx(struct gss_cl_ctx *);
+ static const struct rpc_pipe_ops gss_upcall_ops_v0;
+@@ -320,6 +321,7 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
+ if (gss_msg->ctx != NULL)
+ gss_put_ctx(gss_msg->ctx);
+ rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
++ gss_put_auth(gss_msg->auth);
+ kfree(gss_msg);
+ }
+
+@@ -486,6 +488,7 @@ gss_alloc_msg(struct gss_auth *gss_auth,
+ default:
+ gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
+ };
++ kref_get(&gss_auth->kref);
+ return gss_msg;
+ }
+
+@@ -1053,6 +1056,12 @@ gss_free_callback(struct kref *kref)
+ }
+
+ static void
++gss_put_auth(struct gss_auth *gss_auth)
++{
++ kref_put(&gss_auth->kref, gss_free_callback);
++}
++
++static void
+ gss_destroy(struct rpc_auth *auth)
+ {
+ struct gss_auth *gss_auth = container_of(auth,
+@@ -1073,7 +1082,7 @@ gss_destroy(struct rpc_auth *auth)
+ gss_auth->gss_pipe[1] = NULL;
+ rpcauth_destroy_credcache(auth);
+
+- kref_put(&gss_auth->kref, gss_free_callback);
++ gss_put_auth(gss_auth);
+ }
+
+ /*
+@@ -1244,7 +1253,7 @@ gss_destroy_nullcred(struct rpc_cred *cred)
+ call_rcu(&cred->cr_rcu, gss_free_cred_callback);
+ if (ctx)
+ gss_put_ctx(ctx);
+- kref_put(&gss_auth->kref, gss_free_callback);
++ gss_put_auth(gss_auth);
+ }
+
+ static void
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index b752e1de2e7d..83a1daa642bb 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -504,6 +504,7 @@ static int xs_nospace(struct rpc_task *task)
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
++ struct sock *sk = transport->inet;
+ int ret = -EAGAIN;
+
+ dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
+@@ -521,7 +522,7 @@ static int xs_nospace(struct rpc_task *task)
+ * window size
+ */
+ set_bit(SOCK_NOSPACE, &transport->sock->flags);
+- transport->inet->sk_write_pending++;
++ sk->sk_write_pending++;
+ /* ...and wait for more buffer space */
+ xprt_wait_for_buffer_space(task, xs_nospace_callback);
+ }
+@@ -531,6 +532,9 @@ static int xs_nospace(struct rpc_task *task)
+ }
+
+ spin_unlock_bh(&xprt->transport_lock);
++
++ /* Race breaker in case memory is freed before the above code runs */
++ sk->sk_write_space(sk);
+ return ret;
+ }
+
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index 7e5bceddc36f..4d35eb75f129 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -3261,10 +3261,10 @@ static int filename_write_helper(void *key, void *data, void *ptr)
+ if (rc)
+ return rc;
+
+- buf[0] = ft->stype;
+- buf[1] = ft->ttype;
+- buf[2] = ft->tclass;
+- buf[3] = otype->otype;
++ buf[0] = cpu_to_le32(ft->stype);
++ buf[1] = cpu_to_le32(ft->ttype);
++ buf[2] = cpu_to_le32(ft->tclass);
++ buf[3] = cpu_to_le32(otype->otype);
+
+ rc = put_entry(buf, sizeof(u32), 4, fp);
+ if (rc)
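[Editor's note: the policydb hunk above fixes a byte-order bug: the binary
policy format is defined as little-endian, so storing host-endian u32 values
corrupts policy files written on big-endian machines. The user-space analogue
of cpu_to_le32() is htole32(); a small runnable demonstration:]

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t stype = 42;
	uint32_t on_disk = htole32(stype); /* cpu_to_le32() analogue */

	/* Identical on little-endian hosts; byte-swapped on big-endian
	 * ones, which is exactly the case the missing conversion broke. */
	printf("host value %u -> LE on-disk 0x%08x\n", stype, on_disk);
	return 0;
}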
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 6e9876f27d95..a91ad743fca4 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -2662,60 +2662,6 @@ static bool dspload_wait_loaded(struct hda_codec *codec)
+ }
+
+ /*
+- * PCM stuffs
+- */
+-static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid,
+- u32 stream_tag,
+- int channel_id, int format)
+-{
+- unsigned int oldval, newval;
+-
+- if (!nid)
+- return;
+-
+- snd_printdd(
+- "ca0132_setup_stream: NID=0x%x, stream=0x%x, "
+- "channel=%d, format=0x%x\n",
+- nid, stream_tag, channel_id, format);
+-
+- /* update the format-id if changed */
+- oldval = snd_hda_codec_read(codec, nid, 0,
+- AC_VERB_GET_STREAM_FORMAT,
+- 0);
+- if (oldval != format) {
+- msleep(20);
+- snd_hda_codec_write(codec, nid, 0,
+- AC_VERB_SET_STREAM_FORMAT,
+- format);
+- }
+-
+- oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
+- newval = (stream_tag << 4) | channel_id;
+- if (oldval != newval) {
+- snd_hda_codec_write(codec, nid, 0,
+- AC_VERB_SET_CHANNEL_STREAMID,
+- newval);
+- }
+-}
+-
+-static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid)
+-{
+- unsigned int val;
+-
+- if (!nid)
+- return;
+-
+- snd_printdd(KERN_INFO "ca0132_cleanup_stream: NID=0x%x\n", nid);
+-
+- val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
+- if (!val)
+- return;
+-
+- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0);
+- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0);
+-}
+-
+-/*
+ * PCM callbacks
+ */
+ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+@@ -2726,7 +2672,7 @@ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ {
+ struct ca0132_spec *spec = codec->spec;
+
+- ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
++ snd_hda_codec_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
+
+ return 0;
+ }
+@@ -2745,7 +2691,7 @@ static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
+ if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
+ msleep(50);
+
+- ca0132_cleanup_stream(codec, spec->dacs[0]);
++ snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);
+
+ return 0;
+ }
+@@ -2822,10 +2768,8 @@ static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
+ unsigned int format,
+ struct snd_pcm_substream *substream)
+ {
+- struct ca0132_spec *spec = codec->spec;
+-
+- ca0132_setup_stream(codec, spec->adcs[substream->number],
+- stream_tag, 0, format);
++ snd_hda_codec_setup_stream(codec, hinfo->nid,
++ stream_tag, 0, format);
+
+ return 0;
+ }
+@@ -2839,7 +2783,7 @@ static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
+ if (spec->dsp_state == DSP_DOWNLOADING)
+ return 0;
+
+- ca0132_cleanup_stream(codec, hinfo->nid);
++ snd_hda_codec_cleanup_stream(codec, hinfo->nid);
+ return 0;
+ }
+
+@@ -4742,6 +4686,8 @@ static int patch_ca0132(struct hda_codec *codec)
+ return err;
+
+ codec->patch_ops = ca0132_patch_ops;
++ codec->pcm_format_first = 1;
++ codec->no_sticky_stream = 1;
+
+ return 0;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 003a7ce5791c..1be437f533a6 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3578,6 +3578,8 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ alc283_chromebook_caps(codec);
+ spec->gen.hp_automute_hook = alc283_hp_automute_hook;
++ break;
++ case HDA_FIXUP_ACT_INIT:
+ /* MIC2-VREF control */
+ /* Set to manual mode */
+ val = alc_read_coef_idx(codec, 0x06);
+@@ -3686,6 +3688,7 @@ enum {
+ ALC271_FIXUP_HP_GATE_MIC_JACK,
+ ALC269_FIXUP_ACER_AC700,
+ ALC269_FIXUP_LIMIT_INT_MIC_BOOST,
++ ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED,
+ ALC269VB_FIXUP_ORDISSIMO_EVE2,
+ ALC283_FIXUP_CHROME_BOOK,
+ ALC282_FIXUP_ASUS_TX300,
+@@ -3955,6 +3958,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost,
+ },
++ [ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc269_fixup_limit_int_mic_boost,
++ .chained = true,
++ .chain_id = ALC269_FIXUP_HP_MUTE_LED_MIC1,
++ },
+ [ALC269VB_FIXUP_ORDISSIMO_EVE2] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -4041,6 +4050,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++ SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x21ed, "HP Falco Chromebook", ALC283_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+@@ -4571,6 +4581,25 @@ static void alc272_fixup_mario(struct hda_codec *codec,
+ "hda_codec: failed to override amp caps for NID 0x2\n");
+ }
+
++static const struct snd_pcm_chmap_elem asus_pcm_2_1_chmaps[] = {
++ { .channels = 2,
++ .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
++ { .channels = 4,
++ .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
++ SNDRV_CHMAP_NA, SNDRV_CHMAP_LFE } }, /* LFE only on right */
++ { }
++};
++
++/* override the 2.1 chmap */
++static void alc662_fixup_bass_chmap(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ if (action == HDA_FIXUP_ACT_BUILD) {
++ struct alc_spec *spec = codec->spec;
++ spec->gen.pcm_rec[0].stream[0].chmap = asus_pcm_2_1_chmaps;
++ }
++}
++
+ enum {
+ ALC662_FIXUP_ASPIRE,
+ ALC662_FIXUP_IDEAPAD,
+@@ -4591,6 +4620,9 @@ enum {
+ ALC662_FIXUP_INV_DMIC,
+ ALC668_FIXUP_DELL_MIC_NO_PRESENCE,
+ ALC668_FIXUP_HEADSET_MODE,
++ ALC662_FIXUP_BASS_CHMAP,
++ ALC662_FIXUP_BASS_1A,
++ ALC662_FIXUP_BASS_1A_CHMAP,
+ };
+
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -4765,6 +4797,25 @@ static const struct hda_fixup alc662_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_mode_alc668,
+ },
++ [ALC662_FIXUP_BASS_CHMAP] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc662_fixup_bass_chmap,
++ .chained = true,
++ .chain_id = ALC662_FIXUP_ASUS_MODE4
++ },
++ [ALC662_FIXUP_BASS_1A] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ {0x1a, 0x80106111}, /* bass speaker */
++ {}
++ },
++ },
++ [ALC662_FIXUP_BASS_1A_CHMAP] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc662_fixup_bass_chmap,
++ .chained = true,
++ .chain_id = ALC662_FIXUP_BASS_1A,
++ },
+ };
+
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -4777,9 +4828,15 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
+ SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+- SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
+- SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4),
++ SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
++ SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
++ SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_CHMAP),
+ SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
+ SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
+ SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index fba0cef1c47f..6133423821d1 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -83,6 +83,7 @@ enum {
+ STAC_DELL_M6_BOTH,
+ STAC_DELL_EQ,
+ STAC_ALIENWARE_M17X,
++ STAC_92HD89XX_HP_FRONT_JACK,
+ STAC_92HD73XX_MODELS
+ };
+
+@@ -97,6 +98,7 @@ enum {
+ STAC_92HD83XXX_HP_LED,
+ STAC_92HD83XXX_HP_INV_LED,
+ STAC_92HD83XXX_HP_MIC_LED,
++ STAC_HP_LED_GPIO10,
+ STAC_92HD83XXX_HEADSET_JACK,
+ STAC_92HD83XXX_HP,
+ STAC_HP_ENVY_BASS,
+@@ -1776,6 +1778,12 @@ static const struct hda_pintbl intel_dg45id_pin_configs[] = {
+ {}
+ };
+
++static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = {
++ { 0x0a, 0x02214030 },
++ { 0x0b, 0x02A19010 },
++ {}
++};
++
+ static void stac92hd73xx_fixup_ref(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+@@ -1894,6 +1902,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
+ [STAC_92HD73XX_NO_JD] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = stac92hd73xx_fixup_no_jd,
++ },
++ [STAC_92HD89XX_HP_FRONT_JACK] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = stac92hd89xx_hp_front_jack_pin_configs,
+ }
+ };
+
+@@ -1954,6 +1966,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
+ "Alienware M17x", STAC_ALIENWARE_M17X),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
+ "Alienware M17x R3", STAC_DELL_EQ),
++ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
++ "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
+ {} /* terminator */
+ };
+
+@@ -2095,6 +2109,17 @@ static void stac92hd83xxx_fixup_hp_mic_led(struct hda_codec *codec,
+ spec->mic_mute_led_gpio = 0x08; /* GPIO3 */
+ }
+
++static void stac92hd83xxx_fixup_hp_led_gpio10(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ struct sigmatel_spec *spec = codec->spec;
++
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ spec->gpio_led = 0x10; /* GPIO4 */
++ spec->default_polarity = 0;
++ }
++}
++
+ static void stac92hd83xxx_fixup_headset_jack(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+@@ -2161,6 +2186,12 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = {
+ .chained = true,
+ .chain_id = STAC_92HD83XXX_HP,
+ },
++ [STAC_HP_LED_GPIO10] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = stac92hd83xxx_fixup_hp_led_gpio10,
++ .chained = true,
++ .chain_id = STAC_92HD83XXX_HP,
++ },
+ [STAC_92HD83XXX_HEADSET_JACK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = stac92hd83xxx_fixup_headset_jack,
+@@ -2232,6 +2263,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
+ "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1888,
+ "HP Envy Spectre", STAC_HP_ENVY_BASS),
++ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1899,
++ "HP Folio 13", STAC_HP_LED_GPIO10),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x18df,
+ "HP Folio", STAC_92HD83XXX_HP_MIC_LED),
+ SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x1900,
+diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
+index dc0284dc9e6f..76fdf0a598bc 100644
+--- a/sound/soc/codecs/da732x.c
++++ b/sound/soc/codecs/da732x.c
+@@ -1268,11 +1268,23 @@ static struct snd_soc_dai_driver da732x_dai[] = {
+ },
+ };
+
++static bool da732x_volatile(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case DA732X_REG_HPL_DAC_OFF_CNTL:
++ case DA732X_REG_HPR_DAC_OFF_CNTL:
++ return true;
++ default:
++ return false;
++ }
++}
++
+ static const struct regmap_config da732x_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = DA732X_MAX_REG,
++ .volatile_reg = da732x_volatile,
+ .reg_defaults = da732x_reg_cache,
+ .num_reg_defaults = ARRAY_SIZE(da732x_reg_cache),
+ .cache_type = REGCACHE_RBTREE,
+diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
+index fc9802d1281d..620f80467bc8 100644
+--- a/sound/soc/codecs/da9055.c
++++ b/sound/soc/codecs/da9055.c
+@@ -1523,8 +1523,15 @@ static int da9055_remove(struct i2c_client *client)
+ return 0;
+ }
+
++/*
++ * DO NOT change the device Ids. The naming is intentionally specific as both
++ * the CODEC and PMIC parts of this chip are instantiated separately as I2C
++ * devices (both have configurable I2C addresses, and are to all intents and
++ * purposes separate). As a result there are specific DA9055 Ids for CODEC
++ * and PMIC, which must be different to operate together.
++ */
+ static const struct i2c_device_id da9055_i2c_id[] = {
+- { "da9055", 0 },
++ { "da9055-codec", 0 },
+ { }
+ };
+ MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
+@@ -1532,7 +1539,7 @@ MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
+ /* I2C codec control layer */
+ static struct i2c_driver da9055_i2c_driver = {
+ .driver = {
+- .name = "da9055",
++ .name = "da9055-codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = da9055_i2c_probe,
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index 0569a4c3ae00..5f728808eed4 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -1769,16 +1769,6 @@ static int max98090_set_bias_level(struct snd_soc_codec *codec,
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+- if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+- ret = regcache_sync(max98090->regmap);
+-
+- if (ret != 0) {
+- dev_err(codec->dev,
+- "Failed to sync cache: %d\n", ret);
+- return ret;
+- }
+- }
+-
+ if (max98090->jack_state == M98090_JACK_STATE_HEADSET) {
+ /*
+ * Set to normal bias level.
+@@ -1792,6 +1782,16 @@ static int max98090_set_bias_level(struct snd_soc_codec *codec,
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
++ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
++ ret = regcache_sync(max98090->regmap);
++ if (ret != 0) {
++ dev_err(codec->dev,
++ "Failed to sync cache: %d\n", ret);
++ return ret;
++ }
++ }
++ break;
++
+ case SND_SOC_BIAS_OFF:
+ /* Set internal pull-up to lowest power mode */
+ snd_soc_update_bits(codec, M98090_REG_JACK_DETECT,
+diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
+index 06edb396e733..2735361a4c3c 100644
+--- a/sound/soc/codecs/sta32x.c
++++ b/sound/soc/codecs/sta32x.c
+@@ -187,42 +187,42 @@ static const unsigned int sta32x_limiter_drc_release_tlv[] = {
+ 13, 16, TLV_DB_SCALE_ITEM(-1500, 300, 0),
+ };
+
+-static const struct soc_enum sta32x_drc_ac_enum =
+- SOC_ENUM_SINGLE(STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
+- 2, sta32x_drc_ac);
+-static const struct soc_enum sta32x_auto_eq_enum =
+- SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
+- 3, sta32x_auto_eq_mode);
+-static const struct soc_enum sta32x_auto_gc_enum =
+- SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
+- 4, sta32x_auto_gc_mode);
+-static const struct soc_enum sta32x_auto_xo_enum =
+- SOC_ENUM_SINGLE(STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
+- 16, sta32x_auto_xo_mode);
+-static const struct soc_enum sta32x_preset_eq_enum =
+- SOC_ENUM_SINGLE(STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
+- 32, sta32x_preset_eq_mode);
+-static const struct soc_enum sta32x_limiter_ch1_enum =
+- SOC_ENUM_SINGLE(STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
+- 3, sta32x_limiter_select);
+-static const struct soc_enum sta32x_limiter_ch2_enum =
+- SOC_ENUM_SINGLE(STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
+- 3, sta32x_limiter_select);
+-static const struct soc_enum sta32x_limiter_ch3_enum =
+- SOC_ENUM_SINGLE(STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
+- 3, sta32x_limiter_select);
+-static const struct soc_enum sta32x_limiter1_attack_rate_enum =
+- SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxA_SHIFT,
+- 16, sta32x_limiter_attack_rate);
+-static const struct soc_enum sta32x_limiter2_attack_rate_enum =
+- SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxA_SHIFT,
+- 16, sta32x_limiter_attack_rate);
+-static const struct soc_enum sta32x_limiter1_release_rate_enum =
+- SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxR_SHIFT,
+- 16, sta32x_limiter_release_rate);
+-static const struct soc_enum sta32x_limiter2_release_rate_enum =
+- SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxR_SHIFT,
+- 16, sta32x_limiter_release_rate);
++static SOC_ENUM_SINGLE_DECL(sta32x_drc_ac_enum,
++ STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
++ sta32x_drc_ac);
++static SOC_ENUM_SINGLE_DECL(sta32x_auto_eq_enum,
++ STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
++ sta32x_auto_eq_mode);
++static SOC_ENUM_SINGLE_DECL(sta32x_auto_gc_enum,
++ STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
++ sta32x_auto_gc_mode);
++static SOC_ENUM_SINGLE_DECL(sta32x_auto_xo_enum,
++ STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
++ sta32x_auto_xo_mode);
++static SOC_ENUM_SINGLE_DECL(sta32x_preset_eq_enum,
++ STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
++ sta32x_preset_eq_mode);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch1_enum,
++ STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
++ sta32x_limiter_select);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch2_enum,
++ STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
++ sta32x_limiter_select);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch3_enum,
++ STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
++ sta32x_limiter_select);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_attack_rate_enum,
++ STA32X_L1AR, STA32X_LxA_SHIFT,
++ sta32x_limiter_attack_rate);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_attack_rate_enum,
++ STA32X_L2AR, STA32X_LxA_SHIFT,
++ sta32x_limiter_attack_rate);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_release_rate_enum,
++ STA32X_L1AR, STA32X_LxR_SHIFT,
++ sta32x_limiter_release_rate);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_release_rate_enum,
++ STA32X_L2AR, STA32X_LxR_SHIFT,
++ sta32x_limiter_release_rate);
+
+ /* byte array controls for setting biquad, mixer, scaling coefficients;
+ * for biquads all five coefficients need to be set in one go,
+@@ -331,7 +331,7 @@ static int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
+
+ static int sta32x_cache_sync(struct snd_soc_codec *codec)
+ {
+- struct sta32x_priv *sta32x = codec->control_data;
++ struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
+ unsigned int mute;
+ int rc;
+
+@@ -434,7 +434,7 @@ SOC_SINGLE_TLV("Treble Tone Control", STA32X_TONE, STA32X_TONE_TTC_SHIFT, 15, 0,
+ SOC_ENUM("Limiter1 Attack Rate (dB/ms)", sta32x_limiter1_attack_rate_enum),
+ SOC_ENUM("Limiter2 Attack Rate (dB/ms)", sta32x_limiter2_attack_rate_enum),
+ SOC_ENUM("Limiter1 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
+-SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
++SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter2_release_rate_enum),
+
+ /* depending on mode, the attack/release thresholds have
+ * two different enum definitions; provide both
+diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
+index 89a18d82f303..5bce21013485 100644
+--- a/sound/soc/codecs/wm8770.c
++++ b/sound/soc/codecs/wm8770.c
+@@ -196,8 +196,8 @@ static const char *ain_text[] = {
+ "AIN5", "AIN6", "AIN7", "AIN8"
+ };
+
+-static const struct soc_enum ain_enum =
+- SOC_ENUM_DOUBLE(WM8770_ADCMUX, 0, 4, 8, ain_text);
++static SOC_ENUM_DOUBLE_DECL(ain_enum,
++ WM8770_ADCMUX, 0, 4, ain_text);
+
+ static const struct snd_kcontrol_new ain_mux =
+ SOC_DAPM_ENUM("Capture Mux", ain_enum);
+diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
+index b0710d817a65..754f88e1fdab 100644
+--- a/sound/soc/codecs/wm8958-dsp2.c
++++ b/sound/soc/codecs/wm8958-dsp2.c
+@@ -153,7 +153,7 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
+
+ data32 &= 0xffffff;
+
+- wm8994_bulk_write(codec->control_data,
++ wm8994_bulk_write(wm8994->wm8994,
+ data32 & 0xffffff,
+ block_len / 2,
+ (void *)(data + 8));
+diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
+index e0305a148568..9edd68db9f48 100644
+--- a/sound/soc/txx9/txx9aclc-ac97.c
++++ b/sound/soc/txx9/txx9aclc-ac97.c
+@@ -183,14 +183,16 @@ static int txx9aclc_ac97_dev_probe(struct platform_device *pdev)
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
++
++ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
++ if (!drvdata)
++ return -ENOMEM;
++
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drvdata->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+- drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+- if (!drvdata)
+- return -ENOMEM;
+ platform_set_drvdata(pdev, drvdata);
+ drvdata->physbase = r->start;
+ if (sizeof(drvdata->physbase) > sizeof(r->start) &&
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index cc2dd1f0decb..0339d464791a 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -322,6 +322,11 @@ static struct usbmix_name_map hercules_usb51_map[] = {
+ { 0 } /* terminator */
+ };
+
++static const struct usbmix_name_map kef_x300a_map[] = {
++ { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
++ { 0 }
++};
++
+ /*
+ * Control map entries
+ */
+@@ -409,6 +414,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ .id = USB_ID(0x200c, 0x1018),
+ .map = ebox44_map,
+ },
++ {
++ .id = USB_ID(0x27ac, 0x1000),
++ .map = kef_x300a_map,
++ },
+ { 0 } /* terminator */
+ };
+
diff --git a/1014_linux-3.12.15.patch b/1014_linux-3.12.15.patch
new file mode 100644
index 00000000..f0ad7354
--- /dev/null
+++ b/1014_linux-3.12.15.patch
@@ -0,0 +1,7386 @@
+diff --git a/Makefile b/Makefile
+index 5d38a5a79b3a..517391a3093e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
+index f33679d2d3ee..50e1d850ee2e 100644
+--- a/arch/arm/mach-sa1100/include/mach/collie.h
++++ b/arch/arm/mach-sa1100/include/mach/collie.h
+@@ -13,6 +13,8 @@
+ #ifndef __ASM_ARCH_COLLIE_H
+ #define __ASM_ARCH_COLLIE_H
+
++#include "hardware.h" /* Gives GPIO_MAX */
++
+ extern void locomolcd_power(int on);
+
+ #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 0b27b6574296..965c28ff7b3b 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -136,10 +136,10 @@ extern struct page *empty_zero_page;
+ /*
+ * The following only work if pte_present(). Undefined behaviour otherwise.
+ */
+-#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
+-#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
+-#define pte_young(pte) (pte_val(pte) & PTE_AF)
+-#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
++#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
++#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
++#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
++#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
+ #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
+ #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
+
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index e0331414c7d6..86479bbf4714 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -14,6 +14,7 @@
+ #define _ASM_MIPSREGS_H
+
+ #include <linux/linkage.h>
++#include <linux/types.h>
+ #include <asm/hazards.h>
+ #include <asm/war.h>
+
+diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
+index 599545738af3..c2dcfaa51987 100644
+--- a/arch/powerpc/include/asm/ppc_asm.h
++++ b/arch/powerpc/include/asm/ppc_asm.h
+@@ -478,13 +478,6 @@ BEGIN_FTR_SECTION_NESTED(945) \
+ std ra,TASKTHREADPPR(rb); \
+ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
+
+-#define RESTORE_PPR(ra, rb) \
+-BEGIN_FTR_SECTION_NESTED(946) \
+- ld ra,PACACURRENT(r13); \
+- ld rb,TASKTHREADPPR(ra); \
+- mtspr SPRN_PPR,rb; /* Restore PPR */ \
+-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
+-
+ #endif
+
+ /*
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index c04cdf70d487..7be37170fda7 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -820,6 +820,12 @@ fast_exception_return:
+ andi. r0,r3,MSR_RI
+ beq- unrecov_restore
+
++ /* Load PPR from thread struct before we clear MSR:RI */
++BEGIN_FTR_SECTION
++ ld r2,PACACURRENT(r13)
++ ld r2,TASKTHREADPPR(r2)
++END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
++
+ /*
+ * Clear RI before restoring r13. If we are returning to
+ * userspace and we take an exception after restoring r13,
+@@ -840,8 +846,10 @@ fast_exception_return:
+ */
+ andi. r0,r3,MSR_PR
+ beq 1f
++BEGIN_FTR_SECTION
++ mtspr SPRN_PPR,r2 /* Restore PPR */
++END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ ACCOUNT_CPU_USER_EXIT(r2, r4)
+- RESTORE_PPR(r2, r4)
+ REST_GPR(13, r1)
+ 1:
+ mtspr SPRN_SRR1,r3
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 96d2fdf3aa9e..aa75b2beba7d 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -928,6 +928,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+ flush_altivec_to_thread(src);
+ flush_vsx_to_thread(src);
+ flush_spe_to_thread(src);
++	/*
++	 * Flush TM state out so we can copy it. __switch_to_tm() does this
++	 * flush but it removes the checkpointed state from the current CPU
++	 * and transitions the CPU out of TM mode. Hence we need to call
++	 * tm_recheckpoint_new_task() (on the same task) to restore the
++	 * checkpointed state and TM mode afterwards.
++	 */
++ __switch_to_tm(src);
++ tm_recheckpoint_new_task(src);
+
+ *dst = *src;
+
+diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
+index b47a0e1ab001..c712ecec13ba 100644
+--- a/arch/powerpc/kernel/reloc_64.S
++++ b/arch/powerpc/kernel/reloc_64.S
+@@ -81,6 +81,7 @@ _GLOBAL(relocate)
+
+ 6: blr
+
++.balign 8
+ p_dyn: .llong __dynamic_start - 0b
+ p_rela: .llong __rela_dyn_start - 0b
+ p_st: .llong _stext - 0b
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 7143793859fa..3e01afa21710 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -100,7 +100,7 @@ config S390
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CPU_DEVICES if !SMP
+ select GENERIC_SMP_IDLE_THREAD
+- select GENERIC_TIME_VSYSCALL_OLD
++ select GENERIC_TIME_VSYSCALL
+ select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+ select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
+ select HAVE_ARCH_SECCOMP_FILTER
+diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
+index 87a22092b68f..6c0281f30d44 100644
+--- a/arch/s390/appldata/appldata_base.c
++++ b/arch/s390/appldata/appldata_base.c
+@@ -527,6 +527,7 @@ static int __init appldata_init(void)
+ {
+ int rc;
+
++ init_virt_timer(&appldata_timer);
+ appldata_timer.function = appldata_timer_function;
+ appldata_timer.data = (unsigned long) &appldata_work;
+
+diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
+index a73eb2e1e918..bc9746a7d47c 100644
+--- a/arch/s390/include/asm/vdso.h
++++ b/arch/s390/include/asm/vdso.h
+@@ -26,8 +26,9 @@ struct vdso_data {
+ __u64 wtom_clock_nsec; /* 0x28 */
+ __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
+ __u32 tz_dsttime; /* Type of dst correction 0x34 */
+- __u32 ectg_available;
+- __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */
++ __u32 ectg_available; /* ECTG instruction present 0x38 */
++ __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */
++ __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */
+ };
+
+ struct vdso_per_cpu_data {
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index 2416138ebd3e..496116cd65ec 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -65,7 +65,8 @@ int main(void)
+ DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
+ DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
+ DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
+- DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
++ DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
++ DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
+ DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
+ DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
+ /* constants used by the vdso */
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index 064c3082ab33..dd95f1631621 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta)
+ set_clock_comparator(S390_lowcore.clock_comparator);
+ }
+
+-static int s390_next_ktime(ktime_t expires,
++static int s390_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+ {
+- struct timespec ts;
+- u64 nsecs;
+-
+- ts.tv_sec = ts.tv_nsec = 0;
+- monotonic_to_bootbased(&ts);
+- nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
+- do_div(nsecs, 125);
+- S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
+- /* Program the maximum value if we have an overflow (== year 2042) */
+- if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
+- S390_lowcore.clock_comparator = -1ULL;
++ S390_lowcore.clock_comparator = get_tod_clock() + delta;
+ set_clock_comparator(S390_lowcore.clock_comparator);
+ return 0;
+ }
+@@ -146,15 +136,14 @@ void init_cpu_timer(void)
+ cpu = smp_processor_id();
+ cd = &per_cpu(comparators, cpu);
+ cd->name = "comparator";
+- cd->features = CLOCK_EVT_FEAT_ONESHOT |
+- CLOCK_EVT_FEAT_KTIME;
++ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+ cd->mult = 16777;
+ cd->shift = 12;
+ cd->min_delta_ns = 1;
+ cd->max_delta_ns = LONG_MAX;
+ cd->rating = 400;
+ cd->cpumask = cpumask_of(cpu);
+- cd->set_next_ktime = s390_next_ktime;
++ cd->set_next_event = s390_next_event;
+ cd->set_mode = s390_set_mode;
+
+ clockevents_register_device(cd);
+@@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void)
+ return &clocksource_tod;
+ }
+
+-void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
+- struct clocksource *clock, u32 mult)
++void update_vsyscall(struct timekeeper *tk)
+ {
+- if (clock != &clocksource_tod)
++ u64 nsecps;
++
++ if (tk->clock != &clocksource_tod)
+ return;
+
+ /* Make userspace gettimeofday spin until we're done. */
+ ++vdso_data->tb_update_count;
+ smp_wmb();
+- vdso_data->xtime_tod_stamp = clock->cycle_last;
+- vdso_data->xtime_clock_sec = wall_time->tv_sec;
+- vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
+- vdso_data->wtom_clock_sec = wtm->tv_sec;
+- vdso_data->wtom_clock_nsec = wtm->tv_nsec;
+- vdso_data->ntp_mult = mult;
++ vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
++ vdso_data->xtime_clock_sec = tk->xtime_sec;
++ vdso_data->xtime_clock_nsec = tk->xtime_nsec;
++ vdso_data->wtom_clock_sec =
++ tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
++ vdso_data->wtom_clock_nsec = tk->xtime_nsec +
++ + (tk->wall_to_monotonic.tv_nsec << tk->shift);
++ nsecps = (u64) NSEC_PER_SEC << tk->shift;
++ while (vdso_data->wtom_clock_nsec >= nsecps) {
++ vdso_data->wtom_clock_nsec -= nsecps;
++ vdso_data->wtom_clock_sec++;
++ }
++ vdso_data->tk_mult = tk->mult;
++ vdso_data->tk_shift = tk->shift;
+ smp_wmb();
+ ++vdso_data->tb_update_count;
+ }
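Across these s390 vdso hunks, the pre-scaled NTP multiplier and the hard-coded shift of 12 give way to the timekeeper's own mult/shift pair, so the conversion becomes nanoseconds = (cycles * mult) >> shift. A stand-alone sketch of that arithmetic; tk_mult and tk_shift below are made-up illustrative values, not taken from the kernel:

#include <stdio.h>
#include <stdint.h>

/* illustrative values only; the real pair comes from struct timekeeper */
static const uint32_t tk_mult  = 4194304;
static const uint32_t tk_shift = 22;

/* (delta * mult) >> shift, the conversion the rewritten vdso code performs;
 * the kernel keeps delta small enough that the multiply cannot overflow */
static uint64_t cycles_to_ns(uint64_t delta)
{
	return (delta * tk_mult) >> tk_shift;
}

int main(void)
{
	printf("1000 cycles -> %llu ns\n",
	       (unsigned long long)cycles_to_ns(1000));
	return 0;
}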
+diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
+index b2224e0b974c..5be8e472f57d 100644
+--- a/arch/s390/kernel/vdso32/clock_gettime.S
++++ b/arch/s390/kernel/vdso32/clock_gettime.S
+@@ -38,25 +38,26 @@ __kernel_clock_gettime:
+ sl %r1,__VDSO_XTIME_STAMP+4(%r5)
+ brc 3,2f
+ ahi %r0,-1
+-2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
++2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
+ lr %r2,%r0
+- l %r0,__VDSO_NTP_MULT(%r5)
++ l %r0,__VDSO_TK_MULT(%r5)
+ ltr %r1,%r1
+ mr %r0,%r0
+ jnm 3f
+- a %r0,__VDSO_NTP_MULT(%r5)
++ a %r0,__VDSO_TK_MULT(%r5)
+ 3: alr %r0,%r2
+- srdl %r0,12
+- al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
++ al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
+ al %r1,__VDSO_XTIME_NSEC+4(%r5)
+ brc 12,4f
+ ahi %r0,1
+-4: l %r2,__VDSO_XTIME_SEC+4(%r5)
+- al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
++4: al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
+ al %r1,__VDSO_WTOM_NSEC+4(%r5)
+ brc 12,5f
+ ahi %r0,1
+-5: al %r2,__VDSO_WTOM_SEC+4(%r5)
++5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
++ srdl %r0,0(%r2) /* >> tk->shift */
++ l %r2,__VDSO_XTIME_SEC+4(%r5)
++ al %r2,__VDSO_WTOM_SEC+4(%r5)
+ cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
+ jne 1b
+ basr %r5,0
+@@ -86,20 +87,21 @@ __kernel_clock_gettime:
+ sl %r1,__VDSO_XTIME_STAMP+4(%r5)
+ brc 3,12f
+ ahi %r0,-1
+-12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
++12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
+ lr %r2,%r0
+- l %r0,__VDSO_NTP_MULT(%r5)
++ l %r0,__VDSO_TK_MULT(%r5)
+ ltr %r1,%r1
+ mr %r0,%r0
+ jnm 13f
+- a %r0,__VDSO_NTP_MULT(%r5)
++ a %r0,__VDSO_TK_MULT(%r5)
+ 13: alr %r0,%r2
+- srdl %r0,12
+- al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
++ al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
+ al %r1,__VDSO_XTIME_NSEC+4(%r5)
+ brc 12,14f
+ ahi %r0,1
+-14: l %r2,__VDSO_XTIME_SEC+4(%r5)
++14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
++ srdl %r0,0(%r2) /* >> tk->shift */
++ l %r2,__VDSO_XTIME_SEC+4(%r5)
+ cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
+ jne 11b
+ basr %r5,0
+diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
+index 2d3633175e3b..fd621a950f7c 100644
+--- a/arch/s390/kernel/vdso32/gettimeofday.S
++++ b/arch/s390/kernel/vdso32/gettimeofday.S
+@@ -35,15 +35,14 @@ __kernel_gettimeofday:
+ sl %r1,__VDSO_XTIME_STAMP+4(%r5)
+ brc 3,3f
+ ahi %r0,-1
+-3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
++3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
+ st %r0,24(%r15)
+- l %r0,__VDSO_NTP_MULT(%r5)
++ l %r0,__VDSO_TK_MULT(%r5)
+ ltr %r1,%r1
+ mr %r0,%r0
+ jnm 4f
+- a %r0,__VDSO_NTP_MULT(%r5)
++ a %r0,__VDSO_TK_MULT(%r5)
+ 4: al %r0,24(%r15)
+- srdl %r0,12
+ al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+ al %r1,__VDSO_XTIME_NSEC+4(%r5)
+ brc 12,5f
+@@ -51,6 +50,8 @@ __kernel_gettimeofday:
+ 5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
+ cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
+ jne 1b
++ l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
++ srdl %r0,0(%r4) /* >> tk->shift */
+ l %r4,24(%r15) /* get tv_sec from stack */
+ basr %r5,0
+ 6: ltr %r0,%r0
+diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
+index d46c95ed5f19..0add1072ba30 100644
+--- a/arch/s390/kernel/vdso64/clock_gettime.S
++++ b/arch/s390/kernel/vdso64/clock_gettime.S
+@@ -34,14 +34,15 @@ __kernel_clock_gettime:
+ tmll %r4,0x0001 /* pending update ? loop */
+ jnz 0b
+ stck 48(%r15) /* Store TOD clock */
++ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
++ lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
++ alg %r0,__VDSO_WTOM_SEC(%r5) /* + wall_to_monotonic.sec */
+ lg %r1,48(%r15)
+ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+- msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
+- srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
+- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+- lg %r0,__VDSO_XTIME_SEC(%r5)
+- alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
+- alg %r0,__VDSO_WTOM_SEC(%r5)
++ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
++ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
++ alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
++ srlg %r1,%r1,0(%r2) /* >> tk->shift */
+ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
+ jne 0b
+ larl %r5,13f
+@@ -62,12 +63,13 @@ __kernel_clock_gettime:
+ tmll %r4,0x0001 /* pending update ? loop */
+ jnz 5b
+ stck 48(%r15) /* Store TOD clock */
++ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ lg %r1,48(%r15)
+ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+- msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
+- srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
+- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+- lg %r0,__VDSO_XTIME_SEC(%r5)
++ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
++ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
++ srlg %r1,%r1,0(%r2) /* >> tk->shift */
++ lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
+ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
+ jne 5b
+ larl %r5,13f
+diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
+index 36ee674722ec..d0860d1d0ccc 100644
+--- a/arch/s390/kernel/vdso64/gettimeofday.S
++++ b/arch/s390/kernel/vdso64/gettimeofday.S
+@@ -31,12 +31,13 @@ __kernel_gettimeofday:
+ stck 48(%r15) /* Store TOD clock */
+ lg %r1,48(%r15)
+ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+- msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
+- srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
+- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
+- lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
++ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
++ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
++ lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
+ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
+ jne 0b
++ lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
++ srlg %r1,%r1,0(%r5) /* >> tk->shift */
+ larl %r5,5f
+ 2: clg %r1,0(%r5)
+ jl 3f
+diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
+index 2c37aadcbc35..32ce71375b21 100644
+--- a/arch/x86/include/asm/kdebug.h
++++ b/arch/x86/include/asm/kdebug.h
+@@ -21,7 +21,7 @@ enum die_val {
+ DIE_NMIUNKNOWN,
+ };
+
+-extern void printk_address(unsigned long address, int reliable);
++extern void printk_address(unsigned long address);
+ extern void die(const char *, struct pt_regs *,long);
+ extern int __must_check __die(const char *, struct pt_regs *, long);
+ extern void show_trace(struct task_struct *t, struct pt_regs *regs,
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index deb6421c9e69..d9c12d3022a7 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -25,12 +25,17 @@ unsigned int code_bytes = 64;
+ int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
+ static int die_counter;
+
+-void printk_address(unsigned long address, int reliable)
++static void printk_stack_address(unsigned long address, int reliable)
+ {
+ pr_cont(" [<%p>] %s%pB\n",
+ (void *)address, reliable ? "" : "? ", (void *)address);
+ }
+
++void printk_address(unsigned long address)
++{
++ pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
++}
++
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ static void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+@@ -151,7 +156,7 @@ static void print_trace_address(void *data, unsigned long addr, int reliable)
+ {
+ touch_nmi_watchdog();
+ printk(data);
+- printk_address(addr, reliable);
++ printk_stack_address(addr, reliable);
+ }
+
+ static const struct stacktrace_ops print_trace_ops = {
+@@ -281,7 +286,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+ #else
+ /* Executive summary in case the oops scrolled away */
+ printk(KERN_ALERT "RIP ");
+- printk_address(regs->ip, 1);
++ printk_address(regs->ip);
+ printk(" RSP <%016lx>\n", regs->sp);
+ #endif
+ return 0;
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 81ba27679f18..f36bd42d6f0c 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -544,6 +544,10 @@ ENDPROC(early_idt_handlers)
+ /* This is global to keep gas from relaxing the jumps */
+ ENTRY(early_idt_handler)
+ cld
++
++ cmpl $2,(%esp) # X86_TRAP_NMI
++ je is_nmi # Ignore NMI
++
+ cmpl $2,%ss:early_recursion_flag
+ je hlt_loop
+ incl %ss:early_recursion_flag
+@@ -594,8 +598,9 @@ ex_entry:
+ pop %edx
+ pop %ecx
+ pop %eax
+- addl $8,%esp /* drop vector number and error code */
+ decl %ss:early_recursion_flag
++is_nmi:
++ addl $8,%esp /* drop vector number and error code */
+ iret
+ ENDPROC(early_idt_handler)
+
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index e1aabdb314c8..a468c0a65c42 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -343,6 +343,9 @@ early_idt_handlers:
+ ENTRY(early_idt_handler)
+ cld
+
++ cmpl $2,(%rsp) # X86_TRAP_NMI
++ je is_nmi # Ignore NMI
++
+ cmpl $2,early_recursion_flag(%rip)
+ jz 1f
+ incl early_recursion_flag(%rip)
+@@ -405,8 +408,9 @@ ENTRY(early_idt_handler)
+ popq %rdx
+ popq %rcx
+ popq %rax
+- addq $16,%rsp # drop vector number and error code
+ decl early_recursion_flag(%rip)
++is_nmi:
++ addq $16,%rsp # drop vector number and error code
+ INTERRUPT_RETURN
+ ENDPROC(early_idt_handler)
+
+diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
+index 5d576ab34403..21935afebe19 100644
+--- a/arch/x86/kernel/i387.c
++++ b/arch/x86/kernel/i387.c
+@@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
+
+ void __kernel_fpu_end(void)
+ {
+- if (use_eager_fpu())
+- math_state_restore();
+- else
++ if (use_eager_fpu()) {
++	/*
++	 * For eager fpu, most of the time, tsk_used_math() is true.
++	 * Restore the user math as we are done with the kernel usage.
++	 * In a few instances, such as during thread exit or signal
++	 * handling, tsk_used_math() is false; those places take the
++	 * proper action themselves, so no restore is needed here.
++	 */
++ if (likely(tsk_used_math(current)))
++ math_state_restore();
++ } else {
+ stts();
++ }
+ }
+ EXPORT_SYMBOL(__kernel_fpu_end);
+
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index bb1dc51bab05..8e9fe8dfd37b 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -63,7 +63,7 @@ void __show_regs(struct pt_regs *regs, int all)
+ unsigned int ds, cs, es;
+
+ printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
+- printk_address(regs->ip, 1);
++ printk_address(regs->ip);
+ printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
+ regs->sp, regs->flags);
+ printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
+diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
+index 04ee1e2e4c02..52dbf1e400dc 100644
+--- a/arch/x86/kernel/quirks.c
++++ b/arch/x86/kernel/quirks.c
+@@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev)
+ return;
+
+ pci_read_config_dword(nb_ht, 0x60, &val);
+- node = val & 7;
++ node = pcibus_to_node(dev->bus) | (val & 7);
+ /*
+ * Some hardware may return an invalid node ID,
+ * so check it first:
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index c0bc80391e40..612c717747dd 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2993,10 +2993,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
+ u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
+ /* instruction emulation calls kvm_set_cr8() */
+ r = cr_interception(svm);
+- if (irqchip_in_kernel(svm->vcpu.kvm)) {
+- clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
++ if (irqchip_in_kernel(svm->vcpu.kvm))
+ return r;
+- }
+ if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
+ return r;
+ kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+@@ -3558,6 +3556,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+ if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ return;
+
++ clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
++
+ if (irr == -1)
+ return;
+
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index d8b1ff68dbb9..5b90bbcad9f6 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -596,7 +596,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+
+ printk(KERN_CONT " at %p\n", (void *) address);
+ printk(KERN_ALERT "IP:");
+- printk_address(regs->ip, 1);
++ printk_address(regs->ip);
+
+ dump_pagetable(address);
+ }
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index 9515f18898b2..f37dec579712 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -297,6 +297,54 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
+ },
+ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "ThinkPad Edge E530",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "ThinkPad Edge E530",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Acer Aspire V5-573G",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Acer Aspire V5-572G",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "ThinkPad T431s",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "ThinkPad T430",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
++ },
++ },
+
+ /*
+ * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 15986f32009e..3cc0b92e3544 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -70,6 +70,8 @@ enum ec_command {
+ #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
+ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
+ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
++#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
++ * when trying to clear the EC */
+
+ enum {
+ EC_FLAGS_QUERY_PENDING, /* Query is pending */
+@@ -123,6 +125,7 @@ EXPORT_SYMBOL(first_ec);
+ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
+ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
+ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
++static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
+
+ /* --------------------------------------------------------------------------
+ Transaction Management
+@@ -468,6 +471,29 @@ acpi_handle ec_get_handle(void)
+
+ EXPORT_SYMBOL(ec_get_handle);
+
++static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
++
++/*
++ * Clear stale _Q events that might have accumulated in the EC.
++ * Must be called with the EC mutex locked.
++ */
++static void acpi_ec_clear(struct acpi_ec *ec)
++{
++ int i, status;
++ u8 value = 0;
++
++ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
++ status = acpi_ec_query_unlocked(ec, &value);
++ if (status || !value)
++ break;
++ }
++
++ if (unlikely(i == ACPI_EC_CLEAR_MAX))
++ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
++ else
++ pr_info("%d stale EC events cleared\n", i);
++}
++
+ void acpi_ec_block_transactions(void)
+ {
+ struct acpi_ec *ec = first_ec;
+@@ -491,6 +517,10 @@ void acpi_ec_unblock_transactions(void)
+ mutex_lock(&ec->mutex);
+ /* Allow transactions to be carried out again */
+ clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
++
++ if (EC_FLAGS_CLEAR_ON_RESUME)
++ acpi_ec_clear(ec);
++
+ mutex_unlock(&ec->mutex);
+ }
+
+@@ -848,6 +878,13 @@ static int acpi_ec_add(struct acpi_device *device)
+
+ /* EC is fully operational, allow queries */
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
++
++ /* Clear stale _Q events if hardware might require that */
++ if (EC_FLAGS_CLEAR_ON_RESUME) {
++ mutex_lock(&ec->mutex);
++ acpi_ec_clear(ec);
++ mutex_unlock(&ec->mutex);
++ }
+ return ret;
+ }
+
+@@ -949,6 +986,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
+ return 0;
+ }
+
++/*
++ * On some hardware it is necessary to clear events accumulated by the EC
++ * during sleep. If too many events accumulate, these ECs stop reporting
++ * GPEs until they are manually polled (e.g. Samsung Series 5/9 notebooks).
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
++ *
++ * Ideally, the EC should also be instructed NOT to accumulate events during
++ * sleep (which Windows seems to do somehow), but the interface to control
++ * this behaviour is not known at this time.
++ *
++ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx;
++ * however, it is very likely that other Samsung models are affected as well.
++ *
++ * On systems which don't accumulate _Q events during sleep, this extra check
++ * should be harmless.
++ */
++static int ec_clear_on_resume(const struct dmi_system_id *id)
++{
++ pr_debug("Detected system needing EC poll on resume.\n");
++ EC_FLAGS_CLEAR_ON_RESUME = 1;
++ return 0;
++}
++
+ static struct dmi_system_id ec_dmi_table[] __initdata = {
+ {
+ ec_skip_dsdt_scan, "Compal JFL92", {
+@@ -992,6 +1053,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
++ {
++ ec_clear_on_resume, "Samsung hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
+ {},
+ };
+
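The clear-on-resume quirk above drains stale _Q events in a bounded loop: stop on error or an empty queue, and warn if the cap is reached. A stand-alone sketch of the same shape, where query_event() is a hypothetical stand-in for acpi_ec_query_unlocked():

#include <stdio.h>

#define CLEAR_MAX 100  /* mirrors ACPI_EC_CLEAR_MAX */

/* hypothetical stand-in for acpi_ec_query_unlocked(); pretends the EC
 * has three stale events queued, then reports an empty queue */
static int query_event(unsigned char *value)
{
	static int pending = 3;

	*value = pending ? 0x50 : 0;  /* 0 means "nothing queued" */
	if (pending)
		pending--;
	return 0;                     /* 0 == success */
}

int main(void)
{
	unsigned char value = 0;
	int i;

	for (i = 0; i < CLEAR_MAX; i++) {
		int status = query_event(&value);

		if (status || !value)  /* error, or queue drained */
			break;
	}

	if (i == CLEAR_MAX)
		printf("warning: hit the cap of %d drained events\n", i);
	else
		printf("%d stale events cleared\n", i);
	return 0;
}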
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index b7201fc6f1e1..0bdacc5e26a3 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_MEMORY24:
+ memory24 = &ares->data.memory24;
++ if (!memory24->address_length)
++ return false;
+ acpi_dev_get_memresource(res, memory24->minimum,
+ memory24->address_length,
+ memory24->write_protect);
+ break;
+ case ACPI_RESOURCE_TYPE_MEMORY32:
+ memory32 = &ares->data.memory32;
++ if (!memory32->address_length)
++ return false;
+ acpi_dev_get_memresource(res, memory32->minimum,
+ memory32->address_length,
+ memory32->write_protect);
+ break;
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ fixed_memory32 = &ares->data.fixed_memory32;
++ if (!fixed_memory32->address_length)
++ return false;
+ acpi_dev_get_memresource(res, fixed_memory32->address,
+ fixed_memory32->address_length,
+ fixed_memory32->write_protect);
+@@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_IO:
+ io = &ares->data.io;
++ if (!io->address_length)
++ return false;
+ acpi_dev_get_ioresource(res, io->minimum,
+ io->address_length,
+ io->io_decode);
+ break;
+ case ACPI_RESOURCE_TYPE_FIXED_IO:
+ fixed_io = &ares->data.fixed_io;
++ if (!fixed_io->address_length)
++ return false;
+ acpi_dev_get_ioresource(res, fixed_io->address,
+ fixed_io->address_length,
+ ACPI_DECODE_10);
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 14df30580e15..99e5158456d8 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -75,6 +75,17 @@ static int acpi_sleep_prepare(u32 acpi_state)
+ return 0;
+ }
+
++static bool acpi_sleep_state_supported(u8 sleep_state)
++{
++ acpi_status status;
++ u8 type_a, type_b;
++
++ status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
++ return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
++ || (acpi_gbl_FADT.sleep_control.address
++ && acpi_gbl_FADT.sleep_status.address));
++}
++
+ #ifdef CONFIG_ACPI_SLEEP
+ static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+
+@@ -608,15 +619,9 @@ static void acpi_sleep_suspend_setup(void)
+ {
+ int i;
+
+- for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
+- acpi_status status;
+- u8 type_a, type_b;
+-
+- status = acpi_get_sleep_type_data(i, &type_a, &type_b);
+- if (ACPI_SUCCESS(status)) {
++ for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
++ if (acpi_sleep_state_supported(i))
+ sleep_states[i] = 1;
+- }
+- }
+
+ suspend_set_ops(old_suspend_ordering ?
+ &acpi_suspend_ops_old : &acpi_suspend_ops);
+@@ -747,11 +752,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
+
+ static void acpi_sleep_hibernate_setup(void)
+ {
+- acpi_status status;
+- u8 type_a, type_b;
+-
+- status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
+- if (ACPI_FAILURE(status))
++ if (!acpi_sleep_state_supported(ACPI_STATE_S4))
+ return;
+
+ hibernation_set_ops(old_suspend_ordering ?
+@@ -800,8 +801,6 @@ static void acpi_power_off(void)
+
+ int __init acpi_sleep_init(void)
+ {
+- acpi_status status;
+- u8 type_a, type_b;
+ char supported[ACPI_S_STATE_COUNT * 3 + 1];
+ char *pos = supported;
+ int i;
+@@ -816,8 +815,7 @@ int __init acpi_sleep_init(void)
+ acpi_sleep_suspend_setup();
+ acpi_sleep_hibernate_setup();
+
+- status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
+- if (ACPI_SUCCESS(status)) {
++ if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
+ sleep_states[ACPI_STATE_S5] = 1;
+ pm_power_off_prepare = acpi_power_off_prepare;
+ pm_power_off = acpi_power_off;
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index f3c361b5c5e5..c5d056e974f1 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+
+ /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
++ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+
+ /* Blacklist entries taken from Silicon Image 3124/3132
+ Windows driver .inf file - also several Linux problem reports */
+@@ -4224,7 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+
+ /* devices that don't properly handle queued TRIM commands */
+ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+- { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
++ { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
+ /*
+ * Some WD SATA-I drives spin up and down erratically when the link
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index de4aa409abe2..2c6d5e118ac1 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data)
+ old->config_rom_retries = 0;
+ fw_notice(card, "rediscovered device %s\n", dev_name(dev));
+
+- PREPARE_DELAYED_WORK(&old->work, fw_device_update);
++ old->workfn = fw_device_update;
+ fw_schedule_device_work(old, 0);
+
+ if (current_node == card->root_node)
+@@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work)
+ if (atomic_cmpxchg(&device->state,
+ FW_DEVICE_INITIALIZING,
+ FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
+- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
++ device->workfn = fw_device_shutdown;
+ fw_schedule_device_work(device, SHUTDOWN_DELAY);
+ } else {
+ fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
+@@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work)
+ dev_name(&device->device), fw_rcode_string(ret));
+ gone:
+ atomic_set(&device->state, FW_DEVICE_GONE);
+- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
++ device->workfn = fw_device_shutdown;
+ fw_schedule_device_work(device, SHUTDOWN_DELAY);
+ out:
+ if (node_id == card->root_node->node_id)
+ fw_schedule_bm_work(card, 0);
+ }
+
++static void fw_device_workfn(struct work_struct *work)
++{
++ struct fw_device *device = container_of(to_delayed_work(work),
++ struct fw_device, work);
++ device->workfn(work);
++}
++
+ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ {
+ struct fw_device *device;
+@@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ * power-up after getting plugged in. We schedule the
+ * first config rom scan half a second after bus reset.
+ */
+- INIT_DELAYED_WORK(&device->work, fw_device_init);
++ device->workfn = fw_device_init;
++ INIT_DELAYED_WORK(&device->work, fw_device_workfn);
+ fw_schedule_device_work(device, INITIAL_DELAY);
+ break;
+
+@@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ if (atomic_cmpxchg(&device->state,
+ FW_DEVICE_RUNNING,
+ FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
+- PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
++ device->workfn = fw_device_refresh;
+ fw_schedule_device_work(device,
+ device->is_local ? 0 : INITIAL_DELAY);
+ }
+@@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ smp_wmb(); /* update node_id before generation */
+ device->generation = card->generation;
+ if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
+- PREPARE_DELAYED_WORK(&device->work, fw_device_update);
++ device->workfn = fw_device_update;
+ fw_schedule_device_work(device, 0);
+ }
+ break;
+@@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ device = node->data;
+ if (atomic_xchg(&device->state,
+ FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
+- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
++ device->workfn = fw_device_shutdown;
+ fw_schedule_device_work(device,
+ list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
+ }
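These firewire hunks (and the sbp2 ones further down) replace PREPARE_DELAYED_WORK() with a function pointer kept in the object plus a single trampoline registered once; re-pointing the member changes behaviour without re-initializing the work item. A minimal stand-alone sketch of the pattern, with illustrative struct and function names:

#include <stdio.h>

struct unit;
typedef void (*unit_fn)(struct unit *);

struct unit {
	unit_fn workfn;     /* current behaviour, re-pointed at runtime */
	const char *name;
};

/* the one function ever registered with the (imagined) work queue;
 * it simply dispatches to whatever workfn points at right now */
static void unit_trampoline(struct unit *u)
{
	u->workfn(u);
}

static void do_login(struct unit *u)     { printf("%s: login\n", u->name); }
static void do_reconnect(struct unit *u) { printf("%s: reconnect\n", u->name); }

int main(void)
{
	struct unit lu = { .workfn = do_login, .name = "lu0" };

	unit_trampoline(&lu);       /* runs login */
	lu.workfn = do_reconnect;   /* swap behaviour in place */
	unit_trampoline(&lu);       /* runs reconnect */
	return 0;
}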
+diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
+index 6b895986dc22..4af0a7bad7f2 100644
+--- a/drivers/firewire/net.c
++++ b/drivers/firewire/net.c
+@@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
+ if (rcode == RCODE_COMPLETE) {
+ fwnet_transmit_packet_done(ptask);
+ } else {
+- fwnet_transmit_packet_failed(ptask);
+-
+ if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
+ dev_err(&ptask->dev->netdev->dev,
+ "fwnet_write_complete failed: %x (skipped %d)\n",
+@@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
+
+ errors_skipped = 0;
+ last_rcode = rcode;
+- } else
++ } else {
+ errors_skipped++;
++ }
++ fwnet_transmit_packet_failed(ptask);
+ }
+ }
+
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index 6aa8a86cb83b..ee805a57b72d 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -290,7 +290,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
+ #define QUIRK_NO_MSI 0x10
+ #define QUIRK_TI_SLLZ059 0x20
+ #define QUIRK_IR_WAKE 0x40
+-#define QUIRK_PHY_LCTRL_TIMEOUT 0x80
+
+ /* In case of multiple matches in ohci_quirks[], only the first one is used. */
+ static const struct {
+@@ -303,10 +302,7 @@ static const struct {
+ QUIRK_BE_HEADERS},
+
+ {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
+- QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
+-
+- {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
+- QUIRK_PHY_LCTRL_TIMEOUT},
++ QUIRK_NO_MSI},
+
+ {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
+ QUIRK_RESET_PACKET},
+@@ -353,7 +349,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
+ ", disable MSI = " __stringify(QUIRK_NO_MSI)
+ ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
+ ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
+- ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT)
+ ")");
+
+ #define OHCI_PARAM_DEBUG_AT_AR 1
+@@ -2295,9 +2290,6 @@ static int ohci_enable(struct fw_card *card,
+ * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
+ * cannot actually use the phy at that time. These need tens of
+ * millisecods pause between LPS write and first phy access too.
+- *
+- * But do not wait for 50msec on Agere/LSI cards. Their phy
+- * arbitration state machine may time out during such a long wait.
+ */
+
+ reg_write(ohci, OHCI1394_HCControlSet,
+@@ -2305,11 +2297,8 @@ static int ohci_enable(struct fw_card *card,
+ OHCI1394_HCControl_postedWriteEnable);
+ flush_writes(ohci);
+
+- if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
++ for (lps = 0, i = 0; !lps && i < 3; i++) {
+ msleep(50);
+-
+- for (lps = 0, i = 0; !lps && i < 150; i++) {
+- msleep(1);
+ lps = reg_read(ohci, OHCI1394_HCControlSet) &
+ OHCI1394_HCControl_LPS;
+ }
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index 281029daf98c..7aef911fdc71 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -146,6 +146,7 @@ struct sbp2_logical_unit {
+ */
+ int generation;
+ int retries;
++ work_func_t workfn;
+ struct delayed_work work;
+ bool has_sdev;
+ bool blocked;
+@@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work)
+ /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
+ sbp2_set_busy_timeout(lu);
+
+- PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
++ lu->workfn = sbp2_reconnect;
+ sbp2_agent_reset(lu);
+
+ /* This was a re-login. */
+@@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work)
+ * If a bus reset happened, sbp2_update will have requeued
+ * lu->work already. Reset the work from reconnect to login.
+ */
+- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
++ lu->workfn = sbp2_login;
+ }
+
+ static void sbp2_reconnect(struct work_struct *work)
+@@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work)
+ lu->retries++ >= 5) {
+ dev_err(tgt_dev(tgt), "failed to reconnect\n");
+ lu->retries = 0;
+- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
++ lu->workfn = sbp2_login;
+ }
+ sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+
+@@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work)
+ sbp2_conditionally_unblock(lu);
+ }
+
++static void sbp2_lu_workfn(struct work_struct *work)
++{
++ struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
++ struct sbp2_logical_unit, work);
++ lu->workfn(work);
++}
++
+ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
+ {
+ struct sbp2_logical_unit *lu;
+@@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
+ lu->blocked = false;
+ ++tgt->dont_block;
+ INIT_LIST_HEAD(&lu->orb_list);
+- INIT_DELAYED_WORK(&lu->work, sbp2_login);
++ lu->workfn = sbp2_login;
++ INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
+
+ list_add_tail(&lu->link, &tgt->lu_list);
+ return 0;
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 2ad27880cd04..2bef0e4cfda8 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -376,7 +376,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
+ void intel_detect_pch(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct pci_dev *pch;
++ struct pci_dev *pch = NULL;
+
+ /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
+ * (which really amounts to a PCH but no South Display).
+@@ -397,12 +397,9 @@ void intel_detect_pch(struct drm_device *dev)
+ * all the ISA bridge devices and check for the first match, instead
+ * of only checking the first one.
+ */
+- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+- while (pch) {
+- struct pci_dev *curr = pch;
++ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+ if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+- unsigned short id;
+- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
++ unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ dev_priv->pch_id = id;
+
+ if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+@@ -428,18 +425,16 @@ void intel_detect_pch(struct drm_device *dev)
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
+ WARN_ON(!IS_ULT(dev));
+- } else {
+- goto check_next;
+- }
+- pci_dev_put(pch);
++ } else
++ continue;
++
+ break;
+ }
+-check_next:
+- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
+- pci_dev_put(curr);
+ }
+ if (!pch)
+- DRM_DEBUG_KMS("No PCH found?\n");
++ DRM_DEBUG_KMS("No PCH found.\n");
++
++ pci_dev_put(pch);
+ }
+
+ bool i915_semaphore_is_enabled(struct drm_device *dev)
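The intel_detect_pch() rewrite above relies on the cursor style of pci_get_class(): NULL starts the walk, the previous device resumes it, and the helper releases the reference it was handed. A simplified sketch of that iteration contract, minus the refcounting; get_next() and the table are hypothetical:

#include <stdio.h>
#include <stddef.h>

static const char *bridges[] = { "isa0", "isa1", "isa2", NULL };

/* hypothetical analogue of pci_get_class(): NULL starts the walk,
 * the previous element resumes it, a NULL return ends it */
static const char *get_next(const char *prev)
{
	size_t i;

	if (!prev)
		return bridges[0];
	for (i = 0; bridges[i]; i++)
		if (bridges[i] == prev)
			return bridges[i + 1];
	return NULL;
}

int main(void)
{
	const char *dev = NULL;

	while ((dev = get_next(dev)))
		printf("checking %s\n", dev);
	return 0;
}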
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 4148cc85bf7f..4d302f3dec89 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -834,7 +834,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+ {
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+- if (IS_G4X(dev))
++ if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+ return 165000;
+ else if (IS_HASWELL(dev))
+ return 300000;
+@@ -887,8 +887,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ * outputs. We also need to check that the higher clock still fits
+ * within limits.
+ */
+- if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
+- && HAS_PCH_SPLIT(dev)) {
++ if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
++ clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
+ DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
+ desired_bpp = 12*3;
+
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 5e891b226acf..7bb7074a131f 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1313,7 +1313,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ }
+ if (is_dp)
+ args.v5.ucLaneNum = dp_lane_count;
+- else if (radeon_encoder->pixel_clock > 165000)
++ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v5.ucLaneNum = 8;
+ else
+ args.v5.ucLaneNum = 4;
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 31f5f0e88328..25370ac56b4b 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -3517,8 +3517,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
+ {
+ if (enable)
+ WREG32(CP_MEC_CNTL, 0);
+- else
++ else {
+ WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
++ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
++ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
++ }
+ udelay(50);
+ }
+
+@@ -6995,26 +6998,7 @@ static int cik_startup(struct radeon_device *rdev)
+
+ cik_mc_program(rdev);
+
+- if (rdev->flags & RADEON_IS_IGP) {
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+- !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
+- r = cik_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+- } else {
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+- !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
+- !rdev->mc_fw) {
+- r = cik_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+-
++ if (!(rdev->flags & RADEON_IS_IGP)) {
+ r = ci_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+@@ -7327,6 +7311,27 @@ int cik_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
++ if (rdev->flags & RADEON_IS_IGP) {
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
++ !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
++ r = cik_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++ } else {
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
++ !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
++ !rdev->mc_fw) {
++ r = cik_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++ }
++
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 1024 * 1024);
+diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
+index aaf7ffce8b5b..d565f4076a23 100644
+--- a/drivers/gpu/drm/radeon/cik_sdma.c
++++ b/drivers/gpu/drm/radeon/cik_sdma.c
+@@ -174,6 +174,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
+ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
+ WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
+ }
++ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
++ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+ }
+
+ /**
+@@ -201,6 +203,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
+ u32 me_cntl, reg_offset;
+ int i;
+
++ if (enable == false) {
++ cik_sdma_gfx_stop(rdev);
++ cik_sdma_rlc_stop(rdev);
++ }
++
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ reg_offset = SDMA0_REGISTER_OFFSET;
+@@ -328,10 +335,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
+ if (!rdev->sdma_fw)
+ return -EINVAL;
+
+- /* stop the gfx rings and rlc compute queues */
+- cik_sdma_gfx_stop(rdev);
+- cik_sdma_rlc_stop(rdev);
+-
+ /* halt the MEs */
+ cik_sdma_enable(rdev, false);
+
+@@ -400,9 +403,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
+ */
+ void cik_sdma_fini(struct radeon_device *rdev)
+ {
+- /* stop the gfx rings and rlc compute queues */
+- cik_sdma_gfx_stop(rdev);
+- cik_sdma_rlc_stop(rdev);
+ /* halt the MEs */
+ cik_sdma_enable(rdev, false);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 5f07d1bfbd76..c429bb9b17b6 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -5061,26 +5061,11 @@ static int evergreen_startup(struct radeon_device *rdev)
+ evergreen_mc_program(rdev);
+
+ if (ASIC_IS_DCE5(rdev)) {
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+- r = ni_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+ r = ni_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+ return r;
+ }
+- } else {
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+- r = r600_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+ }
+
+ if (rdev->flags & RADEON_IS_AGP) {
+@@ -5308,6 +5293,24 @@ int evergreen_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
++ if (ASIC_IS_DCE5(rdev)) {
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
++ r = ni_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++ } else {
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++ r = r600_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++ }
++
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
+index 76ada8cfe902..3a03ba37d043 100644
+--- a/drivers/gpu/drm/radeon/evergreen_smc.h
++++ b/drivers/gpu/drm/radeon/evergreen_smc.h
+@@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
+
+ #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
+
+-#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0
++#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8
+ #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC
+ #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
+
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index b2dbd48f7f28..474343adf262 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1881,23 +1881,7 @@ static int cayman_startup(struct radeon_device *rdev)
+
+ evergreen_mc_program(rdev);
+
+- if (rdev->flags & RADEON_IS_IGP) {
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+- r = ni_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+- } else {
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+- r = ni_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+-
++ if (!(rdev->flags & RADEON_IS_IGP)) {
+ r = ni_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+@@ -2148,6 +2132,24 @@ int cayman_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
++ if (rdev->flags & RADEON_IS_IGP) {
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++ r = ni_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++ } else {
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
++ r = ni_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++ }
++
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 67da7e285cde..5af2729f2055 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -2726,14 +2726,6 @@ static int r600_startup(struct radeon_device *rdev)
+
+ r600_mc_program(rdev);
+
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+- r = r600_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+-
+ if (rdev->flags & RADEON_IS_AGP) {
+ r600_agp_enable(rdev);
+ } else {
+@@ -2921,6 +2913,14 @@ int r600_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++ r = r600_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 71245d6f34a2..84323c943bfc 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -712,6 +712,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
+ DRM_ERROR("Failed initializing VRAM heap.\n");
+ return r;
+ }
++ /* Change the size here instead of the init above so only lpfn is affected */
++ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
++
+ r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->stollen_vga_memory);
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index 99dd9d8fcf72..c4960ad71e5e 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -1665,14 +1665,6 @@ static int rv770_startup(struct radeon_device *rdev)
+
+ rv770_mc_program(rdev);
+
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+- r = r600_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+-
+ if (rdev->flags & RADEON_IS_AGP) {
+ rv770_agp_enable(rdev);
+ } else {
+@@ -1876,6 +1868,14 @@ int rv770_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++ r = r600_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 8277ee01a7b4..873e0a608948 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -6387,15 +6387,6 @@ static int si_startup(struct radeon_device *rdev)
+
+ si_mc_program(rdev);
+
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+- !rdev->rlc_fw || !rdev->mc_fw) {
+- r = si_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+-
+ r = si_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+@@ -6663,6 +6654,15 @@ int si_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
++ !rdev->rlc_fw || !rdev->mc_fw) {
++ r = si_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 1024 * 1024);
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 2332aa1bf93c..83895f2d16c6 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2396,7 +2396,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
+ if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+ enable_sq_ramping = false;
+
+- if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
++ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ enable_sq_ramping = false;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+@@ -5409,7 +5409,7 @@ static void si_populate_mc_reg_addresses(struct radeon_device *rdev,
+
+ for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
+ if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
+- if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
++ if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ break;
+ mc_reg_table->address[i].s0 =
+ cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 729805322883..acd0fe0c80d2 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+
+ moved:
+ if (bo->evicted) {
+- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+- if (ret)
+- pr_err("Can not flush read caches\n");
++ if (bdev->driver->invalidate_caches) {
++ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
++ if (ret)
++ pr_err("Can not flush read caches\n");
++ }
+ bo->evicted = false;
+ }
+
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index ff758eded96f..cd30d98ac510 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -376,7 +376,7 @@ config I2C_CBUS_GPIO
+
+ config I2C_CPM
+ tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
+- depends on (CPM1 || CPM2) && OF_I2C
++ depends on CPM1 || CPM2
+ help
+ This supports the use of the I2C interface on Freescale
+ processors with CPM1 or CPM2.
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index ea7051ee1493..ba93ef85652d 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -496,8 +496,8 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ isert_conn->state = ISER_CONN_INIT;
+ INIT_LIST_HEAD(&isert_conn->conn_accept_node);
+ init_completion(&isert_conn->conn_login_comp);
+- init_waitqueue_head(&isert_conn->conn_wait);
+- init_waitqueue_head(&isert_conn->conn_wait_comp_err);
++ init_completion(&isert_conn->conn_wait);
++ init_completion(&isert_conn->conn_wait_comp_err);
+ kref_init(&isert_conn->conn_kref);
+ kref_get(&isert_conn->conn_kref);
+ mutex_init(&isert_conn->conn_mutex);
+@@ -669,11 +669,11 @@ isert_disconnect_work(struct work_struct *work)
+
+ pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+ mutex_lock(&isert_conn->conn_mutex);
+- isert_conn->state = ISER_CONN_DOWN;
++ if (isert_conn->state == ISER_CONN_UP)
++ isert_conn->state = ISER_CONN_TERMINATING;
+
+ if (isert_conn->post_recv_buf_count == 0 &&
+ atomic_read(&isert_conn->post_send_buf_count) == 0) {
+- pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
+ mutex_unlock(&isert_conn->conn_mutex);
+ goto wake_up;
+ }
+@@ -693,7 +693,7 @@ isert_disconnect_work(struct work_struct *work)
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ wake_up:
+- wake_up(&isert_conn->conn_wait);
++ complete(&isert_conn->conn_wait);
+ isert_put_conn(isert_conn);
+ }
+
+@@ -1427,7 +1427,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
+ case ISCSI_OP_SCSI_CMD:
+ spin_lock_bh(&conn->cmd_lock);
+ if (!list_empty(&cmd->i_conn_node))
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (cmd->data_direction == DMA_TO_DEVICE)
+@@ -1439,7 +1439,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
+ case ISCSI_OP_SCSI_TMFUNC:
+ spin_lock_bh(&conn->cmd_lock);
+ if (!list_empty(&cmd->i_conn_node))
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+@@ -1449,7 +1449,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
+ case ISCSI_OP_TEXT:
+ spin_lock_bh(&conn->cmd_lock);
+ if (!list_empty(&cmd->i_conn_node))
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ /*
+@@ -1512,6 +1512,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
+ iscsit_stop_dataout_timer(cmd);
+ device->unreg_rdma_mem(isert_cmd, isert_conn);
+ cmd->write_data_done = wr->cur_rdma_length;
++ wr->send_wr_num = 0;
+
+ pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
+ spin_lock_bh(&cmd->istate_lock);
+@@ -1552,7 +1553,7 @@ isert_do_control_comp(struct work_struct *work)
+ pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
+ /*
+ * Call atomic_dec(&isert_conn->post_send_buf_count)
+- * from isert_free_conn()
++ * from isert_wait_conn()
+ */
+ isert_conn->logout_posted = true;
+ iscsit_logout_post_handler(cmd, cmd->conn);
+@@ -1576,6 +1577,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
+ struct ib_device *ib_dev)
+ {
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
++ struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+
+ if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
+ cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
+@@ -1587,7 +1589,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
+ queue_work(isert_comp_wq, &isert_cmd->comp_work);
+ return;
+ }
+- atomic_dec(&isert_conn->post_send_buf_count);
++ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+
+ cmd->i_state = ISTATE_SENT_STATUS;
+ isert_completion_put(tx_desc, isert_cmd, ib_dev);
+@@ -1625,7 +1627,7 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
+ case ISER_IB_RDMA_READ:
+ pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
+
+- atomic_dec(&isert_conn->post_send_buf_count);
++ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+ isert_completion_rdma_read(tx_desc, isert_cmd);
+ break;
+ default:
+@@ -1636,31 +1638,39 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
+ }
+
+ static void
+-isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
++isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+ {
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
++ struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
++
++ if (!isert_cmd)
++ isert_unmap_tx_desc(tx_desc, ib_dev);
++ else
++ isert_completion_put(tx_desc, isert_cmd, ib_dev);
++}
++
++static void
++isert_cq_rx_comp_err(struct isert_conn *isert_conn)
++{
++ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
++ struct iscsi_conn *conn = isert_conn->conn;
+
+- if (tx_desc) {
+- struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
++ if (isert_conn->post_recv_buf_count)
++ return;
+
+- if (!isert_cmd)
+- isert_unmap_tx_desc(tx_desc, ib_dev);
+- else
+- isert_completion_put(tx_desc, isert_cmd, ib_dev);
++ if (conn->sess) {
++ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
++ target_wait_for_sess_cmds(conn->sess->se_sess);
+ }
+
+- if (isert_conn->post_recv_buf_count == 0 &&
+- atomic_read(&isert_conn->post_send_buf_count) == 0) {
+- pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+- pr_debug("Calling wake_up from isert_cq_comp_err\n");
++ while (atomic_read(&isert_conn->post_send_buf_count))
++ msleep(3000);
+
+- mutex_lock(&isert_conn->conn_mutex);
+- if (isert_conn->state != ISER_CONN_DOWN)
+- isert_conn->state = ISER_CONN_TERMINATING;
+- mutex_unlock(&isert_conn->conn_mutex);
++ mutex_lock(&isert_conn->conn_mutex);
++ isert_conn->state = ISER_CONN_DOWN;
++ mutex_unlock(&isert_conn->conn_mutex);
+
+- wake_up(&isert_conn->conn_wait_comp_err);
+- }
++ complete(&isert_conn->conn_wait_comp_err);
+ }
+
+ static void
+@@ -1685,8 +1695,11 @@ isert_cq_tx_work(struct work_struct *work)
+ pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
+ pr_debug("TX wc.status: 0x%08x\n", wc.status);
+ pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
+- atomic_dec(&isert_conn->post_send_buf_count);
+- isert_cq_comp_err(tx_desc, isert_conn);
++
++ if (wc.wr_id != ISER_FASTREG_LI_WRID) {
++ atomic_dec(&isert_conn->post_send_buf_count);
++ isert_cq_tx_comp_err(tx_desc, isert_conn);
++ }
+ }
+ }
+
+@@ -1729,7 +1742,7 @@ isert_cq_rx_work(struct work_struct *work)
+ wc.vendor_err);
+ }
+ isert_conn->post_recv_buf_count--;
+- isert_cq_comp_err(NULL, isert_conn);
++ isert_cq_rx_comp_err(isert_conn);
+ }
+ }
+
+@@ -2151,6 +2164,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
+
+ if (!fr_desc->valid) {
+ memset(&inv_wr, 0, sizeof(inv_wr));
++ inv_wr.wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr.opcode = IB_WR_LOCAL_INV;
+ inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
+ wr = &inv_wr;
+@@ -2161,6 +2175,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
+
+ /* Prepare FASTREG WR */
+ memset(&fr_wr, 0, sizeof(fr_wr));
++ fr_wr.wr_id = ISER_FASTREG_LI_WRID;
+ fr_wr.opcode = IB_WR_FAST_REG_MR;
+ fr_wr.wr.fast_reg.iova_start =
+ fr_desc->data_frpl->page_list[0] + page_off;
+@@ -2325,12 +2340,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+ isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
+
+- atomic_inc(&isert_conn->post_send_buf_count);
++ atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+
+ rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+ if (rc) {
+ pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
+- atomic_dec(&isert_conn->post_send_buf_count);
++ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ }
+ pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
+ isert_cmd);
+@@ -2358,12 +2373,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+ return rc;
+ }
+
+- atomic_inc(&isert_conn->post_send_buf_count);
++ atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
+
+ rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+ if (rc) {
+ pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
+- atomic_dec(&isert_conn->post_send_buf_count);
++ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+ }
+ pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
+ isert_cmd);
+@@ -2650,22 +2665,11 @@ isert_free_np(struct iscsi_np *np)
+ kfree(isert_np);
+ }
+
+-static int isert_check_state(struct isert_conn *isert_conn, int state)
+-{
+- int ret;
+-
+- mutex_lock(&isert_conn->conn_mutex);
+- ret = (isert_conn->state == state);
+- mutex_unlock(&isert_conn->conn_mutex);
+-
+- return ret;
+-}
+-
+-static void isert_free_conn(struct iscsi_conn *conn)
++static void isert_wait_conn(struct iscsi_conn *conn)
+ {
+ struct isert_conn *isert_conn = conn->context;
+
+- pr_debug("isert_free_conn: Starting \n");
++ pr_debug("isert_wait_conn: Starting \n");
+ /*
+ * Decrement post_send_buf_count for special case when called
+ * from isert_do_control_comp() -> iscsit_logout_post_handler()
+@@ -2675,38 +2679,29 @@ static void isert_free_conn(struct iscsi_conn *conn)
+ atomic_dec(&isert_conn->post_send_buf_count);
+
+ if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
+- pr_debug("Calling rdma_disconnect from isert_free_conn\n");
++ pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
+ rdma_disconnect(isert_conn->conn_cm_id);
+ }
+ /*
+ * Only wait for conn_wait_comp_err if the isert_conn made it
+ * into full feature phase..
+ */
+- if (isert_conn->state == ISER_CONN_UP) {
+- pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
+- isert_conn->state);
+- mutex_unlock(&isert_conn->conn_mutex);
+-
+- wait_event(isert_conn->conn_wait_comp_err,
+- (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
+-
+- wait_event(isert_conn->conn_wait,
+- (isert_check_state(isert_conn, ISER_CONN_DOWN)));
+-
+- isert_put_conn(isert_conn);
+- return;
+- }
+ if (isert_conn->state == ISER_CONN_INIT) {
+ mutex_unlock(&isert_conn->conn_mutex);
+- isert_put_conn(isert_conn);
+ return;
+ }
+- pr_debug("isert_free_conn: wait_event conn_wait %d\n",
+- isert_conn->state);
++ if (isert_conn->state == ISER_CONN_UP)
++ isert_conn->state = ISER_CONN_TERMINATING;
+ mutex_unlock(&isert_conn->conn_mutex);
+
+- wait_event(isert_conn->conn_wait,
+- (isert_check_state(isert_conn, ISER_CONN_DOWN)));
++ wait_for_completion(&isert_conn->conn_wait_comp_err);
++
++ wait_for_completion(&isert_conn->conn_wait);
++}
++
++static void isert_free_conn(struct iscsi_conn *conn)
++{
++ struct isert_conn *isert_conn = conn->context;
+
+ isert_put_conn(isert_conn);
+ }
+@@ -2719,6 +2714,7 @@ static struct iscsit_transport iser_target_transport = {
+ .iscsit_setup_np = isert_setup_np,
+ .iscsit_accept_np = isert_accept_np,
+ .iscsit_free_np = isert_free_np,
++ .iscsit_wait_conn = isert_wait_conn,
+ .iscsit_free_conn = isert_free_conn,
+ .iscsit_get_login_rx = isert_get_login_rx,
+ .iscsit_put_login_tx = isert_put_login_tx,
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
+index 631f2090f0b8..52f4bf0d1a0f 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -6,6 +6,7 @@
+
+ #define ISERT_RDMA_LISTEN_BACKLOG 10
+ #define ISCSI_ISER_SG_TABLESIZE 256
++#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
+
+ enum isert_desc_type {
+ ISCSI_TX_CONTROL,
+@@ -114,8 +115,8 @@ struct isert_conn {
+ struct isert_device *conn_device;
+ struct work_struct conn_logout_work;
+ struct mutex conn_mutex;
+- wait_queue_head_t conn_wait;
+- wait_queue_head_t conn_wait_comp_err;
++ struct completion conn_wait;
++ struct completion conn_wait_comp_err;
+ struct kref conn_kref;
+ struct list_head conn_frwr_pool;
+ int conn_frwr_pool_size;
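+
The ib_isert changes above swap open-coded wait queues, which had to be polled via isert_check_state(), for struct completion, which latches the event so a late waiter cannot miss it. A minimal kernel-style sketch of the pattern; the struct and function names are illustrative, not the driver's:

#include <linux/completion.h>

struct conn {
	struct completion done;		/* latches "event happened" */
};

static void conn_init(struct conn *c)
{
	init_completion(&c->done);	/* replaces init_waitqueue_head() */
}

static void conn_teardown(struct conn *c)
{
	/* replaces wake_up(); the completion remembers the signal even
	 * if nobody is waiting yet */
	complete(&c->done);
}

static void conn_wait(struct conn *c)
{
	/* replaces wait_event(wq, check_state(...)); no state re-check
	 * under a mutex, no lost-wakeup window */
	wait_for_completion(&c->done);
}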
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index ea3e4b4f7e58..6ab68e058a0a 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -867,12 +867,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
+ int r;
+ struct dm_io_region o_region, c_region;
+ struct cache *cache = mg->cache;
++ sector_t cblock = from_cblock(mg->cblock);
+
+ o_region.bdev = cache->origin_dev->bdev;
+ o_region.count = cache->sectors_per_block;
+
+ c_region.bdev = cache->cache_dev->bdev;
+- c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
++ c_region.sector = cblock * cache->sectors_per_block;
+ c_region.count = cache->sectors_per_block;
+
+ if (mg->writeback || mg->demote) {
+@@ -2181,20 +2182,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
+ bool discarded_block;
+ struct dm_bio_prison_cell *cell;
+ struct policy_result lookup_result;
+- struct per_bio_data *pb;
++ struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
+
+- if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
++ if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
+ /*
+ * This can only occur if the io goes to a partial block at
+ * the end of the origin device. We don't cache these.
+ * Just remap to the origin and carry on.
+ */
+- remap_to_origin_clear_discard(cache, bio, block);
++ remap_to_origin(cache, bio);
+ return DM_MAPIO_REMAPPED;
+ }
+
+- pb = init_per_bio_data(bio, pb_data_size);
+-
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
+ defer_bio(cache, bio);
+ return DM_MAPIO_SUBMITTED;
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index afb419e514bf..579b58200bf2 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -91,6 +91,69 @@ struct block_op {
+ dm_block_t block;
+ };
+
++struct bop_ring_buffer {
++ unsigned begin;
++ unsigned end;
++ struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
++};
++
++static void brb_init(struct bop_ring_buffer *brb)
++{
++ brb->begin = 0;
++ brb->end = 0;
++}
++
++static bool brb_empty(struct bop_ring_buffer *brb)
++{
++ return brb->begin == brb->end;
++}
++
++static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
++{
++ unsigned r = old + 1;
++ return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
++}
++
++static int brb_push(struct bop_ring_buffer *brb,
++ enum block_op_type type, dm_block_t b)
++{
++ struct block_op *bop;
++ unsigned next = brb_next(brb, brb->end);
++
++ /*
++ * We don't allow the last bop to be filled; this way we can
++ * differentiate between full and empty.
++ */
++ if (next == brb->begin)
++ return -ENOMEM;
++
++ bop = brb->bops + brb->end;
++ bop->type = type;
++ bop->block = b;
++
++ brb->end = next;
++
++ return 0;
++}
++
++static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
++{
++ struct block_op *bop;
++
++ if (brb_empty(brb))
++ return -ENODATA;
++
++ bop = brb->bops + brb->begin;
++ result->type = bop->type;
++ result->block = bop->block;
++
++ brb->begin = brb_next(brb, brb->begin);
++
++ return 0;
++}
++
++/*----------------------------------------------------------------*/
++
+ struct sm_metadata {
+ struct dm_space_map sm;
+
+@@ -101,25 +164,20 @@ struct sm_metadata {
+
+ unsigned recursion_count;
+ unsigned allocated_this_transaction;
+- unsigned nr_uncommitted;
+- struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
++ struct bop_ring_buffer uncommitted;
+
+ struct threshold threshold;
+ };
+
+ static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
+ {
+- struct block_op *op;
++ int r = brb_push(&smm->uncommitted, type, b);
+
+- if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
++ if (r) {
+ DMERR("too many recursive allocations");
+ return -ENOMEM;
+ }
+
+- op = smm->uncommitted + smm->nr_uncommitted++;
+- op->type = type;
+- op->block = b;
+-
+ return 0;
+ }
+
+@@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm)
+ return -ENOMEM;
+ }
+
+- if (smm->recursion_count == 1 && smm->nr_uncommitted) {
+- while (smm->nr_uncommitted && !r) {
+- smm->nr_uncommitted--;
+- r = commit_bop(smm, smm->uncommitted +
+- smm->nr_uncommitted);
++ if (smm->recursion_count == 1) {
++ while (!brb_empty(&smm->uncommitted)) {
++ struct block_op bop;
++
++ r = brb_pop(&smm->uncommitted, &bop);
++ if (r) {
++ DMERR("bug in bop ring buffer");
++ break;
++ }
++
++ r = commit_bop(smm, &bop);
+ if (r)
+ break;
+ }
+@@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
+ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t *result)
+ {
+- int r, i;
++ int r;
++ unsigned i;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ unsigned adjustment = 0;
+
+@@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
+ * We may have some uncommitted adjustments to add. This list
+ * should always be really short.
+ */
+- for (i = 0; i < smm->nr_uncommitted; i++) {
+- struct block_op *op = smm->uncommitted + i;
++ for (i = smm->uncommitted.begin;
++ i != smm->uncommitted.end;
++ i = brb_next(&smm->uncommitted, i)) {
++ struct block_op *op = smm->uncommitted.bops + i;
+
+ if (op->block != b)
+ continue;
+@@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
+ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
+ dm_block_t b, int *result)
+ {
+- int r, i, adjustment = 0;
++ int r, adjustment = 0;
++ unsigned i;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ uint32_t rc;
+
+@@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
+ * We may have some uncommitted adjustments to add. This list
+ * should always be really short.
+ */
+- for (i = 0; i < smm->nr_uncommitted; i++) {
+- struct block_op *op = smm->uncommitted + i;
++ for (i = smm->uncommitted.begin;
++ i != smm->uncommitted.end;
++ i = brb_next(&smm->uncommitted, i)) {
++
++ struct block_op *op = smm->uncommitted.bops + i;
+
+ if (op->block != b)
+ continue;
+@@ -671,7 +742,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
+ smm->begin = superblock + 1;
+ smm->recursion_count = 0;
+ smm->allocated_this_transaction = 0;
+- smm->nr_uncommitted = 0;
++ brb_init(&smm->uncommitted);
+ threshold_init(&smm->threshold);
+
+ memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+@@ -713,7 +784,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
+ smm->begin = 0;
+ smm->recursion_count = 0;
+ smm->allocated_this_transaction = 0;
+- smm->nr_uncommitted = 0;
++ brb_init(&smm->uncommitted);
+ threshold_init(&smm->threshold);
+
+ memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
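+
The bop_ring_buffer introduced above is the classic one-slot-empty ring: with N+1 slots for N entries, begin == end means empty and next(end) == begin means full, so no separate counter is needed and push/pop stay O(1). A self-contained userspace sketch of the same shape; the capacity and element type are illustrative:

#include <errno.h>

#define RING_SLOTS 9	/* mirrors MAX_RECURSIVE_ALLOCATIONS + 1 above */

struct ring {
	unsigned begin, end;
	int slots[RING_SLOTS];
};

static void ring_init(struct ring *r)
{
	r->begin = r->end = 0;
}

static unsigned ring_next(unsigned i)
{
	return (i + 1 >= RING_SLOTS) ? 0 : i + 1;
}

static int ring_push(struct ring *r, int v)
{
	unsigned next = ring_next(r->end);

	if (next == r->begin)
		return -ENOMEM;	/* full: the last slot is never used */
	r->slots[r->end] = v;
	r->end = next;
	return 0;
}

static int ring_pop(struct ring *r, int *v)
{
	if (r->begin == r->end)
		return -ENODATA;	/* empty */
	*v = r->slots[r->begin];
	r->begin = ring_next(r->begin);
	return 0;
}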
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index c1c3b132fed5..e381142d636f 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -144,6 +144,8 @@
+
+ #define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
+
++#define FLEXCAN_TIMEOUT_US (50)
++
+ /*
+ * FLEXCAN hardware feature flags
+ *
+@@ -259,6 +261,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
+ }
+ #endif
+
++static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
++{
++ if (!priv->reg_xceiver)
++ return 0;
++
++ return regulator_enable(priv->reg_xceiver);
++}
++
++static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
++{
++ if (!priv->reg_xceiver)
++ return 0;
++
++ return regulator_disable(priv->reg_xceiver);
++}
++
+ static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
+ u32 reg_esr)
+ {
+@@ -266,26 +284,42 @@ static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
+ (reg_esr & FLEXCAN_ESR_ERR_BUS);
+ }
+
+-static inline void flexcan_chip_enable(struct flexcan_priv *priv)
++static int flexcan_chip_enable(struct flexcan_priv *priv)
+ {
+ struct flexcan_regs __iomem *regs = priv->base;
++ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ u32 reg;
+
+ reg = flexcan_read(&regs->mcr);
+ reg &= ~FLEXCAN_MCR_MDIS;
+ flexcan_write(reg, &regs->mcr);
+
+- udelay(10);
++ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
++ usleep_range(10, 20);
++
++ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
++ return -ETIMEDOUT;
++
++ return 0;
+ }
+
+-static inline void flexcan_chip_disable(struct flexcan_priv *priv)
++static int flexcan_chip_disable(struct flexcan_priv *priv)
+ {
+ struct flexcan_regs __iomem *regs = priv->base;
++ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ u32 reg;
+
+ reg = flexcan_read(&regs->mcr);
+ reg |= FLEXCAN_MCR_MDIS;
+ flexcan_write(reg, &regs->mcr);
++
++ while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
++ usleep_range(10, 20);
++
++ if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
++ return -ETIMEDOUT;
++
++ return 0;
+ }
+
+ static int flexcan_get_berr_counter(const struct net_device *dev,
+@@ -706,7 +740,9 @@ static int flexcan_chip_start(struct net_device *dev)
+ u32 reg_mcr, reg_ctrl;
+
+ /* enable module */
+- flexcan_chip_enable(priv);
++ err = flexcan_chip_enable(priv);
++ if (err)
++ return err;
+
+ /* soft reset */
+ flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+@@ -785,11 +821,9 @@ static int flexcan_chip_start(struct net_device *dev)
+ if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
+ flexcan_write(0x0, &regs->rxfgmask);
+
+- if (priv->reg_xceiver) {
+- err = regulator_enable(priv->reg_xceiver);
+- if (err)
+- goto out;
+- }
++ err = flexcan_transceiver_enable(priv);
++ if (err)
++ goto out;
+
+ /* synchronize with the can bus */
+ reg_mcr = flexcan_read(&regs->mcr);
+@@ -824,16 +858,17 @@ static void flexcan_chip_stop(struct net_device *dev)
+ struct flexcan_regs __iomem *regs = priv->base;
+ u32 reg;
+
+- /* Disable all interrupts */
+- flexcan_write(0, &regs->imask1);
+-
+ /* Disable + halt module */
+ reg = flexcan_read(&regs->mcr);
+ reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
+ flexcan_write(reg, &regs->mcr);
+
+- if (priv->reg_xceiver)
+- regulator_disable(priv->reg_xceiver);
++ /* Disable all interrupts */
++ flexcan_write(0, &regs->imask1);
++ flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
++ &regs->ctrl);
++
++ flexcan_transceiver_disable(priv);
+ priv->can.state = CAN_STATE_STOPPED;
+
+ return;
+@@ -863,7 +898,7 @@ static int flexcan_open(struct net_device *dev)
+ /* start chip and queuing */
+ err = flexcan_chip_start(dev);
+ if (err)
+- goto out_close;
++ goto out_free_irq;
+
+ can_led_event(dev, CAN_LED_EVENT_OPEN);
+
+@@ -872,6 +907,8 @@ static int flexcan_open(struct net_device *dev)
+
+ return 0;
+
++ out_free_irq:
++ free_irq(dev->irq, dev);
+ out_close:
+ close_candev(dev);
+ out_disable_per:
+@@ -942,12 +979,16 @@ static int register_flexcandev(struct net_device *dev)
+ goto out_disable_ipg;
+
+ /* select "bus clock", chip must be disabled */
+- flexcan_chip_disable(priv);
++ err = flexcan_chip_disable(priv);
++ if (err)
++ goto out_disable_per;
+ reg = flexcan_read(&regs->ctrl);
+ reg |= FLEXCAN_CTRL_CLK_SRC;
+ flexcan_write(reg, &regs->ctrl);
+
+- flexcan_chip_enable(priv);
++ err = flexcan_chip_enable(priv);
++ if (err)
++ goto out_chip_disable;
+
+ /* set freeze, halt and activate FIFO, restrict register access */
+ reg = flexcan_read(&regs->mcr);
+@@ -964,14 +1005,15 @@ static int register_flexcandev(struct net_device *dev)
+ if (!(reg & FLEXCAN_MCR_FEN)) {
+ netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
+ err = -ENODEV;
+- goto out_disable_per;
++ goto out_chip_disable;
+ }
+
+ err = register_candev(dev);
+
+- out_disable_per:
+ /* disable core and turn off clocks */
++ out_chip_disable:
+ flexcan_chip_disable(priv);
++ out_disable_per:
+ clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
+ clk_disable_unprepare(priv->clk_ipg);
+@@ -1101,9 +1143,10 @@ static int flexcan_probe(struct platform_device *pdev)
+ static int flexcan_remove(struct platform_device *pdev)
+ {
+ struct net_device *dev = platform_get_drvdata(pdev);
++ struct flexcan_priv *priv = netdev_priv(dev);
+
+ unregister_flexcandev(dev);
+-
++ netif_napi_del(&priv->napi);
+ free_candev(dev);
+
+ return 0;
+@@ -1114,8 +1157,11 @@ static int flexcan_suspend(struct device *device)
+ {
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
++ int err;
+
+- flexcan_chip_disable(priv);
++ err = flexcan_chip_disable(priv);
++ if (err)
++ return err;
+
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
+@@ -1136,9 +1182,7 @@ static int flexcan_resume(struct device *device)
+ netif_device_attach(dev);
+ netif_start_queue(dev);
+ }
+- flexcan_chip_enable(priv);
+-
+- return 0;
++ return flexcan_chip_enable(priv);
+ }
+ #endif /* CONFIG_PM_SLEEP */
+
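+
flexcan_chip_enable()/_disable() above replace a blind udelay() with a bounded poll of the LPM_ACK status bit, returning -ETIMEDOUT if the hardware never acknowledges the mode change. A kernel-style sketch with the same shape as flexcan_chip_disable(); the register argument and bit name are placeholders:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define ACK_TIMEOUT_US	50

static int wait_for_ack(void __iomem *reg, u32 ack_bit)
{
	unsigned int timeout = ACK_TIMEOUT_US / 10;

	/* poll until the ack bit is set, sleeping between reads */
	while (timeout-- && !(readl(reg) & ack_bit))
		usleep_range(10, 20);

	/* final re-read: the bit may have been set after the last sleep */
	return (readl(reg) & ack_bit) ? 0 : -ETIMEDOUT;
}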
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index c0acf98d1ea5..14a50a11d72e 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -6813,8 +6813,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
+
+ work_mask |= opaque_key;
+
+- if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+- (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
++ if (desc->err_vlan & RXD_ERR_MASK) {
+ drop_it:
+ tg3_recycle_rx(tnapi, tpr, opaque_key,
+ desc_idx, *post_ptr);
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index 70257808aa37..ac50e7c9c2b8 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -2598,7 +2598,11 @@ struct tg3_rx_buffer_desc {
+ #define RXD_ERR_TOO_SMALL 0x00400000
+ #define RXD_ERR_NO_RESOURCES 0x00800000
+ #define RXD_ERR_HUGE_FRAME 0x01000000
+-#define RXD_ERR_MASK 0xffff0000
++
++#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \
++ RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \
++ RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \
++ RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
+
+ u32 reserved;
+ u32 opaque;
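+
The tg3 fix above replaces the blanket 0xffff0000 error mask with an explicit OR of the bits that genuinely mean "drop this frame", so benign status bits such as RXD_ERR_ODD_NIBBLE_RCVD_MII no longer cause packet loss. A toy illustration of the idea; these flag values are made up:

#include <stdint.h>

#define ERR_BAD_CRC	(1u << 0)
#define ERR_TOO_SMALL	(1u << 1)
#define ERR_ODD_NIBBLE	(1u << 2)	/* informational, not fatal */

/* Only bits that really mean "drop"; a broad 0x7 mask would also trip
 * on the informational bit. */
#define ERR_MASK	(ERR_BAD_CRC | ERR_TOO_SMALL)

static int should_drop(uint32_t status)
{
	return (status & ERR_MASK) != 0;
}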
+diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
+index 3dd39dcfe36b..a12410381cb1 100644
+--- a/drivers/net/ethernet/sfc/ptp.c
++++ b/drivers/net/ethernet/sfc/ptp.c
+@@ -1360,6 +1360,13 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+
++ if (!ptp) {
++ if (net_ratelimit())
++ netif_warn(efx, drv, efx->net_dev,
++ "Received PTP event but PTP not set up\n");
++ return;
++ }
++
+ if (!ptp->enabled)
+ return;
+
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 7c8343a4f918..10636cbd3807 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1650,7 +1650,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+ TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ dev->features = dev->hw_features;
+- dev->vlan_features = dev->features;
++ dev->vlan_features = dev->features &
++ ~(NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_STAG_TX);
+
+ INIT_LIST_HEAD(&tun->disabled);
+ err = tun_attach(tun, file, false);
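+
The tun change above (and the matching veth change further down) stops copying the VLAN tag-insertion offload flags into vlan_features: a VLAN device stacked on top cannot ask the lower device to insert a second tag in hardware. A kernel-style sketch of the masking, using the real NETIF_F_* flag names but a hypothetical helper:

#include <linux/netdevice.h>

static void setup_features(struct net_device *dev, netdev_features_t features)
{
	dev->features = features;
	/* VLAN sub-devices must not inherit CTAG/STAG TX insertion */
	dev->vlan_features = features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX);
}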
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 5e2bac650bd8..3ecb2133dee6 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1031,20 +1031,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
+ dev->mii.phy_id = 0x03;
+ dev->mii.supports_gmii = 1;
+
+- if (usb_device_no_sg_constraint(dev->udev))
+- dev->can_dma_sg = 1;
+-
+ dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+
+ dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+
+- if (dev->can_dma_sg) {
+- dev->net->features |= NETIF_F_SG | NETIF_F_TSO;
+- dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO;
+- }
+-
+ /* Enable checksum offload */
+ *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
+ AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index eee1f19ef1e9..61c4044f644e 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -269,7 +269,8 @@ static void veth_setup(struct net_device *dev)
+ dev->ethtool_ops = &veth_ethtool_ops;
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= VETH_FEATURES;
+- dev->vlan_features = dev->features;
++ dev->vlan_features = dev->features &
++ ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX);
+ dev->destructor = veth_dev_free;
+
+ dev->hw_features = VETH_FEATURES;
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 8065066a6230..0232156dade3 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1621,7 +1621,8 @@ static int virtnet_probe(struct virtio_device *vdev)
+ /* If we can receive ANY GSO packets, we must allocate large ones. */
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
++ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
++ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
+ vi->big_packets = true;
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 7e2788c488ed..55d89390b4bc 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1760,11 +1760,20 @@ vmxnet3_netpoll(struct net_device *netdev)
+ {
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+- if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+- vmxnet3_disable_all_intrs(adapter);
+-
+- vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
+- vmxnet3_enable_all_intrs(adapter);
++ switch (adapter->intr.type) {
++#ifdef CONFIG_PCI_MSI
++ case VMXNET3_IT_MSIX: {
++ int i;
++ for (i = 0; i < adapter->num_rx_queues; i++)
++ vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
++ break;
++ }
++#endif
++ case VMXNET3_IT_MSI:
++ default:
++ vmxnet3_intr(0, adapter->netdev);
++ break;
++ }
+
+ }
+ #endif /* CONFIG_NET_POLL_CONTROLLER */
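+
The vmxnet3 netpoll rewrite above dispatches on the configured interrupt mode instead of poking a single RX ring: with MSI-X every RX queue owns a vector and each per-queue handler must be polled, while MSI/INTx share one handler. A runnable toy model of that dispatch; the poll_* stubs stand in for the driver's real ISRs:

#include <stdio.h>

enum intr_type { IT_INTX, IT_MSI, IT_MSIX };

static void poll_rx_queue(int q) { printf("poll rx queue %d\n", q); }
static void poll_shared_irq(void) { printf("poll shared irq\n"); }

static void netpoll_dispatch(enum intr_type type, int num_rx_queues)
{
	switch (type) {
	case IT_MSIX: {
		int i;

		for (i = 0; i < num_rx_queues; i++)
			poll_rx_queue(i);	/* one vector per RX queue */
		break;
	}
	case IT_MSI:
	default:
		poll_shared_irq();		/* one handler covers all */
		break;
	}
}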
+diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+index 092b9d412e7f..1078fbd7bda2 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
++++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+@@ -56,7 +56,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
++ {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
+@@ -95,7 +95,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+- {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
++ {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
+ };
+
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index a1ab4ff46818..c2fa0e3490c7 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -730,11 +730,18 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+ return NULL;
+
+ /*
+- * mark descriptor as zero-length and set the 'more'
+- * flag to ensure that both buffers get discarded
++ * Re-check previous descriptor, in case it has been filled
++ * in the mean time.
+ */
+- rs->rs_datalen = 0;
+- rs->rs_more = true;
++ ret = ath9k_hw_rxprocdesc(ah, ds, rs);
++ if (ret == -EINPROGRESS) {
++ /*
++ * mark descriptor as zero-length and set the 'more'
++ * flag to ensure that both buffers get discarded
++ */
++ rs->rs_datalen = 0;
++ rs->rs_more = true;
++ }
+ }
+
+ list_del(&bf->list);
+@@ -1093,32 +1100,32 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hdr *hdr;
+ bool discard_current = sc->rx.discard_next;
+- int ret = 0;
+
+ /*
+ * Discard corrupt descriptors which are marked in
+ * ath_get_next_rx_buf().
+ */
+- sc->rx.discard_next = rx_stats->rs_more;
+ if (discard_current)
+- return -EINVAL;
++ goto corrupt;
++
++ sc->rx.discard_next = false;
+
+ /*
+ * Discard zero-length packets.
+ */
+ if (!rx_stats->rs_datalen) {
+ RX_STAT_INC(rx_len_err);
+- return -EINVAL;
++ goto corrupt;
+ }
+
+- /*
+- * rs_status follows rs_datalen so if rs_datalen is too large
+- * we can take a hint that hardware corrupted it, so ignore
+- * those frames.
+- */
++ /*
++ * rs_status follows rs_datalen so if rs_datalen is too large
++ * we can take a hint that hardware corrupted it, so ignore
++ * those frames.
++ */
+ if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
+ RX_STAT_INC(rx_len_err);
+- return -EINVAL;
++ goto corrupt;
+ }
+
+ /* Only use status info from the last fragment */
+@@ -1132,10 +1139,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ * This is different from the other corrupt descriptor
+ * condition handled above.
+ */
+- if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
+- ret = -EINVAL;
+- goto exit;
+- }
++ if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
++ goto corrupt;
+
+ hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
+
+@@ -1151,18 +1156,15 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
+ RX_STAT_INC(rx_spectral);
+
+- ret = -EINVAL;
+- goto exit;
++ return -EINVAL;
+ }
+
+ /*
+ * everything but the rate is checked here, the rate check is done
+ * separately to avoid doing two lookups for a rate for each frame.
+ */
+- if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
+- ret = -EINVAL;
+- goto exit;
+- }
++ if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
++ return -EINVAL;
+
+ rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
+ if (rx_stats->is_mybeacon) {
+@@ -1173,15 +1175,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ /*
+ * This shouldn't happen, but have a safety check anyway.
+ */
+- if (WARN_ON(!ah->curchan)) {
+- ret = -EINVAL;
+- goto exit;
+- }
++ if (WARN_ON(!ah->curchan))
++ return -EINVAL;
+
+- if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
+- ret =-EINVAL;
+- goto exit;
+- }
++ if (ath9k_process_rate(common, hw, rx_stats, rx_status))
++ return -EINVAL;
+
+ ath9k_process_rssi(common, hw, rx_stats, rx_status);
+
+@@ -1196,9 +1194,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ sc->rx.num_pkts++;
+ #endif
+
+-exit:
+- sc->rx.discard_next = false;
+- return ret;
++ return 0;
++
++corrupt:
++ sc->rx.discard_next = rx_stats->rs_more;
++ return -EINVAL;
+ }
+
+ static void ath9k_rx_skb_postprocess(struct ath_common *common,
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 7fe6b5923a9c..ba39178a94ab 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -1457,14 +1457,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+
+- if (!tid->sched)
+- continue;
+-
+ ac = tid->ac;
+ txq = ac->txq;
+
+ ath_txq_lock(sc, txq);
+
++ if (!tid->sched) {
++ ath_txq_unlock(sc, txq);
++ continue;
++ }
++
+ buffered = ath_tid_has_buffered(tid);
+
+ tid->sched = false;
+@@ -2199,14 +2201,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
+ txq->stopped = true;
+ }
+
++ if (txctl->an)
++ tid = ath_get_skb_tid(sc, txctl->an, skb);
++
+ if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
+ ath_txq_unlock(sc, txq);
+ txq = sc->tx.uapsdq;
+ ath_txq_lock(sc, txq);
+ } else if (txctl->an &&
+ ieee80211_is_data_present(hdr->frame_control)) {
+- tid = ath_get_skb_tid(sc, txctl->an, skb);
+-
+ WARN_ON(tid->ac->txq != txctl->txq);
+
+ if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
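+
The ath_tx_aggr_sleep() fix above moves the tid->sched test inside the txq lock; testing a flag before taking the lock that guards it races with a concurrent scheduler flipping it. A kernel-style sketch of the corrected shape, with illustrative struct names:

#include <linux/spinlock.h>
#include <linux/types.h>

struct tid_state {
	spinlock_t *lock;	/* the lock that guards @sched */
	bool sched;
};

static void tid_unschedule(struct tid_state *tid)
{
	spin_lock_bh(tid->lock);
	if (!tid->sched) {	/* racy if checked before locking */
		spin_unlock_bh(tid->lock);
		return;
	}
	tid->sched = false;
	/* ... dequeue under the lock ... */
	spin_unlock_bh(tid->lock);
}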
+diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+index cae4d3182e33..d6e6405a9b07 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+@@ -704,6 +704,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ return ret;
+ }
+
++static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
++{
++ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
++ return false;
++ return true;
++}
++
++static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
++{
++ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
++ return false;
++ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
++ return true;
++
++ /* disabled by default */
++ return false;
++}
++
+ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+@@ -725,7 +743,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
++ if (!iwl_enable_rx_ampdu(priv->cfg))
+ break;
+ IWL_DEBUG_HT(priv, "start Rx\n");
+ ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
+@@ -737,7 +755,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+ case IEEE80211_AMPDU_TX_START:
+ if (!priv->trans->ops->txq_enable)
+ break;
+- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
++ if (!iwl_enable_tx_ampdu(priv->cfg))
+ break;
+ IWL_DEBUG_HT(priv, "start Tx\n");
+ ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
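+
iwl_enable_rx_ampdu()/iwl_enable_tx_ampdu() above centralize the 11n_disable bitmap checks: a disable bit always wins, the new IWL_ENABLE_HT_TXAGG bit overrides the default, and otherwise the default applies (off for dvm here, on for the mvm copy later in this patch). A compact sketch of that precedence; the bit positions mirror the patch:

#include <stdbool.h>
#include <stdint.h>

#define DISABLE_TXAGG	(1u << 1)	/* like IWL_DISABLE_HT_TXAGG */
#define ENABLE_TXAGG	(1u << 3)	/* like IWL_ENABLE_HT_TXAGG */

static bool tx_ampdu_enabled(uint32_t param, bool default_on)
{
	if (param & DISABLE_TXAGG)	/* explicit disable always wins */
		return false;
	if (param & ENABLE_TXAGG)	/* explicit enable beats the default */
		return true;
	return default_on;		/* dvm: false, mvm: true */
}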
+diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
+index c3c13ce96eb0..e800002d6158 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
++++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
+@@ -590,6 +590,7 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
+ sizeof(priv->tid_data[sta_id][tid]));
+
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
++ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+
+ priv->num_stations--;
+
+diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
+index 1fef5240e6ad..e219e761f48b 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
+@@ -1291,8 +1291,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
+ struct iwl_ht_agg *agg;
+ struct sk_buff_head reclaimed_skbs;
+- struct ieee80211_tx_info *info;
+- struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ int sta_id;
+ int tid;
+@@ -1379,22 +1377,28 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ freed = 0;
+
+ skb_queue_walk(&reclaimed_skbs, skb) {
+- hdr = (struct ieee80211_hdr *)skb->data;
++ struct ieee80211_hdr *hdr = (void *)skb->data;
++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ freed++;
+ else
+ WARN_ON_ONCE(1);
+
+- info = IEEE80211_SKB_CB(skb);
+ iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
+
++ memset(&info->status, 0, sizeof(info->status));
++ /* Packet was transmitted successfully; failures come as single
++ * frames because, before failing a frame, the firmware transmits
++ * it without aggregation at least once.
++ */
++ info->flags |= IEEE80211_TX_STAT_ACK;
++
+ if (freed == 1) {
+ /* this is the first skb we deliver in this batch */
+ /* put the rate scaling data there */
+ info = IEEE80211_SKB_CB(skb);
+ memset(&info->status, 0, sizeof(info->status));
+- info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = ba_resp->txed_2_done;
+ info->status.ampdu_len = ba_resp->txed;
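+
The compressed-BA reclaim change above resets and flags the status of every reclaimed frame, not just the first: every frame covered by the block-ack was delivered, while the aggregation rate data still belongs only on the first skb of the batch. A kernel-style sketch of the walk; the function name and parameters are illustrative:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/mac80211.h>

static void ack_reclaimed(struct sk_buff_head *reclaimed, u8 txed, u8 done)
{
	struct sk_buff *skb;
	int freed = 0;

	skb_queue_walk(reclaimed, skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		memset(&info->status, 0, sizeof(info->status));
		info->flags |= IEEE80211_TX_STAT_ACK;	/* per frame now */

		if (++freed == 1) {
			/* rate-scaling data only on the first skb */
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = done;
			info->status.ampdu_len = txed;
		}
	}
}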
+diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
+index 99e1da3123c9..2cdbd940575e 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
+@@ -1210,7 +1210,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
+ MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+ module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
+ MODULE_PARM_DESC(11n_disable,
+- "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
++ "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
+ module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
+ int, S_IRUGO);
+ MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
+diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
+index a1f580c0c6c6..4c6cff4218cb 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
++++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
+@@ -79,9 +79,12 @@ enum iwl_power_level {
+ IWL_POWER_NUM
+ };
+
+-#define IWL_DISABLE_HT_ALL BIT(0)
+-#define IWL_DISABLE_HT_TXAGG BIT(1)
+-#define IWL_DISABLE_HT_RXAGG BIT(2)
++enum iwl_disable_11n {
++ IWL_DISABLE_HT_ALL = BIT(0),
++ IWL_DISABLE_HT_TXAGG = BIT(1),
++ IWL_DISABLE_HT_RXAGG = BIT(2),
++ IWL_ENABLE_HT_TXAGG = BIT(3),
++};
+
+ /**
+ * struct iwl_mod_params
+@@ -90,7 +93,7 @@ enum iwl_power_level {
+ *
+ * @sw_crypto: using hardware encryption, default = 0
+ * @disable_11n: disable 11n capabilities, default = 0,
+- * use IWL_DISABLE_HT_* constants
++ * use IWL_[DIS,EN]ABLE_HT_* constants
+ * @amsdu_size_8K: enable 8K amsdu size, default = 0
+ * @restart_fw: restart firmware, default = 1
+ * @wd_disable: enable stuck queue check, default = 0
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+index 9833cdf6177c..5f6fd44e72f1 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -297,6 +297,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
+ ieee80211_free_txskb(hw, skb);
+ }
+
++static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
++{
++ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
++ return false;
++ return true;
++}
++
++static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
++{
++ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
++ return false;
++ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
++ return true;
++
++ /* enabled by default */
++ return true;
++}
++
+ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+@@ -316,7 +334,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
++ if (!iwl_enable_rx_ampdu(mvm->cfg)) {
+ ret = -EINVAL;
+ break;
+ }
+@@ -326,7 +344,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
++ if (!iwl_enable_tx_ampdu(mvm->cfg)) {
+ ret = -EINVAL;
+ break;
+ }
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
+index b0389279cc1e..c86663ebb493 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
+@@ -152,7 +152,7 @@ enum iwl_power_scheme {
+ IWL_POWER_SCHEME_LP
+ };
+
+-#define IWL_CONN_MAX_LISTEN_INTERVAL 70
++#define IWL_CONN_MAX_LISTEN_INTERVAL 10
+ #define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
+diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
+index e05440d90319..f41add9c8093 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
+@@ -819,16 +819,12 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
+ struct sk_buff_head reclaimed_skbs;
+ struct iwl_mvm_tid_data *tid_data;
+- struct ieee80211_tx_info *info;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+- struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ int sta_id, tid, freed;
+-
+ /* "flow" corresponds to Tx queue */
+ u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
+-
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
+@@ -885,22 +881,26 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ freed = 0;
+
+ skb_queue_walk(&reclaimed_skbs, skb) {
+- hdr = (struct ieee80211_hdr *)skb->data;
++ struct ieee80211_hdr *hdr = (void *)skb->data;
++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ freed++;
+ else
+ WARN_ON_ONCE(1);
+
+- info = IEEE80211_SKB_CB(skb);
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
++ memset(&info->status, 0, sizeof(info->status));
++ /* Packet was transmitted successfully; failures come as single
++ * frames because, before failing a frame, the firmware transmits
++ * it without aggregation at least once.
++ */
++ info->flags |= IEEE80211_TX_STAT_ACK;
++
+ if (freed == 1) {
+ /* this is the first skb we deliver in this batch */
+ /* put the rate scaling data there */
+- info = IEEE80211_SKB_CB(skb);
+- memset(&info->status, 0, sizeof(info->status));
+- info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = ba_notif->txed_2_done;
+ info->status.ampdu_len = ba_notif->txed;
+diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
+index 5e0eec4d71c7..5d9a8084665d 100644
+--- a/drivers/net/wireless/mwifiex/11ac.c
++++ b/drivers/net/wireless/mwifiex/11ac.c
+@@ -189,8 +189,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
+ vht_cap->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_vht_cap));
+ memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header),
+- (u8 *)bss_desc->bcn_vht_cap +
+- sizeof(struct ieee_types_header),
++ (u8 *)bss_desc->bcn_vht_cap,
+ le16_to_cpu(vht_cap->header.len));
+
+ mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
+diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
+index 0b803c05cab3..983c10c49658 100644
+--- a/drivers/net/wireless/mwifiex/11n.c
++++ b/drivers/net/wireless/mwifiex/11n.c
+@@ -308,8 +308,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
+ ht_cap->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+ memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
+- (u8 *) bss_desc->bcn_ht_cap +
+- sizeof(struct ieee_types_header),
++ (u8 *)bss_desc->bcn_ht_cap,
+ le16_to_cpu(ht_cap->header.len));
+
+ mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
+index 52da8ee7599a..cb84edcd794b 100644
+--- a/drivers/net/wireless/mwifiex/pcie.c
++++ b/drivers/net/wireless/mwifiex/pcie.c
+@@ -1212,6 +1212,12 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
+ rd_index = card->rxbd_rdptr & reg->rx_mask;
+ skb_data = card->rx_buf_list[rd_index];
+
++ /* If skb allocation failed earlier for an Rx packet,
++ * rx_buf_list[rd_index] would have been left NULL.
++ */
++ if (!skb_data)
++ return -ENOMEM;
++
+ MWIFIEX_SKB_PACB(skb_data, &buf_pa);
+ pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+@@ -1526,6 +1532,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
+ if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
+ mwifiex_process_sleep_confirm_resp(adapter, skb->data,
+ skb->len);
++ mwifiex_pcie_enable_host_int(adapter);
++ if (mwifiex_write_reg(adapter,
++ PCIE_CPU_INT_EVENT,
++ CPU_INTR_SLEEP_CFM_DONE)) {
++ dev_warn(adapter->dev,
++ "Write register failed\n");
++ return -1;
++ }
+ while (reg->sleep_cookie && (count++ < 10) &&
+ mwifiex_pcie_ok_to_access_hw(adapter))
+ usleep_range(50, 60);
+@@ -1994,23 +2008,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
+ adapter->int_status |= pcie_ireg;
+ spin_unlock_irqrestore(&adapter->int_lock, flags);
+
+- if (pcie_ireg & HOST_INTR_CMD_DONE) {
+- if ((adapter->ps_state == PS_STATE_SLEEP_CFM) ||
+- (adapter->ps_state == PS_STATE_SLEEP)) {
+- mwifiex_pcie_enable_host_int(adapter);
+- if (mwifiex_write_reg(adapter,
+- PCIE_CPU_INT_EVENT,
+- CPU_INTR_SLEEP_CFM_DONE)
+- ) {
+- dev_warn(adapter->dev,
+- "Write register failed\n");
+- return;
+-
+- }
+- }
+- } else if (!adapter->pps_uapsd_mode &&
+- adapter->ps_state == PS_STATE_SLEEP &&
+- mwifiex_pcie_ok_to_access_hw(adapter)) {
++ if (!adapter->pps_uapsd_mode &&
++ adapter->ps_state == PS_STATE_SLEEP &&
++ mwifiex_pcie_ok_to_access_hw(adapter)) {
+ /* Potentially for PCIe we could get other
+ * interrupts like shared. Don't change power
+ * state until cookie is set */
+diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
+index ba48e64673d9..a17d4675ddc0 100644
+--- a/drivers/net/wireless/mwifiex/scan.c
++++ b/drivers/net/wireless/mwifiex/scan.c
+@@ -2101,12 +2101,12 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
+ curr_bss->ht_info_offset);
+
+ if (curr_bss->bcn_vht_cap)
+- curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf +
+- curr_bss->vht_cap_offset);
++ curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf +
++ curr_bss->vht_cap_offset);
+
+ if (curr_bss->bcn_vht_oper)
+- curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf +
+- curr_bss->vht_info_offset);
++ curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf +
++ curr_bss->vht_info_offset);
+
+ if (curr_bss->bcn_bss_co_2040)
+ curr_bss->bcn_bss_co_2040 =
+diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
+index 1c70b8d09227..9d0b0c442c95 100644
+--- a/drivers/net/wireless/mwifiex/usb.c
++++ b/drivers/net/wireless/mwifiex/usb.c
+@@ -512,13 +512,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
+ MWIFIEX_BSS_ROLE_ANY),
+ MWIFIEX_ASYNC_CMD);
+
+-#ifdef CONFIG_PM
+- /* Resume handler may be called due to remote wakeup,
+- * force to exit suspend anyway
+- */
+- usb_disable_autosuspend(card->udev);
+-#endif /* CONFIG_PM */
+-
+ return 0;
+ }
+
+@@ -555,7 +548,6 @@ static struct usb_driver mwifiex_usb_driver = {
+ .id_table = mwifiex_usb_table,
+ .suspend = mwifiex_usb_suspend,
+ .resume = mwifiex_usb_resume,
+- .supports_autosuspend = 1,
+ };
+
+ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
+diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
+index 95fa3599b407..35f881585962 100644
+--- a/drivers/net/wireless/mwifiex/wmm.c
++++ b/drivers/net/wireless/mwifiex/wmm.c
+@@ -559,7 +559,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
+ mwifiex_wmm_delete_all_ralist(priv);
+ memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
+
+- if (priv->adapter->if_ops.clean_pcie_ring)
++ if (priv->adapter->if_ops.clean_pcie_ring &&
++ !priv->adapter->surprise_removed)
+ priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ }
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 863bc4bb4806..9fc3f1f4557b 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1131,6 +1131,9 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
+ return err;
+ pci_fixup_device(pci_fixup_enable, dev);
+
++ if (dev->msi_enabled || dev->msix_enabled)
++ return 0;
++
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if (pin) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+@@ -1166,10 +1169,8 @@ static void pci_enable_bridge(struct pci_dev *dev)
+ pci_enable_bridge(dev->bus->self);
+
+ if (pci_is_enabled(dev)) {
+- if (!dev->is_busmaster) {
+- dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
++ if (!dev->is_busmaster)
+ pci_set_master(dev);
+- }
+ return;
+ }
+
+diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
+index 6ebf3067bde4..b2dcde123e56 100644
+--- a/drivers/pinctrl/pinctrl-sunxi.c
++++ b/drivers/pinctrl/pinctrl-sunxi.c
+@@ -14,6 +14,7 @@
+ #include <linux/clk.h>
+ #include <linux/gpio.h>
+ #include <linux/irqdomain.h>
++#include <linux/irqchip/chained_irq.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+@@ -665,6 +666,7 @@ static struct irq_chip sunxi_pinctrl_irq_chip = {
+
+ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
+ {
++ struct irq_chip *chip = irq_get_chip(irq);
+ struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
+ const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG);
+
+@@ -674,10 +676,12 @@ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
+ if (reg) {
+ int irqoffset;
+
++ chained_irq_enter(chip, desc);
+ for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) {
+ int pin_irq = irq_find_mapping(pctl->domain, irqoffset);
+ generic_handle_irq(pin_irq);
+ }
++ chained_irq_exit(chip, desc);
+ }
+ }
+
+diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
+index 167f3d00c916..66977ebf13b3 100644
+--- a/drivers/pnp/pnpacpi/rsparser.c
++++ b/drivers/pnp/pnpacpi/rsparser.c
+@@ -183,9 +183,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
+ struct resource r = {0};
+ int i, flags;
+
+- if (acpi_dev_resource_memory(res, &r)
+- || acpi_dev_resource_io(res, &r)
+- || acpi_dev_resource_address_space(res, &r)
++ if (acpi_dev_resource_address_space(res, &r)
+ || acpi_dev_resource_ext_address_space(res, &r)) {
+ pnp_add_resource(dev, &r);
+ return AE_OK;
+@@ -217,6 +215,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
+ }
+
+ switch (res->type) {
++ case ACPI_RESOURCE_TYPE_MEMORY24:
++ case ACPI_RESOURCE_TYPE_MEMORY32:
++ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
++ if (acpi_dev_resource_memory(res, &r))
++ pnp_add_resource(dev, &r);
++ break;
++ case ACPI_RESOURCE_TYPE_IO:
++ case ACPI_RESOURCE_TYPE_FIXED_IO:
++ if (acpi_dev_resource_io(res, &r))
++ pnp_add_resource(dev, &r);
++ break;
+ case ACPI_RESOURCE_TYPE_DMA:
+ dma = &res->data.dma;
+ if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
+diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
+index b4b0d83f9ef6..7061ac0ad428 100644
+--- a/drivers/rapidio/devices/tsi721.h
++++ b/drivers/rapidio/devices/tsi721.h
+@@ -678,6 +678,7 @@ struct tsi721_bdma_chan {
+ struct list_head free_list;
+ dma_cookie_t completed_cookie;
+ struct tasklet_struct tasklet;
++ bool active;
+ };
+
+ #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
+index 502663f5f7c6..91245f5dbe81 100644
+--- a/drivers/rapidio/devices/tsi721_dma.c
++++ b/drivers/rapidio/devices/tsi721_dma.c
+@@ -206,8 +206,8 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
+ {
+ /* Disable BDMA channel interrupts */
+ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+-
+- tasklet_schedule(&bdma_chan->tasklet);
++ if (bdma_chan->active)
++ tasklet_schedule(&bdma_chan->tasklet);
+ }
+
+ #ifdef CONFIG_PCI_MSI
+@@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
+ }
+ #endif /* CONFIG_PCI_MSI */
+
+- tasklet_enable(&bdma_chan->tasklet);
++ bdma_chan->active = true;
+ tsi721_bdma_interrupt_enable(bdma_chan, 1);
+
+ return bdma_chan->bd_num - 1;
+@@ -576,9 +576,7 @@ err_out:
+ static void tsi721_free_chan_resources(struct dma_chan *dchan)
+ {
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+-#ifdef CONFIG_PCI_MSI
+ struct tsi721_device *priv = to_tsi721(dchan->device);
+-#endif
+ LIST_HEAD(list);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+@@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
+ BUG_ON(!list_empty(&bdma_chan->active_list));
+ BUG_ON(!list_empty(&bdma_chan->queue));
+
+- tasklet_disable(&bdma_chan->tasklet);
++ tsi721_bdma_interrupt_enable(bdma_chan, 0);
++ bdma_chan->active = false;
++
++#ifdef CONFIG_PCI_MSI
++ if (priv->flags & TSI721_USING_MSIX) {
++ synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
++ bdma_chan->id].vector);
++ synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
++ bdma_chan->id].vector);
++ } else
++#endif
++ synchronize_irq(priv->pdev->irq);
++
++ tasklet_kill(&bdma_chan->tasklet);
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice_init(&bdma_chan->free_list, &list);
+ spin_unlock_bh(&bdma_chan->lock);
+
+- tsi721_bdma_interrupt_enable(bdma_chan, 0);
+-
+ #ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+@@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
+ bdma_chan->dchan.cookie = 1;
+ bdma_chan->dchan.chan_id = i;
+ bdma_chan->id = i;
++ bdma_chan->active = false;
+
+ spin_lock_init(&bdma_chan->lock);
+
+@@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv)
+
+ tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
+ (unsigned long)bdma_chan);
+- tasklet_disable(&bdma_chan->tasklet);
+ list_add_tail(&bdma_chan->dchan.device_node,
+ &mport->dma.channels);
+ }
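+
The tsi721 teardown above follows the safe ordering for killing a tasklet that an IRQ handler may schedule: stop new scheduling first (the active flag plus interrupt disable), flush any handler already running with synchronize_irq(), and only then tasklet_kill(). Killing the tasklet before the IRQ is quiesced lets a racing handler re-arm it afterwards. A kernel-style sketch, where the hw_mask callback stands in for the device-specific interrupt disable:

#include <linux/interrupt.h>
#include <linux/types.h>

struct chan {
	bool active;
	unsigned int irq;
	struct tasklet_struct tasklet;
};

static void chan_teardown(struct chan *c, void (*hw_mask)(struct chan *))
{
	c->active = false;		/* handler tests this before scheduling */
	hw_mask(c);			/* device-specific interrupt disable */
	synchronize_irq(c->irq);	/* wait out a running handler */
	tasklet_kill(&c->tasklet);	/* nothing can re-arm it now */
}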
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index a01b8b3b70ca..d97fbf4eb65b 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -923,6 +923,8 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
+ return 0;
+ }
+
++static int _regulator_do_enable(struct regulator_dev *rdev);
++
+ /**
+ * set_machine_constraints - sets regulator constraints
+ * @rdev: regulator source
+@@ -979,10 +981,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
+ /* If the constraints say the regulator should be on at this point
+ * and we have control then make sure it is enabled.
+ */
+- if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
+- ops->enable) {
+- ret = ops->enable(rdev);
+- if (ret < 0) {
++ if (rdev->constraints->always_on || rdev->constraints->boot_on) {
++ ret = _regulator_do_enable(rdev);
++ if (ret < 0 && ret != -EINVAL) {
+ rdev_err(rdev, "failed to enable\n");
+ goto out;
+ }
+@@ -3571,9 +3572,8 @@ int regulator_suspend_finish(void)
+ struct regulator_ops *ops = rdev->desc->ops;
+
+ mutex_lock(&rdev->mutex);
+- if ((rdev->use_count > 0 || rdev->constraints->always_on) &&
+- ops->enable) {
+- error = ops->enable(rdev);
++ if (rdev->use_count > 0 || rdev->constraints->always_on) {
++ error = _regulator_do_enable(rdev);
+ if (error)
+ ret = error;
+ } else {
+diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
+index f93cc32eb818..71e974738014 100644
+--- a/drivers/s390/char/fs3270.c
++++ b/drivers/s390/char/fs3270.c
+@@ -564,6 +564,7 @@ static void __exit
+ fs3270_exit(void)
+ {
+ raw3270_unregister_notifier(&fs3270_notifier);
++ device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
+ __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
+ }
+
+diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
+index 4911310a38f5..22a9bb1abae1 100644
+--- a/drivers/scsi/isci/host.h
++++ b/drivers/scsi/isci/host.h
+@@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
+ }
+
+ #define for_each_isci_host(id, ihost, pdev) \
+- for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
+- id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
+- ihost = to_pci_info(pdev)->hosts[++id])
++ for (id = 0; id < SCI_MAX_CONTROLLERS && \
++ (ihost = to_pci_info(pdev)->hosts[id]); id++)
+
+ static inline void wait_for_start(struct isci_host *ihost)
+ {
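+
The for_each_isci_host() rewrite above checks the array bound before dereferencing and stops at the first NULL slot; the old form read hosts[id] one element past the end when every slot was populated. A runnable toy version of the corrected macro:

#include <stdio.h>

#define MAX_HOSTS 2

/* bound check first, then the element test doubles as the assignment */
#define for_each_host(id, h, arr) \
	for ((id) = 0; (id) < MAX_HOSTS && ((h) = (arr)[(id)]); (id)++)

int main(void)
{
	const char *hosts[MAX_HOSTS] = { "a", "b" };
	const char *h;
	int id;

	for_each_host(id, h, hosts)
		printf("host %d: %s\n", id, h);
	return 0;
}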
+diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
+index 85c77f6b802b..ac879745ef80 100644
+--- a/drivers/scsi/isci/port_config.c
++++ b/drivers/scsi/isci/port_config.c
+@@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+ } else {
+ /* the phy is already the part of the port */
+- u32 port_state = iport->sm.current_state_id;
+-
+-		/* if the PORT's state is resetting then the link up is from
+-		 * a port hard reset; in this case, we need to tell the port
+-		 * that link up is received
+- */
+- BUG_ON(port_state != SCI_PORT_RESETTING);
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_port_link_up(iport, iphy);
+ }
+diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
+index 0d30ca849e8f..5d6fda72d659 100644
+--- a/drivers/scsi/isci/task.c
++++ b/drivers/scsi/isci/task.c
+@@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
+ /* XXX: need to cleanup any ireqs targeting this
+ * domain_device
+ */
+- ret = TMF_RESP_FUNC_COMPLETE;
++ ret = -ENODEV;
+ goto out;
+ }
+
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 93db74ef3461..43acfce3a435 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -2993,8 +2993,7 @@ struct qla_hw_data {
+ IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA8044(ha))
+ #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+-#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+- IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
++#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
+ #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+ #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+ #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index ff9c86b1a0d8..e32fccd6580c 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -2829,6 +2829,7 @@ static int
+ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ {
+ #define MIN_MSIX_COUNT 2
++#define ATIO_VECTOR 2
+ int i, ret;
+ struct msix_entry *entries;
+ struct qla_msix_entry *qentry;
+@@ -2885,34 +2886,47 @@ msix_failed:
+ }
+
+ /* Enable MSI-X vectors for the base queue */
+- for (i = 0; i < ha->msix_count; i++) {
++ for (i = 0; i < 2; i++) {
+ qentry = &ha->msix_entries[i];
+- if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+- ret = request_irq(qentry->vector,
+- qla83xx_msix_entries[i].handler,
+- 0, qla83xx_msix_entries[i].name, rsp);
+- } else if (IS_P3P_TYPE(ha)) {
++ if (IS_P3P_TYPE(ha))
+ ret = request_irq(qentry->vector,
+ qla82xx_msix_entries[i].handler,
+ 0, qla82xx_msix_entries[i].name, rsp);
+- } else {
++ else
+ ret = request_irq(qentry->vector,
+ msix_entries[i].handler,
+ 0, msix_entries[i].name, rsp);
+- }
+- if (ret) {
+- ql_log(ql_log_fatal, vha, 0x00cb,
+- "MSI-X: unable to register handler -- %x/%d.\n",
+- qentry->vector, ret);
+- qla24xx_disable_msix(ha);
+- ha->mqenable = 0;
+- goto msix_out;
+- }
++ if (ret)
++ goto msix_register_fail;
+ qentry->have_irq = 1;
+ qentry->rsp = rsp;
+ rsp->msix = qentry;
+ }
+
++ /*
++	 * If target mode is enabled, also request the vector for the ATIO
++ * queue.
++ */
++ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
++ qentry = &ha->msix_entries[ATIO_VECTOR];
++ ret = request_irq(qentry->vector,
++ qla83xx_msix_entries[ATIO_VECTOR].handler,
++ 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
++ qentry->have_irq = 1;
++ qentry->rsp = rsp;
++ rsp->msix = qentry;
++ }
++
++msix_register_fail:
++ if (ret) {
++ ql_log(ql_log_fatal, vha, 0x00cb,
++ "MSI-X: unable to register handler -- %x/%d.\n",
++ qentry->vector, ret);
++ qla24xx_disable_msix(ha);
++ ha->mqenable = 0;
++ goto msix_out;
++ }
++
+ /* Enable MSI-X vector for response queue update for queue 0 */
+ if (IS_QLA83XX(ha)) {
+ if (ha->msixbase && ha->mqiobase &&
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 17d740427240..9969fa1ef7c4 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1419,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
+ {
+ struct stor_mem_pools *memp = sdevice->hostdata;
+
++ if (!memp)
++ return;
++
+ mempool_destroy(memp->request_mempool);
+ kmem_cache_destroy(memp->request_pool);
+ kfree(memp);
+diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
+index 37bad952ab38..05dd69212e32 100644
+--- a/drivers/spi/spi-ath79.c
++++ b/drivers/spi/spi-ath79.c
+@@ -132,9 +132,9 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
+
+ flags = GPIOF_DIR_OUT;
+ if (spi->mode & SPI_CS_HIGH)
+- flags |= GPIOF_INIT_HIGH;
+- else
+ flags |= GPIOF_INIT_LOW;
++ else
++ flags |= GPIOF_INIT_HIGH;
+
+ status = gpio_request_one(cdata->gpio, flags,
+ dev_name(&spi->dev));
+diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
+index cc5b75d10c38..524d112d5369 100644
+--- a/drivers/spi/spi-coldfire-qspi.c
++++ b/drivers/spi/spi-coldfire-qspi.c
+@@ -539,7 +539,8 @@ static int mcfqspi_resume(struct device *dev)
+ #ifdef CONFIG_PM_RUNTIME
+ static int mcfqspi_runtime_suspend(struct device *dev)
+ {
+- struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
++ struct spi_master *master = dev_get_drvdata(dev);
++ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ clk_disable(mcfqspi->clk);
+
+@@ -548,7 +549,8 @@ static int mcfqspi_runtime_suspend(struct device *dev)
+
+ static int mcfqspi_runtime_resume(struct device *dev)
+ {
+- struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
++ struct spi_master *master = dev_get_drvdata(dev);
++ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ clk_enable(mcfqspi->clk);
+
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index 4e44575bd87a..f1322343d789 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -421,7 +421,6 @@ static int dspi_suspend(struct device *dev)
+
+ static int dspi_resume(struct device *dev)
+ {
+-
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+@@ -505,7 +504,7 @@ static int dspi_probe(struct platform_device *pdev)
+ clk_prepare_enable(dspi->clk);
+
+ init_waitqueue_head(&dspi->waitq);
+- platform_set_drvdata(pdev, dspi);
++ platform_set_drvdata(pdev, master);
+
+ ret = spi_bitbang_start(&dspi->bitbang);
+ if (ret != 0) {
+@@ -527,7 +526,8 @@ out_master_put:
+
+ static int dspi_remove(struct platform_device *pdev)
+ {
+- struct fsl_dspi *dspi = platform_get_drvdata(pdev);
++ struct spi_master *master = platform_get_drvdata(pdev);
++ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ /* Disconnect from the SPI framework */
+ spi_bitbang_stop(&dspi->bitbang);
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 15323d8bd9cf..941069517423 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -892,8 +892,8 @@ static int spi_imx_remove(struct platform_device *pdev)
+ spi_bitbang_stop(&spi_imx->bitbang);
+
+ writel(0, spi_imx->base + MXC_CSPICTRL);
+- clk_disable_unprepare(spi_imx->clk_ipg);
+- clk_disable_unprepare(spi_imx->clk_per);
++ clk_unprepare(spi_imx->clk_ipg);
++ clk_unprepare(spi_imx->clk_per);
+ spi_master_put(master);
+
+ return 0;
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index d7ac040e0dc1..d02088f7dc33 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -621,6 +621,8 @@ static ssize_t disksize_store(struct device *dev,
+
+ disksize = PAGE_ALIGN(disksize);
+ meta = zram_meta_alloc(disksize);
++ if (!meta)
++ return -ENOMEM;
+ down_write(&zram->init_lock);
+ if (zram->init_done) {
+ up_write(&zram->init_lock);
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index e12f2aab3c87..b5e574659785 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -785,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
+ spin_unlock_bh(&conn->cmd_lock);
+
+ list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+ iscsit_free_cmd(cmd, false);
+ }
+ }
+@@ -3704,7 +3704,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
+ break;
+ case ISTATE_REMOVE:
+ spin_lock_bh(&conn->cmd_lock);
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_free_cmd(cmd, false);
+@@ -4149,7 +4149,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
+
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_increment_maxcmdsn(cmd, sess);
+@@ -4194,6 +4194,10 @@ int iscsit_close_connection(
+ iscsit_stop_timers_for_cmds(conn);
+ iscsit_stop_nopin_response_timer(conn);
+ iscsit_stop_nopin_timer(conn);
++
++ if (conn->conn_transport->iscsit_wait_conn)
++ conn->conn_transport->iscsit_wait_conn(conn);
++
+ iscsit_free_queue_reqs_for_conn(conn);
+
+ /*
+diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
+index 33be1fb1df32..4ca8fd2a70db 100644
+--- a/drivers/target/iscsi/iscsi_target_erl2.c
++++ b/drivers/target/iscsi/iscsi_target_erl2.c
+@@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+ list_for_each_entry_safe(cmd, cmd_tmp,
+ &cr->conn_recovery_cmd_list, i_conn_node) {
+
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+ cmd->conn = NULL;
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ iscsit_free_cmd(cmd, true);
+@@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+ list_for_each_entry_safe(cmd, cmd_tmp,
+ &cr->conn_recovery_cmd_list, i_conn_node) {
+
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+ cmd->conn = NULL;
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ iscsit_free_cmd(cmd, true);
+@@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery(
+ }
+ cr = cmd->cr;
+
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+ return --cr->cmd_count;
+ }
+
+@@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+ if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
+ continue;
+
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+
+ spin_unlock_bh(&conn->cmd_lock);
+ iscsit_free_cmd(cmd, true);
+@@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+ /*
+ * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
+ * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
+- * list_del(&cmd->i_conn_node); to release the command to the
++ * list_del_init(&cmd->i_conn_node); to release the command to the
+ * session pool and remove it from the connection's list.
+ *
+ * Also stop the DataOUT timer, which will be restarted after
+@@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+ " CID: %hu\n", cmd->iscsi_opcode,
+ cmd->init_task_tag, cmd->cmd_sn, conn->cid);
+
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+ iscsit_free_cmd(cmd, true);
+ spin_lock_bh(&conn->cmd_lock);
+@@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+ */
+ if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
+ iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+ iscsit_free_cmd(cmd, true);
+ spin_lock_bh(&conn->cmd_lock);
+@@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+
+ cmd->sess = conn->sess;
+
+- list_del(&cmd->i_conn_node);
++ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_free_all_datain_reqs(cmd);
+diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
+index 4faeb47fa5e1..3cf77c0b76b4 100644
+--- a/drivers/target/iscsi/iscsi_target_tpg.c
++++ b/drivers/target/iscsi/iscsi_target_tpg.c
+@@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
+ list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+ spin_lock(&tpg->tpg_state_lock);
+- if (tpg->tpg_state == TPG_STATE_FREE) {
++ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+ spin_unlock(&tpg->tpg_state_lock);
+ continue;
+ }
+diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
+index 7722cb9d5a80..72573661a14a 100644
+--- a/drivers/thermal/x86_pkg_temp_thermal.c
++++ b/drivers/thermal/x86_pkg_temp_thermal.c
+@@ -68,6 +68,10 @@ struct phy_dev_entry {
+ struct thermal_zone_device *tzone;
+ };
+
++static const struct thermal_zone_params pkg_temp_tz_params = {
++ .no_hwmon = true,
++};
++
+ /* List maintaining number of package instances */
+ static LIST_HEAD(phy_dev_list);
+ static DEFINE_MUTEX(phy_dev_list_mutex);
+@@ -446,7 +450,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
+ thres_count,
+ (thres_count == MAX_NUMBER_OF_TRIPS) ?
+ 0x03 : 0x01,
+- phy_dev_entry, &tzone_ops, NULL, 0, 0);
++ phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
+ if (IS_ERR(phy_dev_entry->tzone)) {
+ err = PTR_ERR(phy_dev_entry->tzone);
+ goto err_ret_free;
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 548d1996590f..652438325197 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -718,6 +718,10 @@ int usb_get_configuration(struct usb_device *dev)
+ result = -ENOMEM;
+ goto err;
+ }
++
++ if (dev->quirks & USB_QUIRK_DELAY_INIT)
++ msleep(100);
++
+ result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
+ bigbuffer, length);
+ if (result < 0) {
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 01fe36273f3b..1053eb651b2f 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -46,6 +46,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Microsoft LifeCam-VX700 v2.0 */
+ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* Logitech HD Pro Webcams C920 and C930e */
++ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
++ { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ /* Logitech Quickcam Fusion */
+ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index 854c2ec7b699..3e86bf4371b3 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -58,8 +58,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
+ {
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+- struct pci_dev *p_smbus;
+- u8 rev;
+ u32 temp;
+ int retval;
+
+@@ -175,22 +173,12 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
+ 	/* SB600 and old versions of SB700 have a bug in the EHCI controller,
+ 	 * which causes USB devices to lose response in some cases.
+ */
+- if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
+- p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
+- PCI_DEVICE_ID_ATI_SBX00_SMBUS,
+- NULL);
+- if (!p_smbus)
+- break;
+- rev = p_smbus->revision;
+- if ((pdev->device == 0x4386) || (rev == 0x3a)
+- || (rev == 0x3b)) {
+- u8 tmp;
+- ehci_info(ehci, "applying AMD SB600/SB700 USB "
+- "freeze workaround\n");
+- pci_read_config_byte(pdev, 0x53, &tmp);
+- pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
+- }
+- pci_dev_put(p_smbus);
++ if ((pdev->device == 0x4386 || pdev->device == 0x4396) &&
++ usb_amd_hang_symptom_quirk()) {
++ u8 tmp;
++ ehci_info(ehci, "applying AMD SB600/SB700 USB freeze workaround\n");
++ pci_read_config_byte(pdev, 0x53, &tmp);
++ pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
+ }
+ break;
+ case PCI_VENDOR_ID_NETMOS:
+diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
+index ec337c2bd5e0..659cde1ed1ea 100644
+--- a/drivers/usb/host/ohci-pci.c
++++ b/drivers/usb/host/ohci-pci.c
+@@ -150,28 +150,16 @@ static int ohci_quirk_nec(struct usb_hcd *hcd)
+ static int ohci_quirk_amd700(struct usb_hcd *hcd)
+ {
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+- struct pci_dev *amd_smbus_dev;
+- u8 rev;
+
+ if (usb_amd_find_chipset_info())
+ ohci->flags |= OHCI_QUIRK_AMD_PLL;
+
+- amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
+- PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
+- if (!amd_smbus_dev)
+- return 0;
+-
+- rev = amd_smbus_dev->revision;
+-
+ /* SB800 needs pre-fetch fix */
+- if ((rev >= 0x40) && (rev <= 0x4f)) {
++ if (usb_amd_prefetch_quirk()) {
+ ohci->flags |= OHCI_QUIRK_AMD_PREFETCH;
+ ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
+ }
+
+- pci_dev_put(amd_smbus_dev);
+- amd_smbus_dev = NULL;
+-
+ return 0;
+ }
+
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 08ef2829a7e2..463156d03140 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -79,11 +79,30 @@
+ #define USB_INTEL_USB3_PSSEN 0xD8
+ #define USB_INTEL_USB3PRM 0xDC
+
++/*
++ * amd_chipset_gen values represent the different AMD chipset generations
++ */
++enum amd_chipset_gen {
++ NOT_AMD_CHIPSET = 0,
++ AMD_CHIPSET_SB600,
++ AMD_CHIPSET_SB700,
++ AMD_CHIPSET_SB800,
++ AMD_CHIPSET_HUDSON2,
++ AMD_CHIPSET_BOLTON,
++ AMD_CHIPSET_YANGTZE,
++ AMD_CHIPSET_UNKNOWN,
++};
++
++struct amd_chipset_type {
++ enum amd_chipset_gen gen;
++ u8 rev;
++};
++
+ static struct amd_chipset_info {
+ struct pci_dev *nb_dev;
+ struct pci_dev *smbus_dev;
+ int nb_type;
+- int sb_type;
++ struct amd_chipset_type sb_type;
+ int isoc_reqs;
+ int probe_count;
+ int probe_result;
+@@ -91,6 +110,51 @@ static struct amd_chipset_info {
+
+ static DEFINE_SPINLOCK(amd_lock);
+
++/*
++ * amd_chipset_sb_type_init - initialize amd chipset southbridge type
++ *
++ * The AMD FCH/SB generation and revision are identified by the SMBus
++ * controller vendor, device and revision IDs.
++ *
++ * Returns: 1 if it is an AMD chipset, 0 otherwise.
++ */
++int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
++{
++ u8 rev = 0;
++ pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
++
++ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
++ PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
++ if (pinfo->smbus_dev) {
++ rev = pinfo->smbus_dev->revision;
++ if (rev >= 0x10 && rev <= 0x1f)
++ pinfo->sb_type.gen = AMD_CHIPSET_SB600;
++ else if (rev >= 0x30 && rev <= 0x3f)
++ pinfo->sb_type.gen = AMD_CHIPSET_SB700;
++ else if (rev >= 0x40 && rev <= 0x4f)
++ pinfo->sb_type.gen = AMD_CHIPSET_SB800;
++ } else {
++ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
++ PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
++
++ if (!pinfo->smbus_dev) {
++ pinfo->sb_type.gen = NOT_AMD_CHIPSET;
++ return 0;
++ }
++
++ rev = pinfo->smbus_dev->revision;
++ if (rev >= 0x11 && rev <= 0x14)
++ pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
++ else if (rev >= 0x15 && rev <= 0x18)
++ pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
++ else if (rev >= 0x39 && rev <= 0x3a)
++ pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
++ }
++
++ pinfo->sb_type.rev = rev;
++ return 1;
++}
++
+ void sb800_prefetch(struct device *dev, int on)
+ {
+ u16 misc;
+@@ -106,7 +170,6 @@ EXPORT_SYMBOL_GPL(sb800_prefetch);
+
+ int usb_amd_find_chipset_info(void)
+ {
+- u8 rev = 0;
+ unsigned long flags;
+ struct amd_chipset_info info;
+ int ret;
+@@ -122,27 +185,17 @@ int usb_amd_find_chipset_info(void)
+ memset(&info, 0, sizeof(info));
+ spin_unlock_irqrestore(&amd_lock, flags);
+
+- info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
+- if (info.smbus_dev) {
+- rev = info.smbus_dev->revision;
+- if (rev >= 0x40)
+- info.sb_type = 1;
+- else if (rev >= 0x30 && rev <= 0x3b)
+- info.sb_type = 3;
+- } else {
+- info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+- 0x780b, NULL);
+- if (!info.smbus_dev) {
+- ret = 0;
+- goto commit;
+- }
+-
+- rev = info.smbus_dev->revision;
+- if (rev >= 0x11 && rev <= 0x18)
+- info.sb_type = 2;
++ if (!amd_chipset_sb_type_init(&info)) {
++ ret = 0;
++ goto commit;
+ }
+
+- if (info.sb_type == 0) {
++	/* The chipset generations below do not need the AMD PLL quirk */
++ if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
++ info.sb_type.gen == AMD_CHIPSET_SB600 ||
++ info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
++ (info.sb_type.gen == AMD_CHIPSET_SB700 &&
++ info.sb_type.rev > 0x3b)) {
+ if (info.smbus_dev) {
+ pci_dev_put(info.smbus_dev);
+ info.smbus_dev = NULL;
+@@ -197,6 +250,27 @@ commit:
+ }
+ EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
+
++bool usb_amd_hang_symptom_quirk(void)
++{
++ u8 rev;
++
++ usb_amd_find_chipset_info();
++ rev = amd_chipset.sb_type.rev;
++	/* SB600 and old versions of SB700 have the hang symptom bug */
++ return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
++ (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
++ rev >= 0x3a && rev <= 0x3b);
++}
++EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
++
++bool usb_amd_prefetch_quirk(void)
++{
++ usb_amd_find_chipset_info();
++ /* SB800 needs pre-fetch fix */
++ return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
++}
++EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
++
+ /*
+ * The hardware normally enables the A-link power management feature, which
+ * lets the system lower the power consumption in idle states.
+@@ -229,7 +303,9 @@ static void usb_amd_quirk_pll(int disable)
+ }
+ }
+
+- if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
++ if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
++ amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
++ amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
+ outb_p(AB_REG_BAR_LOW, 0xcd6);
+ addr_low = inb_p(0xcd7);
+ outb_p(AB_REG_BAR_HIGH, 0xcd6);
+@@ -240,7 +316,8 @@ static void usb_amd_quirk_pll(int disable)
+ outl_p(0x40, AB_DATA(addr));
+ outl_p(0x34, AB_INDX(addr));
+ val = inl_p(AB_DATA(addr));
+- } else if (amd_chipset.sb_type == 3) {
++ } else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
++ amd_chipset.sb_type.rev <= 0x3b) {
+ pci_read_config_dword(amd_chipset.smbus_dev,
+ AB_REG_BAR_SB700, &addr);
+ outl(AX_INDXC, AB_INDX(addr));
+@@ -353,7 +430,7 @@ void usb_amd_dev_put(void)
+ amd_chipset.nb_dev = NULL;
+ amd_chipset.smbus_dev = NULL;
+ amd_chipset.nb_type = 0;
+- amd_chipset.sb_type = 0;
++ memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
+ amd_chipset.isoc_reqs = 0;
+ amd_chipset.probe_result = 0;
+
+diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
+index ed6700d00fe6..638e88f7a28b 100644
+--- a/drivers/usb/host/pci-quirks.h
++++ b/drivers/usb/host/pci-quirks.h
+@@ -5,6 +5,8 @@
+ void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
+ int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
+ int usb_amd_find_chipset_info(void);
++bool usb_amd_hang_symptom_quirk(void);
++bool usb_amd_prefetch_quirk(void);
+ void usb_amd_dev_put(void);
+ void usb_amd_quirk_pll_disable(void);
+ void usb_amd_quirk_pll_enable(void);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 82fb34183a7f..f178f762b543 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4730,6 +4730,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ /* Accept arbitrarily long scatter-gather lists */
+ hcd->self.sg_tablesize = ~0;
+
++ /* support to build packet from discontinuous buffers */
++ hcd->self.no_sg_constraint = 1;
++
+ /* XHCI controllers don't stop the ep queue on short packets :| */
+ hcd->self.no_stop_on_short = 1;
+
+@@ -4754,14 +4757,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ /* xHCI private pointer was set in xhci_pci_probe for the second
+ * registered roothub.
+ */
+- xhci = hcd_to_xhci(hcd);
+- /*
+- * Support arbitrarily aligned sg-list entries on hosts without
+- * TD fragment rules (which are currently unsupported).
+- */
+- if (xhci->hci_version < 0x100)
+- hcd->self.no_sg_constraint = 1;
+-
+ return 0;
+ }
+
+@@ -4788,9 +4783,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ if (xhci->hci_version > 0x96)
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+
+- if (xhci->hci_version < 0x100)
+- hcd->self.no_sg_constraint = 1;
+-
+ /* Make sure the HC is halted. */
+ retval = xhci_halt(xhci);
+ if (retval)
+diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
+index 7f9ff75d0db2..fcb950031246 100644
+--- a/drivers/video/efifb.c
++++ b/drivers/video/efifb.c
+@@ -108,8 +108,8 @@ static int efifb_setup(char *options)
+ if (!*this_opt) continue;
+
+ for (i = 0; i < M_UNKNOWN; i++) {
+- if (!strcmp(this_opt, efifb_dmi_list[i].optname) &&
+- efifb_dmi_list[i].base != 0) {
++ if (efifb_dmi_list[i].base != 0 &&
++ !strcmp(this_opt, efifb_dmi_list[i].optname)) {
+ screen_info.lfb_base = efifb_dmi_list[i].base;
+ screen_info.lfb_linelength = efifb_dmi_list[i].stride;
+ screen_info.lfb_width = efifb_dmi_list[i].width;
+diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
+index 76273c1d26a6..b5ee393e2e8d 100644
+--- a/fs/bio-integrity.c
++++ b/fs/bio-integrity.c
+@@ -316,7 +316,7 @@ static void bio_integrity_generate(struct bio *bio)
+ bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
+ bix.sector_size = bi->sector_size;
+
+- bio_for_each_segment(bv, bio, i) {
++ bio_for_each_segment_all(bv, bio, i) {
+ void *kaddr = kmap_atomic(bv->bv_page);
+ bix.data_buf = kaddr + bv->bv_offset;
+ bix.data_size = bv->bv_len;
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 810c28fb8c3c..d76c9744c774 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -41,6 +41,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
+ #include <linux/compat.h>
++#include <linux/rculist.h>
+
+ /*
+ * LOCKING:
+@@ -133,8 +134,12 @@ struct nested_calls {
+ * of these on a server and we do not want this to take another cache line.
+ */
+ struct epitem {
+- /* RB tree node used to link this structure to the eventpoll RB tree */
+- struct rb_node rbn;
++ union {
++ /* RB tree node links this structure to the eventpoll RB tree */
++ struct rb_node rbn;
++ /* Used to free the struct epitem */
++ struct rcu_head rcu;
++ };
+
+ /* List header used to link this structure to the eventpoll ready list */
+ struct list_head rdllink;
+@@ -580,14 +585,14 @@ static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
+ * @sproc: Pointer to the scan callback.
+ * @priv: Private opaque data passed to the @sproc callback.
+ * @depth: The current depth of recursive f_op->poll calls.
++ * @ep_locked: caller already holds ep->mtx
+ *
+ * Returns: The same integer error code returned by the @sproc callback.
+ */
+ static int ep_scan_ready_list(struct eventpoll *ep,
+ int (*sproc)(struct eventpoll *,
+ struct list_head *, void *),
+- void *priv,
+- int depth)
++ void *priv, int depth, bool ep_locked)
+ {
+ int error, pwake = 0;
+ unsigned long flags;
+@@ -598,7 +603,9 @@ static int ep_scan_ready_list(struct eventpoll *ep,
+ * We need to lock this because we could be hit by
+ * eventpoll_release_file() and epoll_ctl().
+ */
+- mutex_lock_nested(&ep->mtx, depth);
++
++ if (!ep_locked)
++ mutex_lock_nested(&ep->mtx, depth);
+
+ /*
+ * Steal the ready list, and re-init the original one to the
+@@ -662,7 +669,8 @@ static int ep_scan_ready_list(struct eventpoll *ep,
+ }
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+- mutex_unlock(&ep->mtx);
++ if (!ep_locked)
++ mutex_unlock(&ep->mtx);
+
+ /* We have to call this outside the lock */
+ if (pwake)
+@@ -671,6 +679,12 @@ static int ep_scan_ready_list(struct eventpoll *ep,
+ return error;
+ }
+
++static void epi_rcu_free(struct rcu_head *head)
++{
++ struct epitem *epi = container_of(head, struct epitem, rcu);
++ kmem_cache_free(epi_cache, epi);
++}
++
+ /*
+ * Removes a "struct epitem" from the eventpoll RB tree and deallocates
+ * all the associated resources. Must be called with "mtx" held.
+@@ -692,8 +706,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
+
+ /* Remove the current item from the list of epoll hooks */
+ spin_lock(&file->f_lock);
+- if (ep_is_linked(&epi->fllink))
+- list_del_init(&epi->fllink);
++ list_del_rcu(&epi->fllink);
+ spin_unlock(&file->f_lock);
+
+ rb_erase(&epi->rbn, &ep->rbr);
+@@ -704,9 +717,14 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ wakeup_source_unregister(ep_wakeup_source(epi));
+-
+- /* At this point it is safe to free the eventpoll item */
+- kmem_cache_free(epi_cache, epi);
++ /*
++ * At this point it is safe to free the eventpoll item. Use the union
++ * field epi->rcu, since we are trying to minimize the size of
++ * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
++ * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
++ * use of the rbn field.
++ */
++ call_rcu(&epi->rcu, epi_rcu_free);
+
+ atomic_long_dec(&ep->user->epoll_watches);
+
+@@ -807,15 +825,34 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
+ return 0;
+ }
+
++static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
++ poll_table *pt);
++
++struct readyevents_arg {
++ struct eventpoll *ep;
++ bool locked;
++};
++
+ static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
+ {
+- return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
++ struct readyevents_arg *arg = priv;
++
++ return ep_scan_ready_list(arg->ep, ep_read_events_proc, NULL,
++ call_nests + 1, arg->locked);
+ }
+
+ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
+ {
+ int pollflags;
+ struct eventpoll *ep = file->private_data;
++ struct readyevents_arg arg;
++
++ /*
++ * During ep_insert() we already hold the ep->mtx for the tfile.
++ * Prevent re-aquisition.
++	 * Prevent re-acquisition.
++ arg.locked = wait && (wait->_qproc == ep_ptable_queue_proc);
++ arg.ep = ep;
+
+ /* Insert inside our poll wait queue */
+ poll_wait(file, &ep->poll_wait, wait);
+@@ -827,7 +864,7 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
+ * could re-enter here.
+ */
+ pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
+- ep_poll_readyevents_proc, ep, ep, current);
++ ep_poll_readyevents_proc, &arg, ep, current);
+
+ return pollflags != -1 ? pollflags : 0;
+ }
+@@ -872,7 +909,6 @@ static const struct file_operations eventpoll_fops = {
+ */
+ void eventpoll_release_file(struct file *file)
+ {
+- struct list_head *lsthead = &file->f_ep_links;
+ struct eventpoll *ep;
+ struct epitem *epi;
+
+@@ -890,17 +926,12 @@ void eventpoll_release_file(struct file *file)
+ * Besides, ep_remove() acquires the lock, so we can't hold it here.
+ */
+ mutex_lock(&epmutex);
+-
+- while (!list_empty(lsthead)) {
+- epi = list_first_entry(lsthead, struct epitem, fllink);
+-
++ list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
+ ep = epi->ep;
+- list_del_init(&epi->fllink);
+ mutex_lock_nested(&ep->mtx, 0);
+ ep_remove(ep, epi);
+ mutex_unlock(&ep->mtx);
+ }
+-
+ mutex_unlock(&epmutex);
+ }
+
+@@ -1138,7 +1169,9 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
+ struct file *child_file;
+ struct epitem *epi;
+
+- list_for_each_entry(epi, &file->f_ep_links, fllink) {
++ /* CTL_DEL can remove links here, but that can't increase our count */
++ rcu_read_lock();
++ list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
+ child_file = epi->ep->file;
+ if (is_file_epoll(child_file)) {
+ if (list_empty(&child_file->f_ep_links)) {
+@@ -1160,6 +1193,7 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
+ "file is not an ep!\n");
+ }
+ }
++ rcu_read_unlock();
+ return error;
+ }
+
+@@ -1231,7 +1265,7 @@ static noinline void ep_destroy_wakeup_source(struct epitem *epi)
+ * Must be called with "mtx" held.
+ */
+ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+- struct file *tfile, int fd)
++ struct file *tfile, int fd, int full_check)
+ {
+ int error, revents, pwake = 0;
+ unsigned long flags;
+@@ -1286,7 +1320,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+
+ /* Add the current item to the list of active epoll hook for this file */
+ spin_lock(&tfile->f_lock);
+- list_add_tail(&epi->fllink, &tfile->f_ep_links);
++ list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+ spin_unlock(&tfile->f_lock);
+
+ /*
+@@ -1297,7 +1331,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+
+ /* now check if we've created too many backpaths */
+ error = -EINVAL;
+- if (reverse_path_check())
++ if (full_check && reverse_path_check())
+ goto error_remove_epi;
+
+ /* We have to drop the new item inside our item list to keep track of it */
+@@ -1327,8 +1361,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+
+ error_remove_epi:
+ spin_lock(&tfile->f_lock);
+- if (ep_is_linked(&epi->fllink))
+- list_del_init(&epi->fllink);
++ list_del_rcu(&epi->fllink);
+ spin_unlock(&tfile->f_lock);
+
+ rb_erase(&epi->rbn, &ep->rbr);
+@@ -1521,7 +1554,7 @@ static int ep_send_events(struct eventpoll *ep,
+ esed.maxevents = maxevents;
+ esed.events = events;
+
+- return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
++ return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
+ }
+
+ static inline struct timespec ep_set_mstimeout(long ms)
+@@ -1791,11 +1824,12 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ struct epoll_event __user *, event)
+ {
+ int error;
+- int did_lock_epmutex = 0;
++ int full_check = 0;
+ struct fd f, tf;
+ struct eventpoll *ep;
+ struct epitem *epi;
+ struct epoll_event epds;
++ struct eventpoll *tep = NULL;
+
+ error = -EFAULT;
+ if (ep_op_has_event(op) &&
+@@ -1844,27 +1878,37 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ * and hang them on the tfile_check_list, so we can check that we
+ * haven't created too many possible wakeup paths.
+ *
+- * We need to hold the epmutex across both ep_insert and ep_remove
+- * b/c we want to make sure we are looking at a coherent view of
+- * epoll network.
++	 * We do not need to take the global 'epmutex' on EPOLL_CTL_ADD when
++ * the epoll file descriptor is attaching directly to a wakeup source,
++ * unless the epoll file descriptor is nested. The purpose of taking the
++	 * 'epmutex' on add is to prevent complex topologies such as loops and
++ * deep wakeup paths from forming in parallel through multiple
++ * EPOLL_CTL_ADD operations.
+ */
+- if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
+- mutex_lock(&epmutex);
+- did_lock_epmutex = 1;
+- }
++ mutex_lock_nested(&ep->mtx, 0);
+ if (op == EPOLL_CTL_ADD) {
+- if (is_file_epoll(tf.file)) {
+- error = -ELOOP;
+- if (ep_loop_check(ep, tf.file) != 0) {
+- clear_tfile_check_list();
+- goto error_tgt_fput;
++ if (!list_empty(&f.file->f_ep_links) ||
++ is_file_epoll(tf.file)) {
++ full_check = 1;
++ mutex_unlock(&ep->mtx);
++ mutex_lock(&epmutex);
++ if (is_file_epoll(tf.file)) {
++ error = -ELOOP;
++ if (ep_loop_check(ep, tf.file) != 0) {
++ clear_tfile_check_list();
++ goto error_tgt_fput;
++ }
++ } else
++ list_add(&tf.file->f_tfile_llink,
++ &tfile_check_list);
++ mutex_lock_nested(&ep->mtx, 0);
++ if (is_file_epoll(tf.file)) {
++ tep = tf.file->private_data;
++ mutex_lock_nested(&tep->mtx, 1);
+ }
+- } else
+- list_add(&tf.file->f_tfile_llink, &tfile_check_list);
++ }
+ }
+
+- mutex_lock_nested(&ep->mtx, 0);
+-
+ /*
+ * Try to lookup the file inside our RB tree, Since we grabbed "mtx"
+ * above, we can be sure to be able to use the item looked up by
+@@ -1877,10 +1921,11 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ case EPOLL_CTL_ADD:
+ if (!epi) {
+ epds.events |= POLLERR | POLLHUP;
+- error = ep_insert(ep, &epds, tf.file, fd);
++ error = ep_insert(ep, &epds, tf.file, fd, full_check);
+ } else
+ error = -EEXIST;
+- clear_tfile_check_list();
++ if (full_check)
++ clear_tfile_check_list();
+ break;
+ case EPOLL_CTL_DEL:
+ if (epi)
+@@ -1896,10 +1941,12 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ error = -ENOENT;
+ break;
+ }
++ if (tep != NULL)
++ mutex_unlock(&tep->mtx);
+ mutex_unlock(&ep->mtx);
+
+ error_tgt_fput:
+- if (did_lock_epmutex)
++ if (full_check)
+ mutex_unlock(&epmutex);
+
+ fdput(tf);
+diff --git a/fs/namei.c b/fs/namei.c
+index 23ac50f4ee40..187cacf1c83c 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3924,6 +3924,7 @@ retry:
+ out_dput:
+ done_path_create(&new_path, new_dentry);
+ if (retry_estale(error, how)) {
++ path_put(&old_path);
+ how |= LOOKUP_REVAL;
+ goto retry;
+ }
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index ef792f29f831..5d8ccecf5f5c 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -659,16 +659,19 @@ int nfs_async_inode_return_delegation(struct inode *inode,
+
+ rcu_read_lock();
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
++ if (delegation == NULL)
++ goto out_enoent;
+
+- if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
+- rcu_read_unlock();
+- return -ENOENT;
+- }
++ if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
++ goto out_enoent;
+ nfs_mark_return_delegation(server, delegation);
+ rcu_read_unlock();
+
+ nfs_delegation_run_state_manager(clp);
+ return 0;
++out_enoent:
++ rcu_read_unlock();
++ return -ENOENT;
+ }
+
+ static struct inode *
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d3d7766f55e3..a53651743d4d 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3972,8 +3972,9 @@ static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
+ {
+ nfs4_stateid current_stateid;
+
+- if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode))
+- return false;
++ /* If the current stateid represents a lost lock, then exit */
++ if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
++ return true;
+ return nfs4_stateid_match(stateid, &current_stateid);
+ }
+
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index d71903c6068b..f07941160515 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -2371,8 +2371,8 @@ out_dio:
+
+ if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+ ((file->f_flags & O_DIRECT) && !direct_io)) {
+- ret = filemap_fdatawrite_range(file->f_mapping, pos,
+- pos + count - 1);
++ ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
++ *ppos + count - 1);
+ if (ret < 0)
+ written = ret;
+
+@@ -2385,8 +2385,8 @@ out_dio:
+ }
+
+ if (!ret)
+- ret = filemap_fdatawait_range(file->f_mapping, pos,
+- pos + count - 1);
++ ret = filemap_fdatawait_range(file->f_mapping, *ppos,
++ *ppos + count - 1);
+ }
+
+ /*
+diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
+index aaa50611ec66..d7b5108789e2 100644
+--- a/fs/ocfs2/quota_global.c
++++ b/fs/ocfs2/quota_global.c
+@@ -717,6 +717,12 @@ static int ocfs2_release_dquot(struct dquot *dquot)
+ */
+ if (status < 0)
+ mlog_errno(status);
++ /*
++	 * Clear dq_off so that we search for the structure in the quota file
++	 * next time we acquire it. The structure might be deleted and
++	 * reallocated elsewhere by another node while it is on the freelist.
++ */
++ dquot->dq_off = 0;
+ clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+ out_trans:
+ ocfs2_commit_trans(osb, handle);
+@@ -756,16 +762,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
+ status = ocfs2_lock_global_qf(info, 1);
+ if (status < 0)
+ goto out;
+- if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
+- status = ocfs2_qinfo_lock(info, 0);
+- if (status < 0)
+- goto out_dq;
+- status = qtree_read_dquot(&info->dqi_gi, dquot);
+- ocfs2_qinfo_unlock(info, 0);
+- if (status < 0)
+- goto out_dq;
+- }
+- set_bit(DQ_READ_B, &dquot->dq_flags);
++ status = ocfs2_qinfo_lock(info, 0);
++ if (status < 0)
++ goto out_dq;
++ /*
++	 * We always want to read the dquot structure from disk because we
++	 * don't know what happened to it while it was on the freelist.
++ */
++ status = qtree_read_dquot(&info->dqi_gi, dquot);
++ ocfs2_qinfo_unlock(info, 0);
++ if (status < 0)
++ goto out_dq;
+
+ OCFS2_DQUOT(dquot)->dq_use_count++;
+ OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
+diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
+index 2e4344be3b96..2001862bf2b1 100644
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -1303,10 +1303,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
+ ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
+
+ out:
+-	/* Clear the read bit so that the next time someone uses this
+-	 * dquot they read fresh info from disk and allocate a local
+-	 * dquot structure */
+- clear_bit(DQ_READ_B, &dquot->dq_flags);
+ return status;
+ }
+
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 1485e38daaa3..c35eaa404933 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1813,6 +1813,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
+ if (rc)
+ goto out_mmput;
+
++ rc = -ENOENT;
+ down_read(&mm->mmap_sem);
+ vma = find_exact_vma(mm, vm_start, vm_end);
+ if (vma && vma->vm_file) {
+diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
+index 7c2e030e72f1..a12f6ed91c84 100644
+--- a/include/linux/blktrace_api.h
++++ b/include/linux/blktrace_api.h
+@@ -5,6 +5,7 @@
+ #include <linux/relay.h>
+ #include <linux/compat.h>
+ #include <uapi/linux/blktrace_api.h>
++#include <linux/list.h>
+
+ #if defined(CONFIG_BLK_DEV_IO_TRACE)
+
+@@ -23,6 +24,7 @@ struct blk_trace {
+ struct dentry *dir;
+ struct dentry *dropped_file;
+ struct dentry *msg_file;
++ struct list_head running_list;
+ atomic_t dropped;
+ };
+
+diff --git a/include/linux/firewire.h b/include/linux/firewire.h
+index 5d7782e42b8f..c3683bdf28fe 100644
+--- a/include/linux/firewire.h
++++ b/include/linux/firewire.h
+@@ -200,6 +200,7 @@ struct fw_device {
+ unsigned irmc:1;
+ unsigned bc_implemented:2;
+
++ work_func_t workfn;
+ struct delayed_work work;
+ struct fw_attribute_group attribute_group;
+ };
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index fed08c0c543b..648bcb007eba 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -161,7 +161,7 @@ extern unsigned int kobjsize(const void *objp);
+ * Special vmas that are non-mergable, non-mlock()able.
+ * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
+ */
+-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
++#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+
+ /*
+ * mapping from the currently active vm_flags protection bits (the
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index ebeab360d851..0ecc46e7af3d 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -60,6 +60,12 @@ struct tp_module {
+ unsigned int num_tracepoints;
+ struct tracepoint * const *tracepoints_ptrs;
+ };
++bool trace_module_has_bad_taint(struct module *mod);
++#else
++static inline bool trace_module_has_bad_taint(struct module *mod)
++{
++ return false;
++}
+ #endif /* CONFIG_MODULES */
+
+ struct tracepoint_iter {
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index b1aa324c5e65..51dcc6faa561 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -482,6 +482,24 @@ extern int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+ extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ struct ip_options *opt);
+ #ifdef CONFIG_SYN_COOKIES
++#include <linux/ktime.h>
++
++/* Syncookies use a monotonic timer which increments every 64 seconds.
++ * This counter is used both as a hash input and partially encoded into
++ * the cookie value. A cookie is only validated further if the delta
++ * between the current counter value and the encoded one is less than this,
++ * i.e. a sent cookie is valid only at most for 128 seconds (or less if
++ * the counter advances immediately after a cookie is generated).
++ */
++#define MAX_SYNCOOKIE_AGE 2
++
++static inline u32 tcp_cookie_time(void)
++{
++ struct timespec now;
++ getnstimeofday(&now);
++ return now.tv_sec >> 6; /* 64 seconds granularity */
++}
++
+ extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
+ const struct tcphdr *th, u16 *mssp);
+ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
+@@ -1303,7 +1321,8 @@ struct tcp_fastopen_request {
+ /* Fast Open cookie. Size 0 means a cookie request */
+ struct tcp_fastopen_cookie cookie;
+ struct msghdr *data; /* data in MSG_FASTOPEN */
+- u16 copied; /* queued in tcp_connect() */
++ size_t size;
++ int copied; /* queued in tcp_connect() */
+ };
+ void tcp_free_fastopen_req(struct tcp_sock *tp);
+
+diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
+index a12589c4ee92..361bd0f04018 100644
+--- a/include/target/iscsi/iscsi_transport.h
++++ b/include/target/iscsi/iscsi_transport.h
+@@ -12,6 +12,7 @@ struct iscsit_transport {
+ int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
+ int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
+ void (*iscsit_free_np)(struct iscsi_np *);
++ void (*iscsit_wait_conn)(struct iscsi_conn *);
+ void (*iscsit_free_conn)(struct iscsi_conn *);
+ int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
+ int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 558aa91186b6..52770bfde2a5 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -885,6 +885,8 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
+ return -EINVAL;
+
+ if (msgflg & MSG_COPY) {
++ if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
++ return -EINVAL;
+ copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
+ if (IS_ERR(copy))
+ return PTR_ERR(copy);
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 4772034b4b17..5ae9f950e024 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ 	 * Temporarily set the task's mems_allowed to the target nodes of migration,
+ * so that the migration code can allocate pages on these nodes.
+ *
+- * Call holding cpuset_mutex, so current's cpuset won't change
+- * during this call, as manage_mutex holds off any cpuset_attach()
+- * calls. Therefore we don't need to take task_lock around the
+- * call to guarantee_online_mems(), as we know no one is changing
+- * our task's cpuset.
+- *
+ * While the mm_struct we are migrating is typically from some
+ * other task, the task_struct mems_allowed that we are hacking
+ * is for our current task, which must allocate new pages for that
+@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
+
+ do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
+
++ rcu_read_lock();
+ mems_cs = effective_nodemask_cpuset(task_cs(tsk));
+ guarantee_online_mems(mems_cs, &tsk->mems_allowed);
++ rcu_read_unlock();
+ }
+
+ /*
+@@ -2511,9 +2507,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+
+ task_lock(current);
+ cs = nearest_hardwall_ancestor(task_cs(current));
++ allowed = node_isset(node, cs->mems_allowed);
+ task_unlock(current);
+
+- allowed = node_isset(node, cs->mems_allowed);
+ mutex_unlock(&callback_mutex);
+ return allowed;
+ }
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 221a58fc62f7..231754863a87 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+ return -EINVAL;
+ address -= key->both.offset;
+
++ if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
++ return -EFAULT;
++
+ /*
+ * PROCESS_PRIVATE futexes are fast.
+ * As the mm cannot disappear under us and the 'key' only needs
+@@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+ * but access_ok() should be faster than find_vma()
+ */
+ if (!fshared) {
+- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
+- return -EFAULT;
+ key->private.mm = mm;
+ key->private.address = address;
+ get_futex_key_refs(key);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 3e59f951d42f..4c84746a840b 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+
+ static void wake_threads_waitq(struct irq_desc *desc)
+ {
+- if (atomic_dec_and_test(&desc->threads_active) &&
+- waitqueue_active(&desc->wait_for_threads))
++ if (atomic_dec_and_test(&desc->threads_active))
+ wake_up(&desc->wait_for_threads);
+ }
+
+diff --git a/kernel/rcutree.h b/kernel/rcutree.h
+index 5f97eab602cd..52be957c9fe2 100644
+--- a/kernel/rcutree.h
++++ b/kernel/rcutree.h
+@@ -104,6 +104,8 @@ struct rcu_dynticks {
+ /* idle-period nonlazy_posted snapshot. */
+ unsigned long last_accelerate;
+ /* Last jiffy CBs were accelerated. */
++ unsigned long last_advance_all;
++ /* Last jiffy CBs were all advanced. */
+ int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
+ #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
+ };
+diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
+index 130c97b027f2..511e6b47c594 100644
+--- a/kernel/rcutree_plugin.h
++++ b/kernel/rcutree_plugin.h
+@@ -1627,20 +1627,26 @@ module_param(rcu_idle_gp_delay, int, 0644);
+ static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
+ module_param(rcu_idle_lazy_gp_delay, int, 0644);
+
+-extern int tick_nohz_enabled;
++extern int tick_nohz_active;
+
+ /*
+- * Try to advance callbacks for all flavors of RCU on the current CPU.
+- * Afterwards, if there are any callbacks ready for immediate invocation,
+- * return true.
++ * Try to advance callbacks for all flavors of RCU on the current CPU, but
++ * only if it has been a while since the last time we did so. Afterwards,
++ * if there are any callbacks ready for immediate invocation, return true.
+ */
+ static bool rcu_try_advance_all_cbs(void)
+ {
+ bool cbs_ready = false;
+ struct rcu_data *rdp;
++ struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ struct rcu_node *rnp;
+ struct rcu_state *rsp;
+
++ /* Exit early if we advanced recently. */
++ if (jiffies == rdtp->last_advance_all)
++ return 0;
++ rdtp->last_advance_all = jiffies;
++
+ for_each_rcu_flavor(rsp) {
+ rdp = this_cpu_ptr(rsp->rda);
+ rnp = rdp->mynode;
+@@ -1718,7 +1724,7 @@ static void rcu_prepare_for_idle(int cpu)
+ int tne;
+
+ /* Handle nohz enablement switches conservatively. */
+- tne = ACCESS_ONCE(tick_nohz_enabled);
++ tne = ACCESS_ONCE(tick_nohz_active);
+ if (tne != rdtp->tick_nohz_enabled_snap) {
+ if (rcu_cpu_has_callbacks(cpu, NULL))
+ invoke_rcu_core(); /* force nohz to see update. */
+@@ -1739,6 +1745,8 @@ static void rcu_prepare_for_idle(int cpu)
+ */
+ if (rdtp->all_lazy &&
+ rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
++ rdtp->all_lazy = false;
++ rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
+ invoke_rcu_core();
+ return;
+ }
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index ceae65e69a64..a494ace683e3 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5119,10 +5119,13 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
+ DEFINE_PER_CPU(struct sched_domain *, sd_llc);
+ DEFINE_PER_CPU(int, sd_llc_size);
+ DEFINE_PER_CPU(int, sd_llc_id);
++DEFINE_PER_CPU(struct sched_domain *, sd_busy);
++DEFINE_PER_CPU(struct sched_domain *, sd_asym);
+
+ static void update_top_cache_domain(int cpu)
+ {
+ struct sched_domain *sd;
++ struct sched_domain *busy_sd = NULL;
+ int id = cpu;
+ int size = 1;
+
+@@ -5130,11 +5133,16 @@ static void update_top_cache_domain(int cpu)
+ if (sd) {
+ id = cpumask_first(sched_domain_span(sd));
+ size = cpumask_weight(sched_domain_span(sd));
++ busy_sd = sd->parent; /* sd_busy */
+ }
++ rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
+
+ rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
+ per_cpu(sd_llc_size, cpu) = size;
+ per_cpu(sd_llc_id, cpu) = id;
++
++ sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
++ rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
+ }
+
+ /*
+@@ -5325,6 +5333,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+ * die on a /0 trap.
+ */
+ sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
++ sg->sgp->power_orig = sg->sgp->power;
+
+ /*
+ * Make sure the first group of this domain contains the
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 411732334906..790e2fc808da 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5598,16 +5598,16 @@ static inline void nohz_balance_exit_idle(int cpu)
+ static inline void set_cpu_sd_state_busy(void)
+ {
+ struct sched_domain *sd;
++ int cpu = smp_processor_id();
+
+ rcu_read_lock();
+- sd = rcu_dereference_check_sched_domain(this_rq()->sd);
++ sd = rcu_dereference(per_cpu(sd_busy, cpu));
+
+ if (!sd || !sd->nohz_idle)
+ goto unlock;
+ sd->nohz_idle = 0;
+
+- for (; sd; sd = sd->parent)
+- atomic_inc(&sd->groups->sgp->nr_busy_cpus);
++ atomic_inc(&sd->groups->sgp->nr_busy_cpus);
+ unlock:
+ rcu_read_unlock();
+ }
+@@ -5615,16 +5615,16 @@ unlock:
+ void set_cpu_sd_state_idle(void)
+ {
+ struct sched_domain *sd;
++ int cpu = smp_processor_id();
+
+ rcu_read_lock();
+- sd = rcu_dereference_check_sched_domain(this_rq()->sd);
++ sd = rcu_dereference(per_cpu(sd_busy, cpu));
+
+ if (!sd || sd->nohz_idle)
+ goto unlock;
+ sd->nohz_idle = 1;
+
+- for (; sd; sd = sd->parent)
+- atomic_dec(&sd->groups->sgp->nr_busy_cpus);
++ atomic_dec(&sd->groups->sgp->nr_busy_cpus);
+ unlock:
+ rcu_read_unlock();
+ }
+@@ -5807,6 +5807,8 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
+ {
+ unsigned long now = jiffies;
+ struct sched_domain *sd;
++ struct sched_group_power *sgp;
++ int nr_busy;
+
+ if (unlikely(idle_cpu(cpu)))
+ return 0;
+@@ -5832,22 +5834,22 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
+ goto need_kick;
+
+ rcu_read_lock();
+- for_each_domain(cpu, sd) {
+- struct sched_group *sg = sd->groups;
+- struct sched_group_power *sgp = sg->sgp;
+- int nr_busy = atomic_read(&sgp->nr_busy_cpus);
++ sd = rcu_dereference(per_cpu(sd_busy, cpu));
+
+- if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
+- goto need_kick_unlock;
++ if (sd) {
++ sgp = sd->groups->sgp;
++ nr_busy = atomic_read(&sgp->nr_busy_cpus);
+
+- if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
+- && (cpumask_first_and(nohz.idle_cpus_mask,
+- sched_domain_span(sd)) < cpu))
++ if (nr_busy > 1)
+ goto need_kick_unlock;
+-
+- if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
+- break;
+ }
++
++ sd = rcu_dereference(per_cpu(sd_asym, cpu));
++
++ if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
++ sched_domain_span(sd)) < cpu))
++ goto need_kick_unlock;
++
+ rcu_read_unlock();
+ return 0;
+
+@@ -6013,15 +6015,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ /*
+- * Ensure the task's vruntime is normalized, so that when its
++ * Ensure the task's vruntime is normalized, so that when it's
+ * switched back to the fair class the enqueue_entity(.flags=0) will
+ * do the right thing.
+ *
+- * If it was on_rq, then the dequeue_entity(.flags=0) will already
+- * have normalized the vruntime, if it was !on_rq, then only when
++ * If it's on_rq, then the dequeue_entity(.flags=0) will already
++ * have normalized the vruntime, if it's !on_rq, then only when
+ * the task is sleeping will it still have non-normalized vruntime.
+ */
+- if (!se->on_rq && p->state != TASK_RUNNING) {
++ if (!p->on_rq && p->state != TASK_RUNNING) {
+ /*
+ * Fix up our vruntime so that the current sleep doesn't
+ * cause 'unlimited' sleep bonus.
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 417b1b3fd7e9..ff04e1a06412 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -246,8 +246,10 @@ static inline void rt_set_overload(struct rq *rq)
+ * if we should look at the mask. It would be a shame
+ * if we looked at the mask, but the mask was not
+ * updated yet.
++ *
++ * Matched by the barrier in pull_rt_task().
+ */
+- wmb();
++ smp_wmb();
+ atomic_inc(&rq->rd->rto_count);
+ }
+
+@@ -1227,8 +1229,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
+ */
+ if (curr && unlikely(rt_task(curr)) &&
+ (curr->nr_cpus_allowed < 2 ||
+- curr->prio <= p->prio) &&
+- (p->nr_cpus_allowed > 1)) {
++ curr->prio <= p->prio)) {
+ int target = find_lowest_rq(p);
+
+ if (target != -1)
+@@ -1644,6 +1645,12 @@ static int pull_rt_task(struct rq *this_rq)
+ if (likely(!rt_overloaded(this_rq)))
+ return 0;
+
++ /*
++	 * Match the barrier from rt_set_overload(); this guarantees that if we
++ * see overloaded we must also see the rto_mask bit.
++ */
++ smp_rmb();
++
+ for_each_cpu(cpu, this_rq->rd->rto_mask) {
+ if (this_cpu == cpu)
+ continue;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index a6208afd80e7..4f310592b1ba 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -596,6 +596,8 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
+ DECLARE_PER_CPU(struct sched_domain *, sd_llc);
+ DECLARE_PER_CPU(int, sd_llc_size);
+ DECLARE_PER_CPU(int, sd_llc_id);
++DECLARE_PER_CPU(struct sched_domain *, sd_busy);
++DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+
+ struct sched_group_power {
+ atomic_t ref;
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 3612fc77f834..ea20f7d1ac2c 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -361,8 +361,8 @@ void __init tick_nohz_init(void)
+ /*
+ * NO HZ enabled ?
+ */
+-int tick_nohz_enabled __read_mostly = 1;
+-
++static int tick_nohz_enabled __read_mostly = 1;
++int tick_nohz_active __read_mostly;
+ /*
+ * Enable / Disable tickless mode
+ */
+@@ -465,7 +465,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t now, idle;
+
+- if (!tick_nohz_enabled)
++ if (!tick_nohz_active)
+ return -1;
+
+ now = ktime_get();
+@@ -506,7 +506,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t now, iowait;
+
+- if (!tick_nohz_enabled)
++ if (!tick_nohz_active)
+ return -1;
+
+ now = ktime_get();
+@@ -711,8 +711,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+ return false;
+ }
+
+- if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
++ if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
++ ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
+ return false;
++ }
+
+ if (need_resched())
+ return false;
+@@ -799,11 +801,6 @@ void tick_nohz_idle_enter(void)
+ local_irq_disable();
+
+ ts = &__get_cpu_var(tick_cpu_sched);
+- /*
+- * set ts->inidle unconditionally. even if the system did not
+- * switch to nohz mode the cpu frequency governers rely on the
+- * update of the idle time accounting in tick_nohz_start_idle().
+- */
+ ts->inidle = 1;
+ __tick_nohz_idle_enter(ts);
+
+@@ -973,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
+ struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ ktime_t next;
+
+- if (!tick_nohz_enabled)
++ if (!tick_nohz_active)
+ return;
+
+ local_irq_disable();
+@@ -981,7 +978,7 @@ static void tick_nohz_switch_to_nohz(void)
+ local_irq_enable();
+ return;
+ }
+-
++ tick_nohz_active = 1;
+ ts->nohz_mode = NOHZ_MODE_LOWRES;
+
+ /*
+@@ -1139,8 +1136,10 @@ void tick_setup_sched_timer(void)
+ }
+
+ #ifdef CONFIG_NO_HZ_COMMON
+- if (tick_nohz_enabled)
++ if (tick_nohz_enabled) {
+ ts->nohz_mode = NOHZ_MODE_HIGHRES;
++ tick_nohz_active = 1;
++ }
+ #endif
+ }
+ #endif /* HIGH_RES_TIMERS */
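
The split above separates "nohz requested" (tick_nohz_enabled, now static) from "nohz actually in use" (tick_nohz_active), so idle-time queries return -1 until the mode has really been switched on. A small sketch of the two-flag pattern, with hypothetical names standing in for the tick code:

#include <stdbool.h>
#include <stdio.h>

static bool feature_enabled = true;  /* requested (e.g. boot parameter) */
static bool feature_active;          /* actually initialized */

static void feature_init(bool hw_ok)
{
	if (feature_enabled && hw_ok)
		feature_active = true;       /* only now do queries succeed */
}

static int query_stat(void)
{
	if (!feature_active)             /* note: not "!feature_enabled" */
		return -1;
	return 123;                      /* placeholder statistic */
}

int main(void)
{
	printf("before init: %d\n", query_stat());  /* -1 */
	feature_init(true);
	printf("after init: %d\n", query_stat());   /* 123 */
	return 0;
}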
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index b8b8560bfb95..7f727b34280d 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -26,6 +26,7 @@
+ #include <linux/export.h>
+ #include <linux/time.h>
+ #include <linux/uaccess.h>
++#include <linux/list.h>
+
+ #include <trace/events/block.h>
+
+@@ -38,6 +39,9 @@ static unsigned int blktrace_seq __read_mostly = 1;
+ static struct trace_array *blk_tr;
+ static bool blk_tracer_enabled __read_mostly;
+
++static LIST_HEAD(running_trace_list);
++static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
++
+ /* Select an alternative, minimalistic output than the original one */
+ #define TRACE_BLK_OPT_CLASSIC 0x1
+
+@@ -107,10 +111,18 @@ record_it:
+ * Send out a notify for this process, if we haven't done so since a trace
+ * started
+ */
+-static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
++static void trace_note_tsk(struct task_struct *tsk)
+ {
++ unsigned long flags;
++ struct blk_trace *bt;
++
+ tsk->btrace_seq = blktrace_seq;
+- trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
++ spin_lock_irqsave(&running_trace_lock, flags);
++ list_for_each_entry(bt, &running_trace_list, running_list) {
++ trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
++ sizeof(tsk->comm));
++ }
++ spin_unlock_irqrestore(&running_trace_lock, flags);
+ }
+
+ static void trace_note_time(struct blk_trace *bt)
+@@ -229,16 +241,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+ goto record_it;
+ }
+
++ if (unlikely(tsk->btrace_seq != blktrace_seq))
++ trace_note_tsk(tsk);
++
+ /*
+ * A word about the locking here - we disable interrupts to reserve
+ * some space in the relay per-cpu buffer, to prevent an irq
+ * from coming in and stepping on our toes.
+ */
+ local_irq_save(flags);
+-
+- if (unlikely(tsk->btrace_seq != blktrace_seq))
+- trace_note_tsk(bt, tsk);
+-
+ t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
+ if (t) {
+ sequence = per_cpu_ptr(bt->sequence, cpu);
+@@ -477,6 +488,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ bt->dir = dir;
+ bt->dev = dev;
+ atomic_set(&bt->dropped, 0);
++ INIT_LIST_HEAD(&bt->running_list);
+
+ ret = -EIO;
+ bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
+@@ -601,6 +613,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
+ blktrace_seq++;
+ smp_mb();
+ bt->trace_state = Blktrace_running;
++ spin_lock_irq(&running_trace_lock);
++ list_add(&bt->running_list, &running_trace_list);
++ spin_unlock_irq(&running_trace_lock);
+
+ trace_note_time(bt);
+ ret = 0;
+@@ -608,6 +623,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
+ } else {
+ if (bt->trace_state == Blktrace_running) {
+ bt->trace_state = Blktrace_stopped;
++ spin_lock_irq(&running_trace_lock);
++ list_del_init(&bt->running_list);
++ spin_unlock_irq(&running_trace_lock);
+ relay_flush(bt->rchan);
+ ret = 0;
+ }
+@@ -1472,6 +1490,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
+ if (atomic_dec_and_test(&blk_probes_ref))
+ blk_unregister_tracepoints();
+
++ spin_lock_irq(&running_trace_lock);
++ list_del(&bt->running_list);
++ spin_unlock_irq(&running_trace_lock);
+ blk_trace_free(bt);
+ return 0;
+ }
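
With the change above, the per-task notify walks a global, spinlock-protected list of running tracers instead of taking a single bt pointer, so every active trace receives the BLK_TN_PROCESS note. An illustrative userspace analogue, with a pthread mutex standing in for the irq-safe spinlock:

#include <pthread.h>
#include <stdio.h>

struct tracer { struct tracer *next; const char *name; };

static struct tracer *running_list;
static pthread_mutex_t running_lock = PTHREAD_MUTEX_INITIALIZER;

static void tracer_start(struct tracer *t)
{
	pthread_mutex_lock(&running_lock);
	t->next = running_list;
	running_list = t;
	pthread_mutex_unlock(&running_lock);
}

static void note_task(const char *comm)
{
	struct tracer *t;

	pthread_mutex_lock(&running_lock);
	for (t = running_list; t; t = t->next)   /* trace_note() analogue */
		printf("notify %s: task %s\n", t->name, comm);
	pthread_mutex_unlock(&running_lock);
}

int main(void)
{
	struct tracer a = { NULL, "sda" }, b = { NULL, "sdb" };

	tracer_start(&a);
	tracer_start(&b);
	note_task("dd");
	return 0;
}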
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 368a4d50cc30..b03b1f897b5e 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1763,6 +1763,16 @@ static void trace_module_add_events(struct module *mod)
+ {
+ struct ftrace_event_call **call, **start, **end;
+
++ if (!mod->num_trace_events)
++ return;
++
++ /* Don't add infrastructure for mods without tracepoints */
++ if (trace_module_has_bad_taint(mod)) {
++ pr_err("%s: module has bad taint, not creating trace events\n",
++ mod->name);
++ return;
++ }
++
+ start = mod->trace_events;
+ end = mod->trace_events + mod->num_trace_events;
+
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index 29f26540e9c9..031cc5655a51 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
+ EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
+
+ #ifdef CONFIG_MODULES
++bool trace_module_has_bad_taint(struct module *mod)
++{
++ return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
++}
++
+ static int tracepoint_module_coming(struct module *mod)
+ {
+ struct tp_module *tp_mod, *iter;
+@@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod)
+ * module headers (for forced load), to make sure we don't cause a crash.
+ * Staging and out-of-tree GPL modules are fine.
+ */
+- if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
++ if (trace_module_has_bad_taint(mod))
+ return 0;
+ mutex_lock(&tracepoints_mutex);
+ tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
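
trace_module_has_bad_taint() above factors out the mask test: any taint bit other than TAINT_OOT_MODULE and TAINT_CRAP disqualifies a module from trace-event setup. A standalone sketch of the same mask test; the bit numbers here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define TAINT_OOT_MODULE 12   /* out-of-tree: tolerated */
#define TAINT_CRAP       10   /* staging: tolerated */
#define TAINT_FORCED      1   /* forced load: not tolerated */

static bool has_bad_taint(unsigned int taints)
{
	return taints & ~((1u << TAINT_OOT_MODULE) | (1u << TAINT_CRAP));
}

int main(void)
{
	printf("%d\n", has_bad_taint(1u << TAINT_OOT_MODULE)); /* 0: fine */
	printf("%d\n", has_bad_taint(1u << TAINT_FORCED));     /* 1: skip events */
	return 0;
}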
+diff --git a/lib/show_mem.c b/lib/show_mem.c
+index b7c72311ad0c..5847a4921b8e 100644
+--- a/lib/show_mem.c
++++ b/lib/show_mem.c
+@@ -12,8 +12,7 @@
+ void show_mem(unsigned int filter)
+ {
+ pg_data_t *pgdat;
+- unsigned long total = 0, reserved = 0, shared = 0,
+- nonshared = 0, highmem = 0;
++ unsigned long total = 0, reserved = 0, highmem = 0;
+
+ printk("Mem-Info:\n");
+ show_free_areas(filter);
+@@ -22,43 +21,27 @@ void show_mem(unsigned int filter)
+ return;
+
+ for_each_online_pgdat(pgdat) {
+- unsigned long i, flags;
++ unsigned long flags;
++ int zoneid;
+
+ pgdat_resize_lock(pgdat, &flags);
+- for (i = 0; i < pgdat->node_spanned_pages; i++) {
+- struct page *page;
+- unsigned long pfn = pgdat->node_start_pfn + i;
+-
+- if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
+- touch_nmi_watchdog();
+-
+- if (!pfn_valid(pfn))
++ for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
++ struct zone *zone = &pgdat->node_zones[zoneid];
++ if (!populated_zone(zone))
+ continue;
+
+- page = pfn_to_page(pfn);
+-
+- if (PageHighMem(page))
+- highmem++;
++ total += zone->present_pages;
++			reserved += zone->present_pages - zone->managed_pages;
+
+- if (PageReserved(page))
+- reserved++;
+- else if (page_count(page) == 1)
+- nonshared++;
+- else if (page_count(page) > 1)
+- shared += page_count(page) - 1;
+-
+- total++;
++ if (is_highmem_idx(zoneid))
++ highmem += zone->present_pages;
+ }
+ pgdat_resize_unlock(pgdat, &flags);
+ }
+
+ printk("%lu pages RAM\n", total);
+-#ifdef CONFIG_HIGHMEM
+- printk("%lu pages HighMem\n", highmem);
+-#endif
++ printk("%lu pages HighMem/MovableOnly\n", highmem);
+ printk("%lu pages reserved\n", reserved);
+- printk("%lu pages shared\n", shared);
+- printk("%lu pages non-shared\n", nonshared);
+ #ifdef CONFIG_QUICKLIST
+ printk("%lu pages in pagetable cache\n",
+ quicklist_total_size());
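
The rewrite above drops the per-pfn scan in favour of summing three per-zone counters, turning an O(pages) report into an O(zones) one. A self-contained sketch of the new accounting, with a simplified zone struct standing in for the kernel's:

#include <stdio.h>

struct zone { unsigned long present, managed; int highmem; };

static void show_mem(const struct zone *zones, int nr)
{
	unsigned long total = 0, reserved = 0, highmem = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (!zones[i].present)
			continue;                /* populated_zone() analogue */
		total += zones[i].present;
		reserved += zones[i].present - zones[i].managed;
		if (zones[i].highmem)
			highmem += zones[i].present;
	}
	printf("%lu pages RAM\n%lu pages HighMem/MovableOnly\n%lu pages reserved\n",
	       total, highmem, reserved);
}

int main(void)
{
	struct zone z[2] = { { 1000, 950, 0 }, { 500, 490, 1 } };

	show_mem(z, 2);
	return 0;
}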
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 74ad00908c79..d2c6751879dc 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -252,7 +252,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ {
+ int nr_scanned = 0, total_isolated = 0;
+ struct page *cursor, *valid_page = NULL;
+- unsigned long nr_strict_required = end_pfn - blockpfn;
+ unsigned long flags;
+ bool locked = false;
+
+@@ -265,11 +264,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+
+ nr_scanned++;
+ if (!pfn_valid_within(blockpfn))
+- continue;
++ goto isolate_fail;
++
+ if (!valid_page)
+ valid_page = page;
+ if (!PageBuddy(page))
+- continue;
++ goto isolate_fail;
+
+ /*
+ * The zone lock must be held to isolate freepages.
+@@ -290,12 +290,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+
+ /* Recheck this is a buddy page under lock */
+ if (!PageBuddy(page))
+- continue;
++ goto isolate_fail;
+
+ /* Found a free page, break it into order-0 pages */
+ isolated = split_free_page(page);
+- if (!isolated && strict)
+- break;
+ total_isolated += isolated;
+ for (i = 0; i < isolated; i++) {
+ list_add(&page->lru, freelist);
+@@ -306,7 +304,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ if (isolated) {
+ blockpfn += isolated - 1;
+ cursor += isolated - 1;
++ continue;
+ }
++
++isolate_fail:
++ if (strict)
++ break;
++ else
++ continue;
++
+ }
+
+ trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
+@@ -316,7 +322,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ * pages requested were isolated. If there were any failures, 0 is
+ * returned and CMA will fail.
+ */
+- if (strict && nr_strict_required > total_isolated)
++ if (strict && blockpfn < end_pfn)
+ total_isolated = 0;
+
+ if (locked)
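
The isolate_fail label above funnels every failed iteration through one exit: strict (CMA) callers abort the scan, best-effort callers skip the page and continue, and the final blockpfn < end_pfn test replaces the old nr_strict_required counter. A small sketch of that control flow, with free/used pages modelled as ints:

#include <stdbool.h>
#include <stdio.h>

static int isolate(const int *pages, int n, bool strict)
{
	int isolated = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (pages[i] == 0)      /* not a free page */
			goto isolate_fail;
		isolated++;
		continue;
isolate_fail:
		if (strict)
			break;              /* CMA: any hole invalidates the range */
	}
	return isolated;
}

int main(void)
{
	int pages[] = { 1, 0, 1, 1 };

	printf("best effort: %d\n", isolate(pages, 4, false)); /* 3 */
	printf("strict: %d\n", isolate(pages, 4, true));       /* 1 */
	return 0;
}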
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index dd7789ce7572..389973fd6bb7 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1897,7 +1897,7 @@ out:
+ return ret;
+ }
+
+-#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
++#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
+
+ int hugepage_madvise(struct vm_area_struct *vma,
+ unsigned long *vm_flags, int advice)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 8e7adcba8176..15429b92ff98 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1089,8 +1089,8 @@ skip_node:
+ * skipping css reference should be safe.
+ */
+ if (next_css) {
+- if ((next_css->flags & CSS_ONLINE) &&
+- (next_css == &root->css || css_tryget(next_css)))
++ if ((next_css == &root->css) ||
++ ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
+ return mem_cgroup_from_css(next_css);
+
+ prev_css = next_css;
+@@ -6346,11 +6346,24 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
+ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
+ {
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
++ struct cgroup_subsys_state *iter;
+
+ kmem_cgroup_css_offline(memcg);
+
+ mem_cgroup_invalidate_reclaim_iterators(memcg);
+- mem_cgroup_reparent_charges(memcg);
++
++ /*
++ * This requires that offlining is serialized. Right now that is
++ * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
++ */
++ rcu_read_lock();
++ css_for_each_descendant_post(iter, css) {
++ rcu_read_unlock();
++ mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
++ rcu_read_lock();
++ }
++ rcu_read_unlock();
++
+ mem_cgroup_destroy_all_caches(memcg);
+ vmpressure_cleanup(&memcg->vmpressure);
+ }
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 317ea747d2cd..06f847933eeb 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1217,6 +1217,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ }
+ local_irq_restore(flags);
+ }
++static bool gfp_thisnode_allocation(gfp_t gfp_mask)
++{
++ return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
++}
++#else
++static bool gfp_thisnode_allocation(gfp_t gfp_mask)
++{
++ return false;
++}
+ #endif
+
+ /*
+@@ -1553,7 +1562,13 @@ again:
+ get_pageblock_migratetype(page));
+ }
+
+- __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
++ /*
++ * NOTE: GFP_THISNODE allocations do not partake in the kswapd
++ * aging protocol, so they can't be fair.
++ */
++ if (!gfp_thisnode_allocation(gfp_flags))
++ __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
++
+ __count_zone_vm_events(PGALLOC, zone, 1 << order);
+ zone_statistics(preferred_zone, zone, gfp_flags);
+ local_irq_restore(flags);
+@@ -1925,8 +1940,12 @@ zonelist_scan:
+ * ultimately fall back to remote zones that do not
+ * partake in the fairness round-robin cycle of this
+ * zonelist.
++ *
++ * NOTE: GFP_THISNODE allocations do not partake in
++ * the kswapd aging protocol, so they can't be fair.
+ */
+- if (alloc_flags & ALLOC_WMARK_LOW) {
++ if ((alloc_flags & ALLOC_WMARK_LOW) &&
++ !gfp_thisnode_allocation(gfp_mask)) {
+ if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
+ continue;
+ if (!zone_local(preferred_zone, zone))
+@@ -2492,8 +2511,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ * allowed per node queues are empty and that nodes are
+ * over allocated.
+ */
+- if (IS_ENABLED(CONFIG_NUMA) &&
+- (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
++ if (gfp_thisnode_allocation(gfp_mask))
+ goto nopage;
+
+ restart:
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 11af243bf92f..467e3e071832 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -764,9 +764,6 @@ static void neigh_periodic_work(struct work_struct *work)
+ nht = rcu_dereference_protected(tbl->nht,
+ lockdep_is_held(&tbl->lock));
+
+- if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+- goto out;
+-
+ /*
+ * periodically recompute ReachableTime from random function
+ */
+@@ -779,6 +776,9 @@ static void neigh_periodic_work(struct work_struct *work)
+ neigh_rand_reach_time(p->base_reachable_time);
+ }
+
++ if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
++ goto out;
++
+ for (i = 0 ; i < (1 << nht->hash_shift); i++) {
+ np = &nht->hash_buckets[i];
+
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index c31e3ad98ef2..ba22cc3a5a53 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -109,7 +109,6 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
+ secpath_reset(skb);
+ if (!skb->l4_rxhash)
+ skb->rxhash = 0;
+- skb_dst_drop(skb);
+ skb->vlan_tci = 0;
+ skb_set_queue_mapping(skb, 0);
+ skb->pkt_type = PACKET_HOST;
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 14a15c49129d..15e024105f91 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -89,8 +89,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
+
+
+ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
+- __be16 dport, __u32 sseq, __u32 count,
+- __u32 data)
++ __be16 dport, __u32 sseq, __u32 data)
+ {
+ /*
+ * Compute the secure sequence number.
+@@ -102,7 +101,7 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
+ * As an extra hack, we add a small "data" value that encodes the
+ * MSS into the second hash value.
+ */
+-
++ u32 count = tcp_cookie_time();
+ return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
+ sseq + (count << COOKIEBITS) +
+ ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
+@@ -114,22 +113,21 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
+ * If the syncookie is bad, the data returned will be out of
+ * range. This must be checked by the caller.
+ *
+- * The count value used to generate the cookie must be within
+- * "maxdiff" if the current (passed-in) "count". The return value
+- * is (__u32)-1 if this test fails.
++ * The count value used to generate the cookie must be less than
++ * MAX_SYNCOOKIE_AGE minutes in the past.
++ * The return value is (__u32)-1 if this test fails.
+ */
+ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
+- __be16 sport, __be16 dport, __u32 sseq,
+- __u32 count, __u32 maxdiff)
++ __be16 sport, __be16 dport, __u32 sseq)
+ {
+- __u32 diff;
++ u32 diff, count = tcp_cookie_time();
+
+ /* Strip away the layers from the cookie */
+ cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
+
+ /* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
+ diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
+- if (diff >= maxdiff)
++ if (diff >= MAX_SYNCOOKIE_AGE)
+ return (__u32)-1;
+
+ return (cookie -
+@@ -138,22 +136,22 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
+ }
+
+ /*
+- * MSS Values are taken from the 2009 paper
+- * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
+- * - values 1440 to 1460 accounted for 80% of observed mss values
+- * - values outside the 536-1460 range are rare (<0.2%).
++ * MSS Values are chosen based on the 2011 paper
++ * 'An Analysis of TCP Maximum Segment Sizes' by S. Alcock and R. Nelson.
++ * Values ..
++ * .. lower than 536 are rare (< 0.2%)
++ * .. between 537 and 1299 account for less than 1.5% of observed values
++ * .. in the 1300-1349 range account for about 15 to 20% of observed mss values
++ * .. exceeding 1460 are very rare (< 0.04%)
+ *
+- * Table must be sorted.
++ * 1460 is the single most frequently announced mss value (30 to 46% depending
++ * on monitor location). Table must be sorted.
+ */
+ static __u16 const msstab[] = {
+- 64,
+- 512,
+ 536,
+- 1024,
+- 1440,
++ 1300,
++ 1440, /* 1440, 1452: PPPoE */
+ 1460,
+- 4312,
+- 8960,
+ };
+
+ /*
+@@ -173,7 +171,7 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
+
+ return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
+ th->source, th->dest, ntohl(th->seq),
+- jiffies / (HZ * 60), mssind);
++ mssind);
+ }
+ EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
+
+@@ -189,13 +187,6 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
+ }
+
+ /*
+- * This (misnamed) value is the age of syncookie which is permitted.
+- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
+- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
+- * backoff) to compute at runtime so it's currently hardcoded here.
+- */
+-#define COUNTER_TRIES 4
+-/*
+ * Check if a ack sequence number is a valid syncookie.
+ * Return the decoded mss if it is, or 0 if not.
+ */
+@@ -204,9 +195,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+ {
+ __u32 seq = ntohl(th->seq) - 1;
+ __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
+- th->source, th->dest, seq,
+- jiffies / (HZ * 60),
+- COUNTER_TRIES);
++ th->source, th->dest, seq);
+
+ return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
+ }
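
After this change the cookie's minute counter comes from tcp_cookie_time() on both the generate and check sides, and validity reduces to age < MAX_SYNCOOKIE_AGE. A hedged sketch of the pack/age-check arithmetic; the hash terms are omitted, so this shows the structure only, not the kernel's cookie_hash():

#include <stdint.h>
#include <stdio.h>

#define COOKIEBITS 24
#define COOKIEMASK (((uint32_t)1 << COOKIEBITS) - 1)
#define MAX_SYNCOOKIE_AGE 2   /* minutes */

static uint32_t make_cookie(uint32_t now_min, uint32_t data)
{
	return (now_min << COOKIEBITS) | (data & COOKIEMASK);
}

static uint32_t check_cookie(uint32_t cookie, uint32_t now_min)
{
	uint32_t diff = (now_min - (cookie >> COOKIEBITS)) &
			((uint32_t)-1 >> COOKIEBITS);

	if (diff >= MAX_SYNCOOKIE_AGE)
		return (uint32_t)-1;      /* too old: reject */
	return cookie & COOKIEMASK;   /* recovered data (mss table index) */
}

int main(void)
{
	uint32_t c = make_cookie(100, 3);

	printf("fresh: %u\n", check_cookie(c, 101));  /* 3 */
	printf("stale: %u\n", check_cookie(c, 105));  /* 4294967295 */
	return 0;
}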
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index be5246e1d5b6..531ab5721d79 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1000,7 +1000,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp)
+ }
+ }
+
+-static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
++static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
++ int *copied, size_t size)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ int err, flags;
+@@ -1015,11 +1016,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
+ if (unlikely(tp->fastopen_req == NULL))
+ return -ENOBUFS;
+ tp->fastopen_req->data = msg;
++ tp->fastopen_req->size = size;
+
+ flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
+ err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+ msg->msg_namelen, flags);
+- *size = tp->fastopen_req->copied;
++ *copied = tp->fastopen_req->copied;
+ tcp_free_fastopen_req(tp);
+ return err;
+ }
+@@ -1039,7 +1041,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+
+ flags = msg->msg_flags;
+ if (flags & MSG_FASTOPEN) {
+- err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
++ err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
+ if (err == -EINPROGRESS && copied_syn > 0)
+ goto out;
+ else if (err)
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index fb8227a8c004..e088932bcfae 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2902,7 +2902,12 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
+ space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
+ MAX_TCP_OPTION_SPACE;
+
+- syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
++ space = min_t(size_t, space, fo->size);
++
++ /* limit to order-0 allocations */
++ space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
++
++ syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
+ sk->sk_allocation);
+ if (syn_data == NULL)
+ goto fallback;
+diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
+index 140748debc4a..8af3eb57f438 100644
+--- a/net/ipv6/exthdrs_core.c
++++ b/net/ipv6/exthdrs_core.c
+@@ -212,7 +212,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+ found = (nexthdr == target);
+
+ if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
+- if (target < 0)
++ if (target < 0 || found)
+ break;
+ return -ENOENT;
+ }
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index bf63ac8a49b9..d703218a653b 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -24,26 +24,21 @@
+ #define COOKIEBITS 24 /* Upper bits store count */
+ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
+
+-/* Table must be sorted. */
++/* RFC 2460, Section 8.3:
++ * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
++ *
++ * Due to IPV6_MIN_MTU=1280 the lowest possible MSS is 1220, which allows
++ * using higher values than ipv4 tcp syncookies.
++ * The other values are chosen based on ethernet (1500 and 9k MTU), plus
++ * one that accounts for common encap (PPPoE) overhead. Table must be sorted.
++ */
+ static __u16 const msstab[] = {
+- 64,
+- 512,
+- 536,
+- 1280 - 60,
++ 1280 - 60, /* IPV6_MIN_MTU - 60 */
+ 1480 - 60,
+ 1500 - 60,
+- 4460 - 60,
+ 9000 - 60,
+ };
+
+-/*
+- * This (misnamed) value is the age of syncookie which is permitted.
+- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
+- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
+- * backoff) to compute at runtime so it's currently hardcoded here.
+- */
+-#define COUNTER_TRIES 4
+-
+ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst)
+@@ -86,8 +81,9 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *dadd
+ static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __be16 sport, __be16 dport, __u32 sseq,
+- __u32 count, __u32 data)
++ __u32 data)
+ {
++ u32 count = tcp_cookie_time();
+ return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
+ sseq + (count << COOKIEBITS) +
+ ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
+@@ -96,15 +92,14 @@ static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
+
+ static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
+ const struct in6_addr *daddr, __be16 sport,
+- __be16 dport, __u32 sseq, __u32 count,
+- __u32 maxdiff)
++ __be16 dport, __u32 sseq)
+ {
+- __u32 diff;
++ __u32 diff, count = tcp_cookie_time();
+
+ cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
+
+ diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
+- if (diff >= maxdiff)
++ if (diff >= MAX_SYNCOOKIE_AGE)
+ return (__u32)-1;
+
+ return (cookie -
+@@ -125,8 +120,7 @@ u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
+ *mssp = msstab[mssind];
+
+ return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
+- th->dest, ntohl(th->seq),
+- jiffies / (HZ * 60), mssind);
++ th->dest, ntohl(th->seq), mssind);
+ }
+ EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
+
+@@ -146,8 +140,7 @@ int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
+ {
+ __u32 seq = ntohl(th->seq) - 1;
+ __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
+- th->source, th->dest, seq,
+- jiffies / (HZ * 60), COUNTER_TRIES);
++ th->source, th->dest, seq);
+
+ return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
+ }
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
+index 06556d6e1a4d..ab4569df9cef 100644
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -111,7 +111,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+ fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+ fptr->nexthdr = nexthdr;
+ fptr->reserved = 0;
+- ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
++ fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+
+ /* Fragment the skb. ipv6 header and the remaining fields of the
+ * fragment header are updated in ipv6_gso_segment()
+diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
+index 22290a929b94..641f43219a48 100644
+--- a/net/mac80211/mesh_ps.c
++++ b/net/mac80211/mesh_ps.c
+@@ -36,6 +36,7 @@ static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
+ sdata->vif.addr);
+ nullfunc->frame_control = fc;
+ nullfunc->duration_id = 0;
++ nullfunc->seq_ctrl = 0;
+ /* no address resolution for this frame -> set addr 1 immediately */
+ memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+ memset(skb_put(skb, 2), 0, 2); /* append QoS control field */
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 86e4ad56b573..8d7f4abe65ba 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -282,6 +282,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
+ switch (vht_oper->chan_width) {
+ case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ vht_chandef.width = chandef->width;
++ vht_chandef.center_freq1 = chandef->center_freq1;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ vht_chandef.width = NL80211_CHAN_WIDTH_80;
+@@ -331,6 +332,28 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
+ ret = 0;
+
+ out:
++ /*
++ * When tracking the current AP, don't do any further checks if the
++ * new chandef is identical to the one we're currently using for the
++ * connection. This keeps us from playing ping-pong with regulatory,
++ * without it the following can happen (for example):
++ * - connect to an AP with 80 MHz, world regdom allows 80 MHz
++ * - AP advertises regdom US
++ * - CRDA loads regdom US with 80 MHz prohibited (old database)
++ * - the code below detects an unsupported channel, downgrades, and
++ * we disconnect from the AP in the caller
++ * - disconnect causes CRDA to reload world regdomain and the game
++ * starts anew.
++ * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
++ *
++ * It seems possible that there are still scenarios with CSA or real
++	 * bandwidth changes where this could happen, but those cases are
++ * less common and wouldn't completely prevent using the AP.
++ */
++ if (tracking &&
++ cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef))
++ return ret;
++
+ /* don't print the message below for VHT mismatch if VHT is disabled */
+ if (ret & IEEE80211_STA_DISABLE_VHT)
+ vht_chandef = *chandef;
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index aeb967a0aeed..db41c190e76d 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -340,6 +340,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
+ return NULL;
+
+ spin_lock_init(&sta->lock);
++ spin_lock_init(&sta->ps_lock);
+ INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
+ INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
+ mutex_init(&sta->ampdu_mlme.mtx);
+@@ -1049,6 +1050,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+
+ skb_queue_head_init(&pending);
+
++ /* sync with ieee80211_tx_h_unicast_ps_buf */
++ spin_lock(&sta->ps_lock);
+ /* Send all buffered frames to the station */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ int count = skb_queue_len(&pending), tmp;
+@@ -1068,6 +1071,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ }
+
+ ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
++ spin_unlock(&sta->ps_lock);
+
+ local->total_ps_buffered -= buffered;
+
+@@ -1114,6 +1118,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
+ memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
++ nullfunc->seq_ctrl = 0;
+
+ skb->priority = tid;
+ skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
+diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
+index 4208dbd5861f..492d59cbf289 100644
+--- a/net/mac80211/sta_info.h
++++ b/net/mac80211/sta_info.h
+@@ -245,6 +245,7 @@ struct sta_ampdu_mlme {
+ * @drv_unblock_wk: used for driver PS unblocking
+ * @listen_interval: listen interval of this station, when we're acting as AP
+ * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
++ * @ps_lock: used for powersave (when mac80211 is the AP) related locking
+ * @ps_tx_buf: buffers (per AC) of frames to transmit to this station
+ * when it leaves power saving state or polls
+ * @tx_filtered: buffers (per AC) of frames we already tried to
+@@ -328,10 +329,8 @@ struct sta_info {
+ /* use the accessors defined below */
+ unsigned long _flags;
+
+- /*
+- * STA powersave frame queues, no more than the internal
+- * locking required.
+- */
++ /* STA powersave lock and frame queues */
++ spinlock_t ps_lock;
+ struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
+ struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
+ unsigned long driver_buffered_tids;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 81dca92176c7..d6a47e76efff 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -477,6 +477,20 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ sta->sta.addr, sta->sta.aid, ac);
+ if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
+ purge_old_ps_buffers(tx->local);
++
++ /* sync with ieee80211_sta_ps_deliver_wakeup */
++ spin_lock(&sta->ps_lock);
++ /*
++	 * STA woke up in the meantime and all the frames on ps_tx_buf have
++ * been queued to pending queue. No reordering can happen, go
++ * ahead and Tx the packet.
++ */
++ if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
++ !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
++ spin_unlock(&sta->ps_lock);
++ return TX_CONTINUE;
++ }
++
+ if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
+ struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
+ ps_dbg(tx->sdata,
+@@ -490,6 +504,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ info->control.vif = &tx->sdata->vif;
+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
++ spin_unlock(&sta->ps_lock);
+
+ if (!timer_pending(&local->sta_cleanup))
+ mod_timer(&local->sta_cleanup,
+diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
+index afba19cb6f87..a282fddf8b00 100644
+--- a/net/mac80211/wme.c
++++ b/net/mac80211/wme.c
+@@ -153,6 +153,11 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
+ return IEEE80211_AC_BE;
+ }
+
++ if (skb->protocol == sdata->control_port_protocol) {
++ skb->priority = 7;
++ return ieee80211_downgrade_queue(sdata, skb);
++ }
++
+ /* use the data classifier to determine what 802.1d tag the
+ * data frame has */
+ skb->priority = cfg80211_classify8021d(skb);
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index dfe3f36ff2aa..56ebe71cfe13 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -759,6 +759,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
+ struct sctp_chunk auth;
+ sctp_ierror_t ret;
+
++ /* Make sure that we and the peer are AUTH capable */
++ if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
++ kfree_skb(chunk->auth_chunk);
++ sctp_association_free(new_asoc);
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++ }
++
+ /* set-up our fake chunk so that we can process it */
+ auth.skb = chunk->auth_chunk;
+ auth.asoc = chunk->asoc;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 83a1daa642bb..1d034825fcc3 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -853,6 +853,8 @@ static void xs_close(struct rpc_xprt *xprt)
+
+ dprintk("RPC: xs_close xprt %p\n", xprt);
+
++ cancel_delayed_work_sync(&transport->connect_worker);
++
+ xs_reset_transport(transport);
+ xprt->reestablish_timeout = 0;
+
+@@ -887,12 +889,8 @@ static void xs_local_destroy(struct rpc_xprt *xprt)
+ */
+ static void xs_destroy(struct rpc_xprt *xprt)
+ {
+- struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+-
+ dprintk("RPC: xs_destroy xprt %p\n", xprt);
+
+- cancel_delayed_work_sync(&transport->connect_worker);
+-
+ xs_local_destroy(xprt);
+ }
+
+@@ -1834,6 +1832,10 @@ static inline void xs_reclassify_socket(int family, struct socket *sock)
+ }
+ #endif
+
++static void xs_dummy_setup_socket(struct work_struct *work)
++{
++}
++
+ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
+ struct sock_xprt *transport, int family, int type, int protocol)
+ {
+@@ -2673,6 +2675,9 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
+ xprt->ops = &xs_local_ops;
+ xprt->timeout = &xs_local_default_timeout;
+
++ INIT_DELAYED_WORK(&transport->connect_worker,
++ xs_dummy_setup_socket);
++
+ switch (sun->sun_family) {
+ case AF_LOCAL:
+ if (sun->sun_path[0] != '/') {
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index a427623ee574..d7c1ac621a90 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -161,9 +161,8 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+
+ static inline unsigned int unix_hash_fold(__wsum n)
+ {
+- unsigned int hash = (__force unsigned int)n;
++ unsigned int hash = (__force unsigned int)csum_fold(n);
+
+- hash ^= hash>>16;
+ hash ^= hash>>8;
+ return hash&(UNIX_HASH_SIZE-1);
+ }
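
The fix above folds the 32-bit checksum down to 16 bits with csum_fold() before the 8-bit fold, so high-order input bits can no longer vanish from the bucket index. A sketch with an illustrative fold16() standing in for csum_fold():

#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 256

static uint16_t fold16(uint32_t n)            /* csum_fold() analogue */
{
	n = (n & 0xffff) + (n >> 16);
	n = (n & 0xffff) + (n >> 16);
	return (uint16_t)~n;
}

static unsigned int hash_fold(uint32_t csum)
{
	unsigned int hash = fold16(csum);

	hash ^= hash >> 8;
	return hash & (HASH_SIZE - 1);
}

int main(void)
{
	printf("%u\n", hash_fold(0x12345678));
	return 0;
}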
+diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
+index 2906d520eea7..3be02b680268 100644
+--- a/net/xfrm/xfrm_ipcomp.c
++++ b/net/xfrm/xfrm_ipcomp.c
+@@ -141,14 +141,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
+ const int plen = skb->len;
+ int dlen = IPCOMP_SCRATCH_SIZE;
+ u8 *start = skb->data;
+- const int cpu = get_cpu();
+- u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
+- struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
++ struct crypto_comp *tfm;
++ u8 *scratch;
+ int err;
+
+ local_bh_disable();
++ scratch = *this_cpu_ptr(ipcomp_scratches);
++ tfm = *this_cpu_ptr(ipcd->tfms);
+ err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
+- local_bh_enable();
+ if (err)
+ goto out;
+
+@@ -158,13 +158,13 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
+ }
+
+ memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
+- put_cpu();
++ local_bh_enable();
+
+ pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
+ return 0;
+
+ out:
+- put_cpu();
++ local_bh_enable();
+ return err;
+ }
+
+diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
+index 30f119b1d1ec..820313a04d49 100644
+--- a/security/selinux/ss/ebitmap.c
++++ b/security/selinux/ss/ebitmap.c
+@@ -213,7 +213,12 @@ netlbl_import_failure:
+ }
+ #endif /* CONFIG_NETLABEL */
+
+-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
++/*
++ * Check to see if all the bits set in e2 are also set in e1. Optionally,
++ * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
++ * last_e2bit.
++ */
++int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
+ {
+ struct ebitmap_node *n1, *n2;
+ int i;
+@@ -223,14 +228,25 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
+
+ n1 = e1->node;
+ n2 = e2->node;
++
+ while (n1 && n2 && (n1->startbit <= n2->startbit)) {
+ if (n1->startbit < n2->startbit) {
+ n1 = n1->next;
+ continue;
+ }
+- for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
++ for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
++ i--; /* Skip trailing NULL map entries */
++ if (last_e2bit && (i >= 0)) {
++ u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
++ __fls(n2->maps[i]);
++ if (lastsetbit > last_e2bit)
++ return 0;
++ }
++
++ while (i >= 0) {
+ if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
+ return 0;
++ i--;
+ }
+
+ n1 = n1->next;
+diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
+index 922f8afa89dd..712c8a7b8e8b 100644
+--- a/security/selinux/ss/ebitmap.h
++++ b/security/selinux/ss/ebitmap.h
+@@ -16,7 +16,13 @@
+
+ #include <net/netlabel.h>
+
+-#define EBITMAP_UNIT_NUMS ((32 - sizeof(void *) - sizeof(u32)) \
++#ifdef CONFIG_64BIT
++#define EBITMAP_NODE_SIZE 64
++#else
++#define EBITMAP_NODE_SIZE 32
++#endif
++
++#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
+ / sizeof(unsigned long))
+ #define EBITMAP_UNIT_SIZE BITS_PER_LONG
+ #define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
+@@ -117,7 +123,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
+
+ int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
+ int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
+-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2);
++int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
+ int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
+ int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
+ void ebitmap_destroy(struct ebitmap *e);
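
ebitmap_contains() now also enforces an upper bound: when last_e2bit is non-zero, the highest bit set in e2 must not exceed it, which lets mls_level_isvalid() do the range and containment checks in one pass. A flat-bitmap sketch of the extended test (the kernel's ebitmap is a linked list of nodes; a single unsigned long stands in here for brevity):

#include <stdbool.h>
#include <stdio.h>

static bool contains(unsigned long e1, unsigned long e2, unsigned last_e2bit)
{
	if ((e1 & e2) != e2)
		return false;           /* some e2 bit missing from e1 */
	if (last_e2bit && e2) {
		unsigned highest = 63 - __builtin_clzl(e2);  /* __fls() analogue */

		if (highest > last_e2bit)
			return false;       /* category out of range */
	}
	return true;
}

int main(void)
{
	printf("%d\n", contains(0x0f, 0x05, 3));  /* 1: subset, in range */
	printf("%d\n", contains(0x0f, 0x05, 1));  /* 0: bit 2 exceeds limit */
	printf("%d\n", contains(0x03, 0x05, 0));  /* 0: bit 2 not in e1 */
	return 0;
}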
+diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
+index 40de8d3f208e..c85bc1ec040c 100644
+--- a/security/selinux/ss/mls.c
++++ b/security/selinux/ss/mls.c
+@@ -160,8 +160,6 @@ void mls_sid_to_context(struct context *context,
+ int mls_level_isvalid(struct policydb *p, struct mls_level *l)
+ {
+ struct level_datum *levdatum;
+- struct ebitmap_node *node;
+- int i;
+
+ if (!l->sens || l->sens > p->p_levels.nprim)
+ return 0;
+@@ -170,19 +168,13 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l)
+ if (!levdatum)
+ return 0;
+
+- ebitmap_for_each_positive_bit(&l->cat, node, i) {
+- if (i > p->p_cats.nprim)
+- return 0;
+- if (!ebitmap_get_bit(&levdatum->level->cat, i)) {
+- /*
+- * Category may not be associated with
+- * sensitivity.
+- */
+- return 0;
+- }
+- }
+-
+- return 1;
++	/*
++	 * Return 1 iff all the bits set in l->cat are also set in
++ * levdatum->level->cat and no bit in l->cat is larger than
++ * p->p_cats.nprim.
++ */
++ return ebitmap_contains(&levdatum->level->cat, &l->cat,
++ p->p_cats.nprim);
+ }
+
+ int mls_range_isvalid(struct policydb *p, struct mls_range *r)
+diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
+index 03bed52a8052..e93648774137 100644
+--- a/security/selinux/ss/mls_types.h
++++ b/security/selinux/ss/mls_types.h
+@@ -35,7 +35,7 @@ static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
+ static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
+ {
+ return ((l1->sens >= l2->sens) &&
+- ebitmap_contains(&l1->cat, &l2->cat));
++ ebitmap_contains(&l1->cat, &l2->cat, 0));
+ }
+
+ #define mls_level_incomp(l1, l2) \
+diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
+index d0d7ac1e99d2..f62356c2f54c 100644
+--- a/sound/pci/hda/hda_eld.c
++++ b/sound/pci/hda/hda_eld.c
+@@ -478,10 +478,9 @@ static void hdmi_print_sad_info(int i, struct cea_sad *a,
+ snd_iprintf(buffer, "sad%d_profile\t\t%d\n", i, a->profile);
+ }
+
+-static void hdmi_print_eld_info(struct snd_info_entry *entry,
+- struct snd_info_buffer *buffer)
++void snd_hdmi_print_eld_info(struct hdmi_eld *eld,
++ struct snd_info_buffer *buffer)
+ {
+- struct hdmi_eld *eld = entry->private_data;
+ struct parsed_hdmi_eld *e = &eld->info;
+ char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE];
+ int i;
+@@ -500,13 +499,10 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
+ [4 ... 7] = "reserved"
+ };
+
+- mutex_lock(&eld->lock);
+ snd_iprintf(buffer, "monitor_present\t\t%d\n", eld->monitor_present);
+ snd_iprintf(buffer, "eld_valid\t\t%d\n", eld->eld_valid);
+- if (!eld->eld_valid) {
+- mutex_unlock(&eld->lock);
++ if (!eld->eld_valid)
+ return;
+- }
+ snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name);
+ snd_iprintf(buffer, "connection_type\t\t%s\n",
+ eld_connection_type_names[e->conn_type]);
+@@ -528,13 +524,11 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
+
+ for (i = 0; i < e->sad_count; i++)
+ hdmi_print_sad_info(i, e->sad + i, buffer);
+- mutex_unlock(&eld->lock);
+ }
+
+-static void hdmi_write_eld_info(struct snd_info_entry *entry,
+- struct snd_info_buffer *buffer)
++void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
++ struct snd_info_buffer *buffer)
+ {
+- struct hdmi_eld *eld = entry->private_data;
+ struct parsed_hdmi_eld *e = &eld->info;
+ char line[64];
+ char name[64];
+@@ -542,7 +536,6 @@ static void hdmi_write_eld_info(struct snd_info_entry *entry,
+ long long val;
+ unsigned int n;
+
+- mutex_lock(&eld->lock);
+ while (!snd_info_get_line(buffer, line, sizeof(line))) {
+ if (sscanf(line, "%s %llx", name, &val) != 2)
+ continue;
+@@ -594,38 +587,7 @@ static void hdmi_write_eld_info(struct snd_info_entry *entry,
+ e->sad_count = n + 1;
+ }
+ }
+- mutex_unlock(&eld->lock);
+-}
+-
+-
+-int snd_hda_eld_proc_new(struct hda_codec *codec, struct hdmi_eld *eld,
+- int index)
+-{
+- char name[32];
+- struct snd_info_entry *entry;
+- int err;
+-
+- snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
+- err = snd_card_proc_new(codec->bus->card, name, &entry);
+- if (err < 0)
+- return err;
+-
+- snd_info_set_text_ops(entry, eld, hdmi_print_eld_info);
+- entry->c.text.write = hdmi_write_eld_info;
+- entry->mode |= S_IWUSR;
+- eld->proc_entry = entry;
+-
+- return 0;
+-}
+-
+-void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld)
+-{
+- if (!codec->bus->shutdown && eld->proc_entry) {
+- snd_device_free(codec->bus->card, eld->proc_entry);
+- eld->proc_entry = NULL;
+- }
+ }
+-
+ #endif /* CONFIG_PROC_FS */
+
+ /* update PCM info based on ELD */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index f7e76619f7c9..ccf5eb6b3d37 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -169,6 +169,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
+ "{Intel, PPT},"
+ "{Intel, LPT},"
+ "{Intel, LPT_LP},"
++ "{Intel, WPT_LP},"
+ "{Intel, HPT},"
+ "{Intel, PBG},"
+ "{Intel, SCH},"
+@@ -568,6 +569,7 @@ enum {
+ AZX_DRIVER_ICH,
+ AZX_DRIVER_PCH,
+ AZX_DRIVER_SCH,
++ AZX_DRIVER_HDMI,
+ AZX_DRIVER_ATI,
+ AZX_DRIVER_ATIHDMI,
+ AZX_DRIVER_ATIHDMI_NS,
+@@ -647,6 +649,7 @@ static char *driver_short_names[] = {
+ [AZX_DRIVER_ICH] = "HDA Intel",
+ [AZX_DRIVER_PCH] = "HDA Intel PCH",
+ [AZX_DRIVER_SCH] = "HDA Intel MID",
++ [AZX_DRIVER_HDMI] = "HDA Intel HDMI",
+ [AZX_DRIVER_ATI] = "HDA ATI SB",
+ [AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
+ [AZX_DRIVER_ATIHDMI_NS] = "HDA ATI HDMI",
+@@ -3994,13 +3997,16 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ /* Lynx Point-LP */
+ { PCI_DEVICE(0x8086, 0x9c21),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++ /* Wildcat Point-LP */
++ { PCI_DEVICE(0x8086, 0x9ca0),
++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ /* Haswell */
+ { PCI_DEVICE(0x8086, 0x0a0c),
+- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
++ .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+ { PCI_DEVICE(0x8086, 0x0c0c),
+- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
++ .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+ { PCI_DEVICE(0x8086, 0x0d0c),
+- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
++ .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+ /* 5 Series/3400 */
+ { PCI_DEVICE(0x8086, 0x3b56),
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+@@ -4080,6 +4086,22 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0xaa48),
+ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++ { PCI_DEVICE(0x1002, 0xaa50),
++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++ { PCI_DEVICE(0x1002, 0xaa58),
++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++ { PCI_DEVICE(0x1002, 0xaa60),
++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++ { PCI_DEVICE(0x1002, 0xaa68),
++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++ { PCI_DEVICE(0x1002, 0xaa80),
++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++ { PCI_DEVICE(0x1002, 0xaa88),
++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++ { PCI_DEVICE(0x1002, 0xaa90),
++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++ { PCI_DEVICE(0x1002, 0xaa98),
++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0x9902),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI },
+ { PCI_DEVICE(0x1002, 0xaaa0),
+diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
+index 2e7493ef8ee0..040d93324f32 100644
+--- a/sound/pci/hda/hda_local.h
++++ b/sound/pci/hda/hda_local.h
+@@ -751,10 +751,6 @@ struct hdmi_eld {
+ int eld_size;
+ char eld_buffer[ELD_MAX_SIZE];
+ struct parsed_hdmi_eld info;
+- struct mutex lock;
+-#ifdef CONFIG_PROC_FS
+- struct snd_info_entry *proc_entry;
+-#endif
+ };
+
+ int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid);
+@@ -767,20 +763,10 @@ void snd_hdmi_eld_update_pcm_info(struct parsed_hdmi_eld *e,
+ struct hda_pcm_stream *hinfo);
+
+ #ifdef CONFIG_PROC_FS
+-int snd_hda_eld_proc_new(struct hda_codec *codec, struct hdmi_eld *eld,
+- int index);
+-void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld);
+-#else
+-static inline int snd_hda_eld_proc_new(struct hda_codec *codec,
+- struct hdmi_eld *eld,
+- int index)
+-{
+- return 0;
+-}
+-static inline void snd_hda_eld_proc_free(struct hda_codec *codec,
+- struct hdmi_eld *eld)
+-{
+-}
++void snd_hdmi_print_eld_info(struct hdmi_eld *eld,
++ struct snd_info_buffer *buffer);
++void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
++ struct snd_info_buffer *buffer);
+ #endif
+
+ #define SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE 80
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index 7fc15814c618..41ebdd8812b1 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -1085,6 +1085,7 @@ static int patch_ad1884(struct hda_codec *codec)
+ spec = codec->spec;
+
+ spec->gen.mixer_nid = 0x20;
++ spec->gen.mixer_merge_nid = 0x21;
+ spec->gen.beep_nid = 0x10;
+ set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
+
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 417e0fc2d119..adb374babd18 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -45,6 +45,7 @@ module_param(static_hdmi_pcm, bool, 0644);
+ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
+
+ #define is_haswell(codec) ((codec)->vendor_id == 0x80862807)
++#define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
+
+ struct hdmi_spec_per_cvt {
+ hda_nid_t cvt_nid;
+@@ -63,9 +64,11 @@ struct hdmi_spec_per_pin {
+ hda_nid_t pin_nid;
+ int num_mux_nids;
+ hda_nid_t mux_nids[HDA_MAX_CONNECTIONS];
++ hda_nid_t cvt_nid;
+
+ struct hda_codec *codec;
+ struct hdmi_eld sink_eld;
++ struct mutex lock;
+ struct delayed_work work;
+ struct snd_kcontrol *eld_ctl;
+ int repoll_count;
+@@ -75,6 +78,9 @@ struct hdmi_spec_per_pin {
+ bool chmap_set; /* channel-map override by ALSA API? */
+ unsigned char chmap[8]; /* ALSA API channel-map */
+ char pcm_name[8]; /* filled in build_pcm callbacks */
++#ifdef CONFIG_PROC_FS
++ struct snd_info_entry *proc_entry;
++#endif
+ };
+
+ struct hdmi_spec {
+@@ -351,17 +357,19 @@ static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol,
+ {
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct hdmi_spec *spec = codec->spec;
++ struct hdmi_spec_per_pin *per_pin;
+ struct hdmi_eld *eld;
+ int pin_idx;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+
+ pin_idx = kcontrol->private_value;
+- eld = &get_pin(spec, pin_idx)->sink_eld;
++ per_pin = get_pin(spec, pin_idx);
++ eld = &per_pin->sink_eld;
+
+- mutex_lock(&eld->lock);
++ mutex_lock(&per_pin->lock);
+ uinfo->count = eld->eld_valid ? eld->eld_size : 0;
+- mutex_unlock(&eld->lock);
++ mutex_unlock(&per_pin->lock);
+
+ return 0;
+ }
+@@ -371,15 +379,17 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
+ {
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct hdmi_spec *spec = codec->spec;
++ struct hdmi_spec_per_pin *per_pin;
+ struct hdmi_eld *eld;
+ int pin_idx;
+
+ pin_idx = kcontrol->private_value;
+- eld = &get_pin(spec, pin_idx)->sink_eld;
++ per_pin = get_pin(spec, pin_idx);
++ eld = &per_pin->sink_eld;
+
+- mutex_lock(&eld->lock);
++ mutex_lock(&per_pin->lock);
+ if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
+- mutex_unlock(&eld->lock);
++ mutex_unlock(&per_pin->lock);
+ snd_BUG();
+ return -EINVAL;
+ }
+@@ -389,7 +399,7 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
+ if (eld->eld_valid)
+ memcpy(ucontrol->value.bytes.data, eld->eld_buffer,
+ eld->eld_size);
+- mutex_unlock(&eld->lock);
++ mutex_unlock(&per_pin->lock);
+
+ return 0;
+ }
+@@ -490,6 +500,68 @@ static void hdmi_set_channel_count(struct hda_codec *codec,
+ AC_VERB_SET_CVT_CHAN_COUNT, chs - 1);
+ }
+
++/*
++ * ELD proc files
++ */
++
++#ifdef CONFIG_PROC_FS
++static void print_eld_info(struct snd_info_entry *entry,
++ struct snd_info_buffer *buffer)
++{
++ struct hdmi_spec_per_pin *per_pin = entry->private_data;
++
++ mutex_lock(&per_pin->lock);
++ snd_hdmi_print_eld_info(&per_pin->sink_eld, buffer);
++ mutex_unlock(&per_pin->lock);
++}
++
++static void write_eld_info(struct snd_info_entry *entry,
++ struct snd_info_buffer *buffer)
++{
++ struct hdmi_spec_per_pin *per_pin = entry->private_data;
++
++ mutex_lock(&per_pin->lock);
++ snd_hdmi_write_eld_info(&per_pin->sink_eld, buffer);
++ mutex_unlock(&per_pin->lock);
++}
++
++static int eld_proc_new(struct hdmi_spec_per_pin *per_pin, int index)
++{
++ char name[32];
++ struct hda_codec *codec = per_pin->codec;
++ struct snd_info_entry *entry;
++ int err;
++
++ snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
++ err = snd_card_proc_new(codec->bus->card, name, &entry);
++ if (err < 0)
++ return err;
++
++ snd_info_set_text_ops(entry, per_pin, print_eld_info);
++ entry->c.text.write = write_eld_info;
++ entry->mode |= S_IWUSR;
++ per_pin->proc_entry = entry;
++
++ return 0;
++}
++
++static void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
++{
++ if (!per_pin->codec->bus->shutdown && per_pin->proc_entry) {
++ snd_device_free(per_pin->codec->bus->card, per_pin->proc_entry);
++ per_pin->proc_entry = NULL;
++ }
++}
++#else
++static inline int eld_proc_new(struct hdmi_spec_per_pin *per_pin,
++ int index)
++{
++ return 0;
++}
++static inline void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
++{
++}
++#endif
+
+ /*
+ * Channel mapping routines
+@@ -608,25 +680,35 @@ static void hdmi_std_setup_channel_mapping(struct hda_codec *codec,
+ bool non_pcm,
+ int ca)
+ {
++ struct cea_channel_speaker_allocation *ch_alloc;
+ int i;
+ int err;
+ int order;
+ int non_pcm_mapping[8];
+
+ order = get_channel_allocation_order(ca);
++ ch_alloc = &channel_allocations[order];
+
+ if (hdmi_channel_mapping[ca][1] == 0) {
+- for (i = 0; i < channel_allocations[order].channels; i++)
+- hdmi_channel_mapping[ca][i] = i | (i << 4);
+- for (; i < 8; i++)
+- hdmi_channel_mapping[ca][i] = 0xf | (i << 4);
++ int hdmi_slot = 0;
++ /* fill actual channel mappings in ALSA channel (i) order */
++ for (i = 0; i < ch_alloc->channels; i++) {
++ while (!ch_alloc->speakers[7 - hdmi_slot] && !WARN_ON(hdmi_slot >= 8))
++ hdmi_slot++; /* skip zero slots */
++
++ hdmi_channel_mapping[ca][i] = (i << 4) | hdmi_slot++;
++ }
++ /* fill the rest of the slots with ALSA channel 0xf */
++ for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++)
++ if (!ch_alloc->speakers[7 - hdmi_slot])
++ hdmi_channel_mapping[ca][i++] = (0xf << 4) | hdmi_slot;
+ }
+
+ if (non_pcm) {
+- for (i = 0; i < channel_allocations[order].channels; i++)
+- non_pcm_mapping[i] = i | (i << 4);
++ for (i = 0; i < ch_alloc->channels; i++)
++ non_pcm_mapping[i] = (i << 4) | i;
+ for (; i < 8; i++)
+- non_pcm_mapping[i] = 0xf | (i << 4);
++ non_pcm_mapping[i] = (0xf << 4) | i;
+ }
+
+ for (i = 0; i < 8; i++) {
+@@ -639,25 +721,31 @@ static void hdmi_std_setup_channel_mapping(struct hda_codec *codec,
+ break;
+ }
+ }
+-
+- hdmi_debug_channel_mapping(codec, pin_nid);
+ }
+
+ struct channel_map_table {
+ unsigned char map; /* ALSA API channel map position */
+- unsigned char cea_slot; /* CEA slot value */
+ int spk_mask; /* speaker position bit mask */
+ };
+
+ static struct channel_map_table map_tables[] = {
+- { SNDRV_CHMAP_FL, 0x00, FL },
+- { SNDRV_CHMAP_FR, 0x01, FR },
+- { SNDRV_CHMAP_RL, 0x04, RL },
+- { SNDRV_CHMAP_RR, 0x05, RR },
+- { SNDRV_CHMAP_LFE, 0x02, LFE },
+- { SNDRV_CHMAP_FC, 0x03, FC },
+- { SNDRV_CHMAP_RLC, 0x06, RLC },
+- { SNDRV_CHMAP_RRC, 0x07, RRC },
++ { SNDRV_CHMAP_FL, FL },
++ { SNDRV_CHMAP_FR, FR },
++ { SNDRV_CHMAP_RL, RL },
++ { SNDRV_CHMAP_RR, RR },
++ { SNDRV_CHMAP_LFE, LFE },
++ { SNDRV_CHMAP_FC, FC },
++ { SNDRV_CHMAP_RLC, RLC },
++ { SNDRV_CHMAP_RRC, RRC },
++ { SNDRV_CHMAP_RC, RC },
++ { SNDRV_CHMAP_FLC, FLC },
++ { SNDRV_CHMAP_FRC, FRC },
++ { SNDRV_CHMAP_FLH, FLH },
++ { SNDRV_CHMAP_FRH, FRH },
++ { SNDRV_CHMAP_FLW, FLW },
++ { SNDRV_CHMAP_FRW, FRW },
++ { SNDRV_CHMAP_TC, TC },
++ { SNDRV_CHMAP_FCH, FCH },
+ {} /* terminator */
+ };
+
+@@ -673,25 +761,19 @@ static int to_spk_mask(unsigned char c)
+ }
+
+ /* from ALSA API channel position to CEA slot */
+-static int to_cea_slot(unsigned char c)
++static int to_cea_slot(int ordered_ca, unsigned char pos)
+ {
+- struct channel_map_table *t = map_tables;
+- for (; t->map; t++) {
+- if (t->map == c)
+- return t->cea_slot;
+- }
+- return 0x0f;
+-}
++ int mask = to_spk_mask(pos);
++ int i;
+
+-/* from CEA slot to ALSA API channel position */
+-static int from_cea_slot(unsigned char c)
+-{
+- struct channel_map_table *t = map_tables;
+- for (; t->map; t++) {
+- if (t->cea_slot == c)
+- return t->map;
++ if (mask) {
++ for (i = 0; i < 8; i++) {
++ if (channel_allocations[ordered_ca].speakers[7 - i] == mask)
++ return i;
++ }
+ }
+- return 0;
++
++ return -1;
+ }
+
+ /* from speaker bit mask to ALSA API channel position */
+@@ -705,6 +787,14 @@ static int spk_to_chmap(int spk)
+ return 0;
+ }
+
++/* from CEA slot to ALSA API channel position */
++static int from_cea_slot(int ordered_ca, unsigned char slot)
++{
++ int mask = channel_allocations[ordered_ca].speakers[7 - slot];
++
++ return spk_to_chmap(mask);
++}
++
+ /* get the CA index corresponding to the given ALSA API channel map */
+ static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
+ {
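
With the rework above, an ALSA channel position maps to whichever CEA slot carries that speaker in the active channel allocation (speakers[] is indexed back-to-front, slot i living at index 7 - i), instead of a fixed table entry. A small sketch with one hard-coded allocation; the speaker masks are illustrative:

#include <stdio.h>

#define FL  0x01
#define FR  0x02
#define LFE 0x04
#define FC  0x08

/* one CA with FL, FR, LFE, FC in slots 0..3; slot i at index 7 - i */
static int speakers[8] = { 0, 0, 0, 0, FC, LFE, FR, FL };

static int to_cea_slot(int spk_mask)
{
	int i;

	for (i = 0; i < 8; i++)
		if (speakers[7 - i] == spk_mask)
			return i;
	return -1;                  /* unassigned in this allocation */
}

int main(void)
{
	printf("FC slot: %d\n", to_cea_slot(FC));   /* 3 */
	printf("unknown: %d\n", to_cea_slot(0x10)); /* -1 */
	return 0;
}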
+@@ -731,16 +821,27 @@ static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
+ /* set up the channel slots for the given ALSA API channel map */
+ static int hdmi_manual_setup_channel_mapping(struct hda_codec *codec,
+ hda_nid_t pin_nid,
+- int chs, unsigned char *map)
++ int chs, unsigned char *map,
++ int ca)
+ {
+- int i;
+- for (i = 0; i < 8; i++) {
++ int ordered_ca = get_channel_allocation_order(ca);
++ int alsa_pos, hdmi_slot;
++ int assignments[8] = {[0 ... 7] = 0xf};
++
++ for (alsa_pos = 0; alsa_pos < chs; alsa_pos++) {
++
++ hdmi_slot = to_cea_slot(ordered_ca, map[alsa_pos]);
++
++ if (hdmi_slot < 0)
++ continue; /* unassigned channel */
++
++ assignments[hdmi_slot] = alsa_pos;
++ }
++
++ for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++) {
+ int val, err;
+- if (i < chs)
+- val = to_cea_slot(map[i]);
+- else
+- val = 0xf;
+- val |= (i << 4);
++
++ val = (assignments[hdmi_slot] << 4) | hdmi_slot;
+ err = snd_hda_codec_write(codec, pin_nid, 0,
+ AC_VERB_SET_HDMI_CHAN_SLOT, val);
+ if (err)
+@@ -756,7 +857,7 @@ static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
+ int ordered_ca = get_channel_allocation_order(ca);
+ for (i = 0; i < 8; i++) {
+ if (i < channel_allocations[ordered_ca].channels)
+- map[i] = from_cea_slot(hdmi_channel_mapping[ca][i] & 0x0f);
++ map[i] = from_cea_slot(ordered_ca, hdmi_channel_mapping[ca][i] & 0x0f);
+ else
+ map[i] = 0;
+ }
+@@ -769,11 +870,13 @@ static void hdmi_setup_channel_mapping(struct hda_codec *codec,
+ {
+ if (!non_pcm && chmap_set) {
+ hdmi_manual_setup_channel_mapping(codec, pin_nid,
+- channels, map);
++ channels, map, ca);
+ } else {
+ hdmi_std_setup_channel_mapping(codec, pin_nid, non_pcm, ca);
+ hdmi_setup_fake_chmap(map, ca);
+ }
++
++ hdmi_debug_channel_mapping(codec, pin_nid);
+ }
+
+ /*
+@@ -903,8 +1006,9 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ {
+ hda_nid_t pin_nid = per_pin->pin_nid;
+ int channels = per_pin->channels;
++ int active_channels;
+ struct hdmi_eld *eld;
+- int ca;
++ int ca, ordered_ca;
+ union audio_infoframe ai;
+
+ if (!channels)
+@@ -926,6 +1030,11 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ if (ca < 0)
+ ca = 0;
+
++ ordered_ca = get_channel_allocation_order(ca);
++ active_channels = channel_allocations[ordered_ca].channels;
++
++ hdmi_set_channel_count(codec, per_pin->cvt_nid, active_channels);
++
+ memset(&ai, 0, sizeof(ai));
+ if (eld->info.conn_type == 0) { /* HDMI */
+ struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
+@@ -933,7 +1042,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ hdmi_ai->type = 0x84;
+ hdmi_ai->ver = 0x01;
+ hdmi_ai->len = 0x0a;
+- hdmi_ai->CC02_CT47 = channels - 1;
++ hdmi_ai->CC02_CT47 = active_channels - 1;
+ hdmi_ai->CA = ca;
+ hdmi_checksum_audio_infoframe(hdmi_ai);
+ } else if (eld->info.conn_type == 1) { /* DisplayPort */
+@@ -942,7 +1051,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ dp_ai->type = 0x84;
+ dp_ai->len = 0x1b;
+ dp_ai->ver = 0x11 << 2;
+- dp_ai->CC02_CT47 = channels - 1;
++ dp_ai->CC02_CT47 = active_channels - 1;
+ dp_ai->CA = ca;
+ } else {
+ snd_printd("HDMI: unknown connection type at pin %d\n",
+@@ -966,9 +1075,9 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ if (!hdmi_infoframe_uptodate(codec, pin_nid, ai.bytes,
+ sizeof(ai))) {
+ snd_printdd("hdmi_setup_audio_infoframe: "
+- "pin=%d channels=%d\n",
++ "pin=%d channels=%d ca=0x%02x\n",
+ pin_nid,
+- channels);
++ active_channels, ca);
+ hdmi_stop_infoframe_trans(codec, pin_nid);
+ hdmi_fill_audio_infoframe(codec, pin_nid,
+ ai.bytes, sizeof(ai));
+@@ -983,7 +1092,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ * Unsolicited events
+ */
+
+-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
++static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
+
+ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
+ {
+@@ -1009,8 +1118,8 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
+ if (pin_idx < 0)
+ return;
+
+- hdmi_present_sense(get_pin(spec, pin_idx), 1);
+- snd_hda_jack_report_sync(codec);
++ if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
++ snd_hda_jack_report_sync(codec);
+ }
+
+ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
+@@ -1160,7 +1269,16 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
+ return 0;
+ }
+
+-static void haswell_config_cvts(struct hda_codec *codec,
++/* Intel HDMI workaround to fix audio routing issue:
++ * For some Intel display codecs, pins share the same connection list.
++ * So a converter can be selected by multiple pins and playback on any of these
++ * pins will generate sound on the external display, because audio flows from
++ * the same converter to the display pipeline. Also muting one pin may make
++ * other pins have no sound output.
++ * So this function ensures that an assigned converter for a pin is not selected
++ * by any other pin.
++ */
++static void intel_not_share_assigned_cvt(struct hda_codec *codec,
+ hda_nid_t pin_nid, int mux_idx)
+ {
+ struct hdmi_spec *spec = codec->spec;
+@@ -1231,6 +1349,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ per_cvt = get_cvt(spec, cvt_idx);
+ /* Claim converter */
+ per_cvt->assigned = 1;
++ per_pin->cvt_nid = per_cvt->cvt_nid;
+ hinfo->nid = per_cvt->cvt_nid;
+
+ snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+@@ -1238,8 +1357,8 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ mux_idx);
+
+ /* configure unused pins to choose other converters */
+- if (is_haswell(codec))
+- haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);
++ if (is_haswell(codec) || is_valleyview(codec))
++ intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx);
+
+ snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
+
+@@ -1297,7 +1416,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
+ return 0;
+ }
+
+-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
++static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ {
+ struct hda_codec *codec = per_pin->codec;
+ struct hdmi_spec *spec = codec->spec;
+@@ -1312,10 +1431,15 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ * specification worked this way. Hence, we just ignore the data in
+ * the unsolicited response to avoid custom WARs.
+ */
+- int present = snd_hda_pin_sense(codec, pin_nid);
++ int present;
+ bool update_eld = false;
+ bool eld_changed = false;
++ bool ret;
+
++ snd_hda_power_up(codec);
++ present = snd_hda_pin_sense(codec, pin_nid);
++
++ mutex_lock(&per_pin->lock);
+ pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
+ if (pin_eld->monitor_present)
+ eld->eld_valid = !!(present & AC_PINSENSE_ELDV);
+@@ -1345,11 +1469,10 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ queue_delayed_work(codec->bus->workq,
+ &per_pin->work,
+ msecs_to_jiffies(300));
+- return;
++ goto unlock;
+ }
+ }
+
+- mutex_lock(&pin_eld->lock);
+ if (pin_eld->eld_valid && !eld->eld_valid) {
+ update_eld = true;
+ eld_changed = true;
+@@ -1374,12 +1497,19 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ hdmi_setup_audio_infoframe(codec, per_pin,
+ per_pin->non_pcm);
+ }
+- mutex_unlock(&pin_eld->lock);
+
+ if (eld_changed)
+ snd_ctl_notify(codec->bus->card,
+ SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
+ &per_pin->eld_ctl->id);
++ unlock:
++ if ((codec->vendor_id & 0xffff0000) == 0x10020000)
++ ret = true; /* AMD codecs create ELD by themselves */
++ else
++ ret = !repoll || !pin_eld->monitor_present || pin_eld->eld_valid;
++ mutex_unlock(&per_pin->lock);
++ snd_hda_power_down(codec);
++ return ret;
+ }
+
+ static void hdmi_repoll_eld(struct work_struct *work)
+@@ -1390,7 +1520,8 @@ static void hdmi_repoll_eld(struct work_struct *work)
+ if (per_pin->repoll_count++ > 6)
+ per_pin->repoll_count = 0;
+
+- hdmi_present_sense(per_pin, per_pin->repoll_count);
++ if (hdmi_present_sense(per_pin, per_pin->repoll_count))
++ snd_hda_jack_report_sync(per_pin->codec);
+ }
+
+ static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
+@@ -1551,12 +1682,12 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ int pinctl;
+
+ non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
++ mutex_lock(&per_pin->lock);
+ per_pin->channels = substream->runtime->channels;
+ per_pin->setup = true;
+
+- hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
+-
+ hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
++ mutex_unlock(&per_pin->lock);
+
+ if (spec->dyn_pin_out) {
+ pinctl = snd_hda_codec_read(codec, pin_nid, 0,
+@@ -1611,11 +1742,14 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+ }
+
+ snd_hda_spdif_ctls_unassign(codec, pin_idx);
++
++ mutex_lock(&per_pin->lock);
+ per_pin->chmap_set = false;
+ memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
+
+ per_pin->setup = false;
+ per_pin->channels = 0;
++ mutex_unlock(&per_pin->lock);
+ }
+
+ return 0;
+@@ -1650,8 +1784,6 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+ struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+ struct hda_codec *codec = info->private_data;
+ struct hdmi_spec *spec = codec->spec;
+- const unsigned int valid_mask =
+- FL | FR | RL | RR | LFE | FC | RLC | RRC;
+ unsigned int __user *dst;
+ int chs, count = 0;
+
+@@ -1669,8 +1801,6 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+ int chs_bytes = chs * 4;
+ if (cap->channels != chs)
+ continue;
+- if (cap->spk_mask & ~valid_mask)
+- continue;
+ if (size < 8)
+ return -ENOMEM;
+ if (put_user(SNDRV_CTL_TLVT_CHMAP_VAR, dst) ||
+@@ -1748,10 +1878,12 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ ca = hdmi_manual_channel_allocation(ARRAY_SIZE(chmap), chmap);
+ if (ca < 0)
+ return -EINVAL;
++ mutex_lock(&per_pin->lock);
+ per_pin->chmap_set = true;
+ memcpy(per_pin->chmap, chmap, sizeof(chmap));
+ if (prepared)
+ hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
++ mutex_unlock(&per_pin->lock);
+
+ return 0;
+ }
+@@ -1868,12 +2000,11 @@ static int generic_hdmi_init_per_pins(struct hda_codec *codec)
+
+ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+- struct hdmi_eld *eld = &per_pin->sink_eld;
+
+ per_pin->codec = codec;
+- mutex_init(&eld->lock);
++ mutex_init(&per_pin->lock);
+ INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
+- snd_hda_eld_proc_new(codec, eld, pin_idx);
++ eld_proc_new(per_pin, pin_idx);
+ }
+ return 0;
+ }
+@@ -1914,10 +2045,9 @@ static void generic_hdmi_free(struct hda_codec *codec)
+
+ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+- struct hdmi_eld *eld = &per_pin->sink_eld;
+
+ cancel_delayed_work(&per_pin->work);
+- snd_hda_eld_proc_free(codec, eld);
++ eld_proc_free(per_pin);
+ }
+
+ flush_workqueue(codec->bus->workq);
+@@ -2717,6 +2847,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
++{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
+ {} /* terminator */
+ };
+@@ -2771,6 +2902,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862805");
+ MODULE_ALIAS("snd-hda-codec-id:80862806");
+ MODULE_ALIAS("snd-hda-codec-id:80862807");
+ MODULE_ALIAS("snd-hda-codec-id:80862880");
++MODULE_ALIAS("snd-hda-codec-id:80862882");
+ MODULE_ALIAS("snd-hda-codec-id:808629fb");
+
+ MODULE_LICENSE("GPL");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1be437f533a6..deddee9c1565 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3464,6 +3464,19 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
+ alc_fixup_headset_mode(codec, fix, action);
+ }
+
++static void alc_no_shutup(struct hda_codec *codec)
++{
++}
++
++static void alc_fixup_no_shutup(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ struct alc_spec *spec = codec->spec;
++ spec->shutup = alc_no_shutup;
++ }
++}
++
+ static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+@@ -3674,6 +3687,7 @@ enum {
+ ALC269_FIXUP_HP_GPIO_LED,
+ ALC269_FIXUP_INV_DMIC,
+ ALC269_FIXUP_LENOVO_DOCK,
++ ALC269_FIXUP_NO_SHUTUP,
+ ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
+ ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
+ ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+@@ -3840,6 +3854,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic_0x12,
+ },
++ [ALC269_FIXUP_NO_SHUTUP] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc_fixup_no_shutup,
++ },
+ [ALC269_FIXUP_LENOVO_DOCK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -4000,6 +4018,7 @@ static const struct hda_fixup alc269_fixups[] = {
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
++ SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
+@@ -4089,6 +4108,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++ SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
+index 77acd790ea47..eb7ad7706205 100644
+--- a/sound/pci/oxygen/xonar_dg.c
++++ b/sound/pci/oxygen/xonar_dg.c
+@@ -294,6 +294,16 @@ static int output_switch_put(struct snd_kcontrol *ctl,
+ oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
+ data->output_sel == 1 ? GPIO_HP_REAR : 0,
+ GPIO_HP_REAR);
++ oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING,
++ data->output_sel == 0 ?
++ OXYGEN_PLAY_MUTE01 :
++ OXYGEN_PLAY_MUTE23 |
++ OXYGEN_PLAY_MUTE45 |
++ OXYGEN_PLAY_MUTE67,
++ OXYGEN_PLAY_MUTE01 |
++ OXYGEN_PLAY_MUTE23 |
++ OXYGEN_PLAY_MUTE45 |
++ OXYGEN_PLAY_MUTE67);
+ }
+ mutex_unlock(&chip->mutex);
+ return changed;
+@@ -596,7 +606,7 @@ struct oxygen_model model_xonar_dg = {
+ .model_data_size = sizeof(struct dg),
+ .device_config = PLAYBACK_0_TO_I2S |
+ PLAYBACK_1_TO_SPDIF |
+- CAPTURE_0_FROM_I2S_2 |
++ CAPTURE_0_FROM_I2S_1 |
+ CAPTURE_1_FROM_SPDIF,
+ .dac_channels_pcm = 6,
+ .dac_channels_mixer = 0,
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 95558ef4a7a0..be4db47cb2d9 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -883,6 +883,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ }
+ break;
+
++ case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
+ case USB_ID(0x046d, 0x0808):
+ case USB_ID(0x046d, 0x0809):
+ case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
diff --git a/1015_linux-3.12.16.patch b/1015_linux-3.12.16.patch
new file mode 100644
index 00000000..b8c1c7a2
--- /dev/null
+++ b/1015_linux-3.12.16.patch
@@ -0,0 +1,2135 @@
+diff --git a/Makefile b/Makefile
+index 517391a3093e..4aab3be88e9b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index 5689c18c85f5..ceb4807ee8b2 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -120,11 +120,14 @@
+ /*
+ * 2nd stage PTE definitions for LPAE.
+ */
+-#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
+-#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
+-#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
+-#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
+-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
++#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
++#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
++#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
++#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
++#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
++
++#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
++#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+
+ /*
+ * Hyp-mode PL2 PTE definitions for LPAE.
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 0e1e2b3afa45..2a767d262c17 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -622,6 +622,7 @@ void __init dump_machine_table(void)
+ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
+ {
+ struct membank *bank = &meminfo.bank[meminfo.nr_banks];
++ u64 aligned_start;
+
+ if (meminfo.nr_banks >= NR_BANKS) {
+ printk(KERN_CRIT "NR_BANKS too low, "
+@@ -634,10 +635,16 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
+ * Size is appropriately rounded down, start is rounded up.
+ */
+ size -= start & ~PAGE_MASK;
+- bank->start = PAGE_ALIGN(start);
++ aligned_start = PAGE_ALIGN(start);
+
+-#ifndef CONFIG_ARM_LPAE
+- if (bank->start + size < bank->start) {
++#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
++ if (aligned_start > ULONG_MAX) {
++ printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
++ "32-bit physical address space\n", (long long)start);
++ return -EINVAL;
++ }
++
++ if (aligned_start + size > ULONG_MAX) {
+ printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
+ "32-bit physical address space\n", (long long)start);
+ /*
+@@ -645,10 +652,25 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
+ * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
+ * This means we lose a page after masking.
+ */
+- size = ULONG_MAX - bank->start;
++ size = ULONG_MAX - aligned_start;
+ }
+ #endif
+
++ if (aligned_start < PHYS_OFFSET) {
++ if (aligned_start + size <= PHYS_OFFSET) {
++ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
++ aligned_start, aligned_start + size);
++ return -EINVAL;
++ }
++
++ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
++ aligned_start, (u64)PHYS_OFFSET);
++
++ size -= PHYS_OFFSET - aligned_start;
++ aligned_start = PHYS_OFFSET;
++ }
++
++ bank->start = aligned_start;
+ bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
+
+ /*
+diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
+index 94a119a35af8..3c405f43ca39 100644
+--- a/arch/arm/mach-tegra/common.c
++++ b/arch/arm/mach-tegra/common.c
+@@ -22,6 +22,7 @@
+ #include <linux/io.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
++#include <linux/of.h>
+ #include <linux/reboot.h>
+ #include <linux/irqchip.h>
+ #include <linux/clk-provider.h>
+@@ -82,10 +83,20 @@ void tegra_assert_system_reset(enum reboot_mode mode, const char *cmd)
+ static void __init tegra_init_cache(void)
+ {
+ #ifdef CONFIG_CACHE_L2X0
++ static const struct of_device_id pl310_ids[] __initconst = {
++ { .compatible = "arm,pl310-cache", },
++ {}
++ };
++
++ struct device_node *np;
+ int ret;
+ void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
+ u32 aux_ctrl, cache_type;
+
++ np = of_find_matching_node(NULL, pl310_ids);
++ if (!np)
++ return;
++
+ cache_type = readl(p + L2X0_CACHE_TYPE);
+ aux_ctrl = (cache_type & 0x700) << (17-8);
+ aux_ctrl |= 0x7C400001;
+diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
+index d5a4e9ad8f0f..33eab618b3f1 100644
+--- a/arch/arm/mm/mm.h
++++ b/arch/arm/mm/mm.h
+@@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
+
+ struct mem_type {
+ pteval_t prot_pte;
++ pteval_t prot_pte_s2;
+ pmdval_t prot_l1;
+ pmdval_t prot_sect;
+ unsigned int domain;
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index b1d17eeb59b8..0222ba7603af 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -229,12 +229,16 @@ __setup("noalign", noalign_setup);
+ #endif /* ifdef CONFIG_CPU_CP15 / else */
+
+ #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
++#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
+ #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
+
+ static struct mem_type mem_types[] = {
+ [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+ L_PTE_SHARED,
++ .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
++ s2_policy(L_PTE_S2_MT_DEV_SHARED) |
++ L_PTE_SHARED,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
+ .domain = DOMAIN_IO,
+@@ -456,7 +460,8 @@ static void __init build_mem_type_table(void)
+ cp = &cache_policies[cachepolicy];
+ vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+ s2_pgprot = cp->pte_s2;
+- hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
++ hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
++ s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
+
+ /*
+ * ARMv6 and above have extended page tables.
+diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
+index 84fcc5018284..519c4b2c0687 100644
+--- a/arch/arm64/boot/dts/foundation-v8.dts
++++ b/arch/arm64/boot/dts/foundation-v8.dts
+@@ -6,6 +6,8 @@
+
+ /dts-v1/;
+
++/memreserve/ 0x80000000 0x00010000;
++
+ / {
+ model = "Foundation-v8A";
+ compatible = "arm,foundation-aarch64", "arm,vexpress";
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 965c28ff7b3b..82d95a7e9466 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -255,7 +255,7 @@ static inline int has_transparent_hugepage(void)
+ #define pgprot_noncached(prot) \
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+ #define pgprot_writecombine(prot) \
+- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
++ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
+ #define pgprot_dmacoherent(prot) \
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
+ #define __HAVE_PHYS_MEM_ACCESS_PROT
+diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
+index 89c047f9a971..70ba9d4ee978 100644
+--- a/arch/arm64/include/asm/syscall.h
++++ b/arch/arm64/include/asm/syscall.h
+@@ -59,6 +59,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+ {
++ if (n == 0)
++ return;
++
+ if (i + n > SYSCALL_MAX_ARGS) {
+ unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
+ unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
+@@ -82,6 +85,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+ {
++ if (n == 0)
++ return;
++
+ if (i + n > SYSCALL_MAX_ARGS) {
+ pr_warning("%s called with max args %d, handling only %d\n",
+ __func__, i + n, SYSCALL_MAX_ARGS);
+diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
+index d3e5e9bc8f94..e37db7f2a5fa 100644
+--- a/arch/powerpc/include/asm/eeh.h
++++ b/arch/powerpc/include/asm/eeh.h
+@@ -117,6 +117,16 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
+ return edev ? edev->pdev : NULL;
+ }
+
++/* Return values from eeh_ops::next_error */
++enum {
++ EEH_NEXT_ERR_NONE = 0,
++ EEH_NEXT_ERR_INF,
++ EEH_NEXT_ERR_FROZEN_PE,
++ EEH_NEXT_ERR_FENCED_PHB,
++ EEH_NEXT_ERR_DEAD_PHB,
++ EEH_NEXT_ERR_DEAD_IOC
++};
++
+ /*
+ * The struct is used to trace the registered EEH operation
+ * callback functions. Actually, those operation callback
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 36bed5a12750..d3a132c9127c 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -626,84 +626,90 @@ static void eeh_handle_special_event(void)
+ {
+ struct eeh_pe *pe, *phb_pe;
+ struct pci_bus *bus;
+- struct pci_controller *hose, *tmp;
++ struct pci_controller *hose;
+ unsigned long flags;
+- int rc = 0;
++ int rc;
+
+- /*
+- * The return value from next_error() has been classified as follows.
+- * It might be good to enumerate them. However, next_error() is only
+- * supported by PowerNV platform for now. So it would be fine to use
+- * integer directly:
+- *
+- * 4 - Dead IOC 3 - Dead PHB
+- * 2 - Fenced PHB 1 - Frozen PE
+- * 0 - No error found
+- *
+- */
+- rc = eeh_ops->next_error(&pe);
+- if (rc <= 0)
+- return;
+
+- switch (rc) {
+- case 4:
+- /* Mark all PHBs in dead state */
+- eeh_serialize_lock(&flags);
+- list_for_each_entry_safe(hose, tmp,
+- &hose_list, list_node) {
+- phb_pe = eeh_phb_pe_get(hose);
+- if (!phb_pe) continue;
+-
+- eeh_pe_state_mark(phb_pe,
+- EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
++ do {
++ rc = eeh_ops->next_error(&pe);
++
++ switch (rc) {
++ case EEH_NEXT_ERR_DEAD_IOC:
++ /* Mark all PHBs in dead state */
++ eeh_serialize_lock(&flags);
++
++ /* Purge all events */
++ eeh_remove_event(NULL);
++
++ list_for_each_entry(hose, &hose_list, list_node) {
++ phb_pe = eeh_phb_pe_get(hose);
++ if (!phb_pe) continue;
++
++ eeh_pe_state_mark(phb_pe,
++ EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
++ }
++
++ eeh_serialize_unlock(flags);
++
++ break;
++ case EEH_NEXT_ERR_FROZEN_PE:
++ case EEH_NEXT_ERR_FENCED_PHB:
++ case EEH_NEXT_ERR_DEAD_PHB:
++ /* Mark the PE in fenced state */
++ eeh_serialize_lock(&flags);
++
++ /* Purge all events of the PHB */
++ eeh_remove_event(pe);
++
++ if (rc == EEH_NEXT_ERR_DEAD_PHB)
++ eeh_pe_state_mark(pe,
++ EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
++ else
++ eeh_pe_state_mark(pe,
++ EEH_PE_ISOLATED | EEH_PE_RECOVERING);
++
++ eeh_serialize_unlock(flags);
++
++ break;
++ case EEH_NEXT_ERR_NONE:
++ return;
++ default:
++ pr_warn("%s: Invalid value %d from next_error()\n",
++ __func__, rc);
++ return;
+ }
+- eeh_serialize_unlock(flags);
+-
+- /* Purge all events */
+- eeh_remove_event(NULL);
+- break;
+- case 3:
+- case 2:
+- case 1:
+- /* Mark the PE in fenced state */
+- eeh_serialize_lock(&flags);
+- if (rc == 3)
+- eeh_pe_state_mark(pe,
+- EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
+- else
+- eeh_pe_state_mark(pe,
+- EEH_PE_ISOLATED | EEH_PE_RECOVERING);
+- eeh_serialize_unlock(flags);
+-
+- /* Purge all events of the PHB */
+- eeh_remove_event(pe);
+- break;
+- default:
+- pr_err("%s: Invalid value %d from next_error()\n",
+- __func__, rc);
+- return;
+- }
+
+- /*
+- * For fenced PHB and frozen PE, it's handled as normal
+- * event. We have to remove the affected PHBs for dead
+- * PHB and IOC
+- */
+- if (rc == 2 || rc == 1)
+- eeh_handle_normal_event(pe);
+- else {
+- list_for_each_entry_safe(hose, tmp,
+- &hose_list, list_node) {
+- phb_pe = eeh_phb_pe_get(hose);
+- if (!phb_pe || !(phb_pe->state & EEH_PE_PHB_DEAD))
+- continue;
+-
+- bus = eeh_pe_bus_get(phb_pe);
+- /* Notify all devices that they're about to go down. */
+- eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
+- pcibios_remove_pci_devices(bus);
++ /*
++ * For fenced PHB and frozen PE, it's handled as normal
++ * event. We have to remove the affected PHBs for dead
++ * PHB and IOC
++ */
++ if (rc == EEH_NEXT_ERR_FROZEN_PE ||
++ rc == EEH_NEXT_ERR_FENCED_PHB) {
++ eeh_handle_normal_event(pe);
++ } else {
++ list_for_each_entry(hose, &hose_list, list_node) {
++ phb_pe = eeh_phb_pe_get(hose);
++ if (!phb_pe ||
++ !(phb_pe->state & EEH_PE_PHB_DEAD))
++ continue;
++
++ /* Notify all devices to be down */
++ bus = eeh_pe_bus_get(phb_pe);
++ eeh_pe_dev_traverse(pe,
++ eeh_report_failure, NULL);
++ pcibios_remove_pci_devices(bus);
++ }
+ }
+- }
++
++ /*
++ * If we have detected a dead IOC, we needn't proceed
++ * any further since all PHBs would have been removed
++ */
++ if (rc == EEH_NEXT_ERR_DEAD_IOC)
++ break;
++ } while (rc != EEH_NEXT_ERR_NONE);
+ }
+
+ /**
+diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
+index b7eb5d4f4c89..227c7fe4067f 100644
+--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
++++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
+@@ -766,12 +766,12 @@ static int ioda_eeh_get_pe(struct pci_controller *hose,
+ */
+ static int ioda_eeh_next_error(struct eeh_pe **pe)
+ {
+- struct pci_controller *hose, *tmp;
++ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ u64 frozen_pe_no;
+ u16 err_type, severity;
+ long rc;
+- int ret = 1;
++ int ret = EEH_NEXT_ERR_NONE;
+
+ /*
+ * While running here, it's safe to purge the event queue.
+@@ -781,7 +781,7 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
+ eeh_remove_event(NULL);
+ opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
+
+- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
++ list_for_each_entry(hose, &hose_list, list_node) {
+ /*
+ * If the subordinate PCI buses of the PHB has been
+ * removed, we needn't take care of it any more.
+@@ -820,19 +820,19 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
+ switch (err_type) {
+ case OPAL_EEH_IOC_ERROR:
+ if (severity == OPAL_EEH_SEV_IOC_DEAD) {
+- list_for_each_entry_safe(hose, tmp,
+- &hose_list, list_node) {
++ list_for_each_entry(hose, &hose_list,
++ list_node) {
+ phb = hose->private_data;
+ phb->eeh_state |= PNV_EEH_STATE_REMOVED;
+ }
+
+ pr_err("EEH: dead IOC detected\n");
+- ret = 4;
+- goto out;
++ ret = EEH_NEXT_ERR_DEAD_IOC;
+ } else if (severity == OPAL_EEH_SEV_INF) {
+ pr_info("EEH: IOC informative error "
+ "detected\n");
+ ioda_eeh_hub_diag(hose);
++ ret = EEH_NEXT_ERR_NONE;
+ }
+
+ break;
+@@ -844,21 +844,20 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
+ pr_err("EEH: dead PHB#%x detected\n",
+ hose->global_number);
+ phb->eeh_state |= PNV_EEH_STATE_REMOVED;
+- ret = 3;
+- goto out;
++ ret = EEH_NEXT_ERR_DEAD_PHB;
+ } else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
+ if (ioda_eeh_get_phb_pe(hose, pe))
+ break;
+
+ pr_err("EEH: fenced PHB#%x detected\n",
+ hose->global_number);
+- ret = 2;
+- goto out;
++ ret = EEH_NEXT_ERR_FENCED_PHB;
+ } else if (severity == OPAL_EEH_SEV_INF) {
+ pr_info("EEH: PHB#%x informative error "
+ "detected\n",
+ hose->global_number);
+ ioda_eeh_phb_diag(hose);
++ ret = EEH_NEXT_ERR_NONE;
+ }
+
+ break;
+@@ -868,13 +867,23 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
+
+ pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
+ (*pe)->addr, (*pe)->phb->global_number);
+- ret = 1;
+- goto out;
++ ret = EEH_NEXT_ERR_FROZEN_PE;
++ break;
++ default:
++ pr_warn("%s: Unexpected error type %d\n",
++ __func__, err_type);
+ }
++
++ /*
++ * If we have no errors on the specific PHB, or only an
++ * informative error there, we continue poking it.
++ * Otherwise, actions need to be taken by the upper
++ * layer.
++ */
++ if (ret > EEH_NEXT_ERR_INF)
++ break;
+ }
+
+- ret = 0;
+-out:
+ return ret;
+ }
+
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index dce0df8150df..74dd12952ea8 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -2664,6 +2664,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
+ int emulate = 0;
+ gfn_t pseudo_gfn;
+
++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
++ return 0;
++
+ for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
+ if (iterator.level == level) {
+ mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
+@@ -2834,6 +2837,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
+ bool ret = false;
+ u64 spte = 0ull;
+
++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
++ return false;
++
+ if (!page_fault_can_be_fast(error_code))
+ return false;
+
+@@ -3229,6 +3235,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
+ struct kvm_shadow_walk_iterator iterator;
+ u64 spte = 0ull;
+
++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
++ return spte;
++
+ walk_shadow_page_lockless_begin(vcpu);
+ for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
+ if (!is_shadow_present_pte(spte))
+@@ -4557,6 +4566,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
+ u64 spte;
+ int nr_sptes = 0;
+
++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
++ return nr_sptes;
++
+ walk_shadow_page_lockless_begin(vcpu);
+ for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
+ sptes[iterator.level-1] = spte;
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index ad75d77999d0..cba218a2f08d 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -569,6 +569,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+ if (FNAME(gpte_changed)(vcpu, gw, top_level))
+ goto out_gpte_changed;
+
++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
++ goto out_gpte_changed;
++
+ for (shadow_walk_init(&it, vcpu, addr);
+ shadow_walk_okay(&it) && it.level > gw->level;
+ shadow_walk_next(&it)) {
+@@ -820,6 +823,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+ */
+ mmu_topup_memory_caches(vcpu);
+
++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
++ WARN_ON(1);
++ return;
++ }
++
+ spin_lock(&vcpu->kvm->mmu_lock);
+ for_each_shadow_entry(vcpu, gva, iterator) {
+ level = iterator.level;
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 6128914ee873..59181e653826 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7294,8 +7294,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ free_vpid(vmx);
+- free_nested(vmx);
+ free_loaded_vmcs(vmx->loaded_vmcs);
++ free_nested(vmx);
+ kfree(vmx->guest_msrs);
+ kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, vmx);
+diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
+index 877b9a1b2152..01495755701b 100644
+--- a/arch/x86/net/bpf_jit.S
++++ b/arch/x86/net/bpf_jit.S
+@@ -140,7 +140,7 @@ bpf_slow_path_byte_msh:
+ push %r9; \
+ push SKBDATA; \
+ /* rsi already has offset */ \
+- mov $SIZE,%ecx; /* size */ \
++ mov $SIZE,%edx; /* size */ \
+ call bpf_internal_load_pointer_neg_helper; \
+ test %rax,%rax; \
+ pop SKBDATA; \
+diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
+index 587e0202a70b..e5590953630e 100644
+--- a/drivers/clocksource/vf_pit_timer.c
++++ b/drivers/clocksource/vf_pit_timer.c
+@@ -54,7 +54,7 @@ static inline void pit_irq_acknowledge(void)
+
+ static unsigned int pit_read_sched_clock(void)
+ {
+- return __raw_readl(clksrc_base + PITCVAL);
++ return ~__raw_readl(clksrc_base + PITCVAL);
+ }
+
+ static int __init pit_clocksource_init(unsigned long rate)
+diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
+index fe4a7d16e261..c077df094ae5 100644
+--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
+@@ -201,6 +201,13 @@ int i915_gem_init_stolen(struct drm_device *dev)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int bios_reserved = 0;
+
++#ifdef CONFIG_INTEL_IOMMU
++ if (intel_iommu_gfx_mapped) {
++ DRM_INFO("DMAR active, disabling use of stolen memory\n");
++ return 0;
++ }
++#endif
++
+ if (dev_priv->gtt.stolen_size == 0)
+ return 0;
+
+diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
+index 49f6cc0f9919..351805362290 100644
+--- a/drivers/hid/hid-lg4ff.c
++++ b/drivers/hid/hid-lg4ff.c
+@@ -574,17 +574,6 @@ int lg4ff_init(struct hid_device *hid)
+ if (error)
+ return error;
+
+- /* Check if autocentering is available and
+- * set the centering force to zero by default */
+- if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
+- if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */
+- dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
+- else
+- dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
+-
+- dev->ff->set_autocenter(dev, 0);
+- }
+-
+ /* Get private driver data */
+ drv_data = hid_get_drvdata(hid);
+ if (!drv_data) {
+@@ -605,6 +594,17 @@ int lg4ff_init(struct hid_device *hid)
+ entry->max_range = lg4ff_devices[i].max_range;
+ entry->set_range = lg4ff_devices[i].set_range;
+
++ /* Check if autocentering is available and
++ * set the centering force to zero by default */
++ if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
++ if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */
++ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
++ else
++ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
++
++ dev->ff->set_autocenter(dev, 0);
++ }
++
+ /* Create sysfs interface */
+ error = device_create_file(&hid->dev, &dev_attr_range);
+ if (error)
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index 6a6dd5cd7833..d0a0034d1734 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -313,13 +313,13 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
+ hid_hw_close(hidraw->hid);
+ wake_up_interruptible(&hidraw->wait);
+ }
++ device_destroy(hidraw_class,
++ MKDEV(hidraw_major, hidraw->minor));
+ } else {
+ --hidraw->open;
+ }
+ if (!hidraw->open) {
+ if (!hidraw->exist) {
+- device_destroy(hidraw_class,
+- MKDEV(hidraw_major, hidraw->minor));
+ hidraw_table[hidraw->minor] = NULL;
+ kfree(hidraw);
+ } else {
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index ba93ef85652d..09c71293ab4b 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -1652,7 +1652,6 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn
+ static void
+ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+ {
+- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct iscsi_conn *conn = isert_conn->conn;
+
+ if (isert_conn->post_recv_buf_count)
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 597e9b8fc18d..ef1cf52f8bb9 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -486,6 +486,7 @@ static void elantech_input_sync_v4(struct psmouse *psmouse)
+ unsigned char *packet = psmouse->packet;
+
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
++ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ input_mt_report_pointer_emulation(dev, true);
+ input_sync(dev);
+ }
+@@ -984,6 +985,44 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ }
+
+ /*
++ * Advertise INPUT_PROP_BUTTONPAD for clickpads. The testing of bit 12 in
++ * fw_version for this is based on the following fw_version & caps table:
++ *
++ * Laptop-model: fw_version: caps: buttons:
++ * Acer S3 0x461f00 10, 13, 0e clickpad
++ * Acer S7-392 0x581f01 50, 17, 0d clickpad
++ * Acer V5-131 0x461f02 01, 16, 0c clickpad
++ * Acer V5-551 0x461f00 ? clickpad
++ * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
++ * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
++ * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
++ * Asus UX31 0x361f00 20, 15, 0e clickpad
++ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
++ * Avatar AVIU-145A2 0x361f00 ? clickpad
++ * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
++ * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
++ * Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons
++ * Samsung NP770Z5E 0x575f01 10, 15, 0f clickpad
++ * Samsung NP700Z5B 0x361f06 21, 15, 0f clickpad
++ * Samsung NP900X3E-A02 0x575f03 ? clickpad
++ * Samsung NP-QX410 0x851b00 19, 14, 0c clickpad
++ * Samsung RC512 0x450f00 08, 15, 0c 2 hw buttons
++ * Samsung RF710 0x450f00 ? 2 hw buttons
++ * System76 Pangolin 0x250f01 ? 2 hw buttons
++ * (*) + 3 trackpoint buttons
++ */
++static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
++{
++ struct input_dev *dev = psmouse->dev;
++ struct elantech_data *etd = psmouse->private;
++
++ if (etd->fw_version & 0x001000) {
++ __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
++ __clear_bit(BTN_RIGHT, dev->keybit);
++ }
++}
++
++/*
+ * Set the appropriate event bits for the input subsystem
+ */
+ static int elantech_set_input_params(struct psmouse *psmouse)
+@@ -1026,6 +1065,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
+ __set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
+ /* fall through */
+ case 3:
++ if (etd->hw_version == 3)
++ elantech_set_buttonpad_prop(psmouse);
+ input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
+ input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
+ if (etd->reports_pressure) {
+@@ -1047,9 +1088,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
+ */
+ psmouse_warn(psmouse, "couldn't query resolution data.\n");
+ }
+- /* v4 is clickpad, with only one button. */
+- __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+- __clear_bit(BTN_RIGHT, dev->keybit);
++ elantech_set_buttonpad_prop(psmouse);
+ __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
+ /* For X to recognize me as touchpad. */
+ input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
+diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
+index e53416a4d7f3..a6debe13d5b9 100644
+--- a/drivers/input/tablet/wacom_sys.c
++++ b/drivers/input/tablet/wacom_sys.c
+@@ -304,7 +304,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
+ struct usb_device *dev = interface_to_usbdev(intf);
+ char limit = 0;
+ /* result has to be defined as int for some devices */
+- int result = 0;
++ int result = 0, touch_max = 0;
+ int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
+ unsigned char *report;
+
+@@ -351,7 +351,8 @@ static int wacom_parse_hid(struct usb_interface *intf,
+ if (usage == WCM_DESKTOP) {
+ if (finger) {
+ features->device_type = BTN_TOOL_FINGER;
+-
++ /* touch device supports at least one touch point */
++ touch_max = 1;
+ switch (features->type) {
+ case TABLETPC2FG:
+ features->pktlen = WACOM_PKGLEN_TPC2FG;
+@@ -504,6 +505,8 @@ static int wacom_parse_hid(struct usb_interface *intf,
+ }
+
+ out:
++ if (!features->touch_max && touch_max)
++ features->touch_max = touch_max;
+ result = 0;
+ kfree(report);
+ return result;
+diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
+index 6386ced910c2..91c694ba42f4 100644
+--- a/drivers/media/pci/cx18/cx18-driver.c
++++ b/drivers/media/pci/cx18/cx18-driver.c
+@@ -327,13 +327,16 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
+ struct i2c_client *c;
+ u8 eedata[256];
+
++ memset(tv, 0, sizeof(*tv));
++
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
++ if (!c)
++ return;
+
+ strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name));
+ c->adapter = &cx->i2c_adap[0];
+ c->addr = 0xa0 >> 1;
+
+- memset(tv, 0, sizeof(*tv));
+ if (tveeprom_read(c, eedata, sizeof(eedata)))
+ goto ret;
+
+diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
+index 20e345d9fe8f..a1c641e18362 100644
+--- a/drivers/media/usb/dvb-usb/cxusb.c
++++ b/drivers/media/usb/dvb-usb/cxusb.c
+@@ -149,6 +149,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
+ {
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
++ int ret;
+ int i;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+@@ -173,7 +174,8 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ if (1 + msg[i].len > sizeof(ibuf)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[i].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+ obuf[0] = 0;
+ obuf[1] = msg[i].len;
+@@ -193,12 +195,14 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ if (3 + msg[i].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[i].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+ if (1 + msg[i + 1].len > sizeof(ibuf)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[i + 1].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+ obuf[0] = msg[i].len;
+ obuf[1] = msg[i+1].len;
+@@ -223,7 +227,8 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ if (2 + msg[i].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[i].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+ obuf[0] = msg[i].addr;
+ obuf[1] = msg[i].len;
+@@ -237,8 +242,14 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ }
+ }
+
++ if (i == num)
++ ret = num;
++ else
++ ret = -EREMOTEIO;
++
++unlock:
+ mutex_unlock(&d->i2c_mutex);
+- return i == num ? num : -EREMOTEIO;
++ return ret;
+ }
+
+ static u32 cxusb_i2c_func(struct i2c_adapter *adapter)
+diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
+index 71b22f5a05ce..4170a45d17e0 100644
+--- a/drivers/media/usb/dvb-usb/dw2102.c
++++ b/drivers/media/usb/dvb-usb/dw2102.c
+@@ -301,6 +301,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
+ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
+ {
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
++ int ret;
+
+ if (!d)
+ return -ENODEV;
+@@ -316,7 +317,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
+ if (2 + msg[1].len > sizeof(ibuf)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[1].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+
+ obuf[0] = msg[0].addr << 1;
+@@ -340,7 +342,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
+ if (2 + msg[0].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[1].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+
+ obuf[0] = msg[0].addr << 1;
+@@ -357,7 +360,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
+ if (2 + msg[0].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[1].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+
+ obuf[0] = msg[0].addr << 1;
+@@ -386,15 +390,17 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
+
+ break;
+ }
++ ret = num;
+
++unlock:
+ mutex_unlock(&d->i2c_mutex);
+- return num;
++ return ret;
+ }
+
+ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
+ {
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+- int len, i, j;
++ int len, i, j, ret;
+
+ if (!d)
+ return -ENODEV;
+@@ -430,7 +436,8 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
+ if (2 + msg[j].len > sizeof(ibuf)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[j].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+
+ dw210x_op_rw(d->udev, 0xc3,
+@@ -466,7 +473,8 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
+ if (2 + msg[j].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[j].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+
+ obuf[0] = msg[j].addr << 1;
+@@ -481,15 +489,18 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
+ }
+
+ }
++ ret = num;
+
++unlock:
+ mutex_unlock(&d->i2c_mutex);
+- return num;
++ return ret;
+ }
+
+ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
+ {
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
++ int ret;
+ int i;
+
+ if (!d)
+@@ -506,7 +517,8 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ if (2 + msg[1].len > sizeof(ibuf)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[1].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+ obuf[0] = msg[0].addr << 1;
+ obuf[1] = msg[0].len;
+@@ -530,7 +542,8 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ if (2 + msg[0].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[0].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+ obuf[0] = msg[0].addr << 1;
+ obuf[1] = msg[0].len;
+@@ -556,9 +569,11 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ msg[i].flags == 0 ? ">>>" : "<<<");
+ debug_dump(msg[i].buf, msg[i].len, deb_xfer);
+ }
++ ret = num;
+
++unlock:
+ mutex_unlock(&d->i2c_mutex);
+- return num;
++ return ret;
+ }
+
+ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+@@ -566,7 +581,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ {
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ struct usb_device *udev;
+- int len, i, j;
++ int len, i, j, ret;
+
+ if (!d)
+ return -ENODEV;
+@@ -618,7 +633,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ if (msg[j].len > sizeof(ibuf)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[j].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+
+ dw210x_op_rw(d->udev, 0x91, 0, 0,
+@@ -652,7 +668,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ if (2 + msg[j].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[j].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+
+ obuf[0] = msg[j + 1].len;
+@@ -671,7 +688,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ if (2 + msg[j].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[j].len);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto unlock;
+ }
+ obuf[0] = msg[j].len + 1;
+ obuf[1] = (msg[j].addr << 1);
+@@ -685,9 +703,11 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ }
+ }
+ }
++ ret = num;
+
++unlock:
+ mutex_unlock(&d->i2c_mutex);
+- return num;
++ return ret;
+ }
+
+ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index 5d41aee69d16..6c0fd8e0f9bf 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -523,10 +523,21 @@ retry:
+ return rc;
+ }
+
++static u64 ibmveth_encode_mac_addr(u8 *mac)
++{
++ int i;
++ u64 encoded = 0;
++
++ for (i = 0; i < ETH_ALEN; i++)
++ encoded = (encoded << 8) | mac[i];
++
++ return encoded;
++}
++
+ static int ibmveth_open(struct net_device *netdev)
+ {
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+- u64 mac_address = 0;
++ u64 mac_address;
+ int rxq_entries = 1;
+ unsigned long lpar_rc;
+ int rc;
+@@ -580,8 +591,7 @@ static int ibmveth_open(struct net_device *netdev)
+ adapter->rx_queue.num_slots = rxq_entries;
+ adapter->rx_queue.toggle = 1;
+
+- memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
+- mac_address = mac_address >> 16;
++ mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
+
+ rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
+ adapter->rx_queue.queue_len;
+@@ -1184,8 +1194,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
+ /* add the addresses to the filter table */
+ netdev_for_each_mc_addr(ha, netdev) {
+ /* add the multicast address to the filter table */
+- unsigned long mcast_addr = 0;
+- memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
++ u64 mcast_addr;
++ mcast_addr = ibmveth_encode_mac_addr(ha->addr);
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastAddFilter,
+ mcast_addr);
+@@ -1369,9 +1379,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
+
+ netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
+
+- adapter->mac_addr = 0;
+- memcpy(&adapter->mac_addr, mac_addr_p, 6);
+-
+ netdev->irq = dev->irq;
+ netdev->netdev_ops = &ibmveth_netdev_ops;
+ netdev->ethtool_ops = &netdev_ethtool_ops;
+@@ -1380,7 +1387,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->features |= netdev->hw_features;
+
+- memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
++ memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
+diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
+index 84066bafe057..2c636cbf1341 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.h
++++ b/drivers/net/ethernet/ibm/ibmveth.h
+@@ -139,7 +139,6 @@ struct ibmveth_adapter {
+ struct napi_struct napi;
+ struct net_device_stats stats;
+ unsigned int mcastFilterSize;
+- unsigned long mac_addr;
+ void * buffer_list_addr;
+ void * filter_list_addr;
+ dma_addr_t buffer_list_dma;
+diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
+index ada6e210279f..48f0b06f4448 100644
+--- a/drivers/net/ethernet/intel/e100.c
++++ b/drivers/net/ethernet/intel/e100.c
+@@ -3036,7 +3036,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
+ *enable_wake = false;
+ }
+
+- pci_disable_device(pdev);
++ pci_clear_master(pdev);
+ }
+
+ static int __e100_power_off(struct pci_dev *pdev, bool wake)
+diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
+index 34d00f5771fe..b6b601cebb9e 100644
+--- a/drivers/net/ethernet/sfc/efx.h
++++ b/drivers/net/ethernet/sfc/efx.h
+@@ -67,6 +67,9 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+ #define EFX_RXQ_MIN_ENT 128U
+ #define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
+
++#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
++ EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
++
+ /* Filters */
+
+ /**
+diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
+index 5b471cf5c323..3b2356bc6fba 100644
+--- a/drivers/net/ethernet/sfc/ethtool.c
++++ b/drivers/net/ethernet/sfc/ethtool.c
+@@ -582,7 +582,7 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev,
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
+- ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
++ ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
+ ring->rx_pending = efx->rxq_entries;
+ ring->tx_pending = efx->txq_entries;
+ }
+@@ -595,7 +595,7 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
+- ring->tx_pending > EFX_MAX_DMAQ_SIZE)
++ ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
+ return -EINVAL;
+
+ if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index 6a32ef9d63ae..2b0aab130205 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -1531,7 +1531,7 @@ static int emac_dev_open(struct net_device *ndev)
+ struct device *emac_dev = &ndev->dev;
+ u32 cnt;
+ struct resource *res;
+- int ret;
++ int q, m, ret;
+ int i = 0;
+ int k = 0;
+ struct emac_priv *priv = netdev_priv(ndev);
+@@ -1566,8 +1566,7 @@ static int emac_dev_open(struct net_device *ndev)
+
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
+ for (i = res->start; i <= res->end; i++) {
+- if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
+- 0, ndev->name, ndev))
++ if (request_irq(i, emac_irq, 0, ndev->name, ndev))
+ goto rollback;
+ }
+ k++;
+@@ -1640,7 +1639,15 @@ static int emac_dev_open(struct net_device *ndev)
+
+ rollback:
+
+- dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
++ dev_err(emac_dev, "DaVinci EMAC: request_irq() failed");
++
++ for (q = k; k >= 0; k--) {
++ for (m = i; m >= res->start; m--)
++ free_irq(m, ndev);
++ res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1);
++ m = res->end;
++ }
++
+ ret = -EBUSY;
+ err:
+ pm_runtime_put(&priv->pdev->dev);
+@@ -1658,6 +1665,9 @@ err:
+ */
+ static int emac_dev_stop(struct net_device *ndev)
+ {
++ struct resource *res;
++ int i = 0;
++ int irq_num;
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+
+@@ -1673,6 +1683,13 @@ static int emac_dev_stop(struct net_device *ndev)
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
+
++ /* Free IRQ */
++ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
++ for (irq_num = res->start; irq_num <= res->end; irq_num++)
++ free_irq(irq_num, priv->ndev);
++ i++;
++ }
++
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
+
+diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+index 0fad98b85f60..eee2ef6ce248 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
++++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+@@ -596,8 +596,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+
+ mutex_lock(&mvm->mutex);
+
+- /* Rssi update while not associated ?! */
+- if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
++ /*
++ * Rssi update while not associated - can happen since the statistics
++ * are handled asynchronously
++ */
++ if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+ goto out_unlock;
+
+ /* No open connection - reports should be disabled */
+diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
+index f95de0d16216..1de59b0f8fa8 100644
+--- a/drivers/net/wireless/p54/txrx.c
++++ b/drivers/net/wireless/p54/txrx.c
+@@ -587,7 +587,7 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
+ chan = priv->curchan;
+ if (chan) {
+ struct survey_info *survey = &priv->survey[chan->hw_value];
+- survey->noise = clamp_t(s8, priv->noise, -128, 127);
++ survey->noise = clamp(priv->noise, -128, 127);
+ survey->channel_time = priv->survey_raw.active;
+ survey->channel_time_tx = priv->survey_raw.tx;
+ survey->channel_time_busy = priv->survey_raw.tx +
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index d97fbf4eb65b..ea83084cb7d9 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1806,8 +1806,6 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
+
+ trace_regulator_disable_complete(rdev_get_name(rdev));
+
+- _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+- NULL);
+ return 0;
+ }
+
+@@ -1831,6 +1829,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
+ rdev_err(rdev, "failed to disable\n");
+ return ret;
+ }
++ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
++ NULL);
+ }
+
+ rdev->use_count = 0;
+@@ -1883,20 +1883,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
+ {
+ int ret = 0;
+
+- /* force disable */
+- if (rdev->desc->ops->disable) {
+- /* ah well, who wants to live forever... */
+- ret = rdev->desc->ops->disable(rdev);
+- if (ret < 0) {
+- rdev_err(rdev, "failed to force disable\n");
+- return ret;
+- }
+- /* notify other consumers that power has been forced off */
+- _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
+- REGULATOR_EVENT_DISABLE, NULL);
++ ret = _regulator_do_disable(rdev);
++ if (ret < 0) {
++ rdev_err(rdev, "failed to force disable\n");
++ return ret;
+ }
+
+- return ret;
++ _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
++ REGULATOR_EVENT_DISABLE, NULL);
++
++ return 0;
+ }
+
+ /**
+@@ -3569,8 +3565,6 @@ int regulator_suspend_finish(void)
+
+ mutex_lock(&regulator_list_mutex);
+ list_for_each_entry(rdev, &regulator_list, list) {
+- struct regulator_ops *ops = rdev->desc->ops;
+-
+ mutex_lock(&rdev->mutex);
+ if (rdev->use_count > 0 || rdev->constraints->always_on) {
+ error = _regulator_do_enable(rdev);
+@@ -3579,12 +3573,10 @@ int regulator_suspend_finish(void)
+ } else {
+ if (!has_full_constraints)
+ goto unlock;
+- if (!ops->disable)
+- goto unlock;
+ if (!_regulator_is_enabled(rdev))
+ goto unlock;
+
+- error = ops->disable(rdev);
++ error = _regulator_do_disable(rdev);
+ if (error)
+ ret = error;
+ }
+@@ -3774,7 +3766,7 @@ static int __init regulator_init_complete(void)
+ ops = rdev->desc->ops;
+ c = rdev->constraints;
+
+- if (!ops->disable || (c && c->always_on))
++ if (c && c->always_on)
+ continue;
+
+ mutex_lock(&rdev->mutex);
+@@ -3795,7 +3787,7 @@ static int __init regulator_init_complete(void)
+ /* We log since this may kill the system if it
+ * goes wrong. */
+ rdev_info(rdev, "disabling\n");
+- ret = ops->disable(rdev);
++ ret = _regulator_do_disable(rdev);
+ if (ret != 0) {
+ rdev_err(rdev, "couldn't disable: %d\n", ret);
+ }
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index 451bf99582ff..846d5c6609d8 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -2978,12 +2978,12 @@ static int dasd_alloc_queue(struct dasd_block *block)
+
+ elevator_exit(block->request_queue->elevator);
+ block->request_queue->elevator = NULL;
++ mutex_lock(&block->request_queue->sysfs_lock);
+ rc = elevator_init(block->request_queue, "deadline");
+- if (rc) {
++ if (rc)
+ blk_cleanup_queue(block->request_queue);
+- return rc;
+- }
+- return 0;
++ mutex_unlock(&block->request_queue->sysfs_lock);
++ return rc;
+ }
+
+ /*
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 73f5208714a4..1af67a214d33 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -142,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ "QUIRK: Resetting on resume");
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ }
++ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
++ pdev->device == 0x0015 &&
++ pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
++ pdev->subsystem_device == 0xc0cd)
++ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ if (pdev->vendor == PCI_VENDOR_ID_VIA)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index a53651743d4d..5a5fb98edb8a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5779,21 +5779,20 @@ struct nfs_release_lockowner_data {
+ struct nfs4_lock_state *lsp;
+ struct nfs_server *server;
+ struct nfs_release_lockowner_args args;
+- struct nfs4_sequence_args seq_args;
+- struct nfs4_sequence_res seq_res;
++ struct nfs_release_lockowner_res res;
+ };
+
+ static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
+ {
+ struct nfs_release_lockowner_data *data = calldata;
+ nfs40_setup_sequence(data->server,
+- &data->seq_args, &data->seq_res, task);
++ &data->args.seq_args, &data->res.seq_res, task);
+ }
+
+ static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
+ {
+ struct nfs_release_lockowner_data *data = calldata;
+- nfs40_sequence_done(task, &data->seq_res);
++ nfs40_sequence_done(task, &data->res.seq_res);
+ }
+
+ static void nfs4_release_lockowner_release(void *calldata)
+@@ -5822,7 +5821,6 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st
+ data = kmalloc(sizeof(*data), GFP_NOFS);
+ if (!data)
+ return -ENOMEM;
+- nfs4_init_sequence(&data->seq_args, &data->seq_res, 0);
+ data->lsp = lsp;
+ data->server = server;
+ data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
+@@ -5830,6 +5828,8 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st
+ data->args.lock_owner.s_dev = server->s_dev;
+
+ msg.rpc_argp = &data->args;
++ msg.rpc_resp = &data->res;
++ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
+ rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
+ return 0;
+ }
+diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
+index 106a83570630..9fa215473b1d 100644
+--- a/fs/proc/proc_devtree.c
++++ b/fs/proc/proc_devtree.c
+@@ -235,6 +235,7 @@ void __init proc_device_tree_init(void)
+ return;
+ root = of_find_node_by_path("/");
+ if (root == NULL) {
++ remove_proc_entry("device-tree", NULL);
+ pr_debug("/proc/device-tree: can't find root\n");
+ return;
+ }
+diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
+index 7c1420bb1dce..6ade97de7a85 100644
+--- a/include/linux/ceph/messenger.h
++++ b/include/linux/ceph/messenger.h
+@@ -157,7 +157,7 @@ struct ceph_msg {
+ bool front_is_vmalloc;
+ bool more_to_follow;
+ bool needs_out_seq;
+- int front_max;
++ int front_alloc_len;
+ unsigned long ack_stamp; /* tx: when we were acked */
+
+ struct ceph_msgpool *pool;
+diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
+index 8f47625a0661..4fb6a8938957 100644
+--- a/include/linux/ceph/osd_client.h
++++ b/include/linux/ceph/osd_client.h
+@@ -138,6 +138,7 @@ struct ceph_osd_request {
+ __le64 *r_request_pool;
+ void *r_request_pgid;
+ __le32 *r_request_attempts;
++ bool r_paused;
+ struct ceph_eversion *r_request_reassert_version;
+
+ int r_result;
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index 3561d305b1e0..7b3a119c51c2 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -169,6 +169,8 @@ struct cgroup {
+ *
+ * The ID of the root cgroup is always 0, and a new cgroup
+ * will be assigned with a smallest available ID.
++ *
++ * Allocating/Removing ID must be protected by cgroup_mutex.
+ */
+ int id;
+
+diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
+index 5eaa746735ff..20aebdbab9a4 100644
+--- a/include/linux/ftrace_event.h
++++ b/include/linux/ftrace_event.h
+@@ -325,10 +325,6 @@ enum {
+ FILTER_TRACE_FN,
+ };
+
+-#define EVENT_STORAGE_SIZE 128
+-extern struct mutex event_storage_mutex;
+-extern char event_storage[EVENT_STORAGE_SIZE];
+-
+ extern int trace_event_raw_init(struct ftrace_event_call *call);
+ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
+ const char *name, int offset, int size,
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index ea9e076a91bf..2b307018979d 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -467,9 +467,14 @@ struct nfs_lockt_res {
+ };
+
+ struct nfs_release_lockowner_args {
++ struct nfs4_sequence_args seq_args;
+ struct nfs_lowner lock_owner;
+ };
+
++struct nfs_release_lockowner_res {
++ struct nfs4_sequence_res seq_res;
++};
++
+ struct nfs4_delegreturnargs {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *fhandle;
+diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
+index 712ea36067ab..645d749d3c9c 100644
+--- a/include/trace/ftrace.h
++++ b/include/trace/ftrace.h
+@@ -303,15 +303,12 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
+ #undef __array
+ #define __array(type, item, len) \
+ do { \
+- mutex_lock(&event_storage_mutex); \
++ char *type_str = #type"["__stringify(len)"]"; \
+ BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
+- snprintf(event_storage, sizeof(event_storage), \
+- "%s[%d]", #type, len); \
+- ret = trace_define_field(event_call, event_storage, #item, \
++ ret = trace_define_field(event_call, type_str, #item, \
+ offsetof(typeof(field), item), \
+ sizeof(field.item), \
+ is_signed_type(type), FILTER_OTHER); \
+- mutex_unlock(&event_storage_mutex); \
+ if (ret) \
+ return ret; \
+ } while (0);
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index c4f8bc79d075..1c204fdb85d8 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -4410,16 +4410,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
+ rcu_assign_pointer(cgrp->name, name);
+
+ /*
+- * Temporarily set the pointer to NULL, so idr_find() won't return
+- * a half-baked cgroup.
+- */
+- cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+- if (cgrp->id < 0) {
+- err = -ENOMEM;
+- goto err_free_name;
+- }
+-
+- /*
+ * Only live parents can have children. Note that the liveliness
+ * check isn't strictly necessary because cgroup_mkdir() and
+ * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
+@@ -4428,7 +4418,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
+ */
+ if (!cgroup_lock_live_group(parent)) {
+ err = -ENODEV;
+- goto err_free_id;
++ goto err_free_name;
+ }
+
+ /* Grab a reference on the superblock so the hierarchy doesn't
+@@ -4438,6 +4428,16 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
+ * fs */
+ atomic_inc(&sb->s_active);
+
++ /*
++ * Temporarily set the pointer to NULL, so idr_find() won't return
++ * a half-baked cgroup.
++ */
++ cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
++ if (cgrp->id < 0) {
++ err = -ENOMEM;
++ goto err_unlock;
++ }
++
+ init_cgroup_housekeeping(cgrp);
+
+ dentry->d_fsdata = cgrp;
+@@ -4544,11 +4544,11 @@ err_free_all:
+ ss->css_free(css);
+ }
+ }
++ idr_remove(&root->cgroup_idr, cgrp->id);
++err_unlock:
+ mutex_unlock(&cgroup_mutex);
+ /* Release the reference count that we took on the superblock */
+ deactivate_super(sb);
+-err_free_id:
+- idr_remove(&root->cgroup_idr, cgrp->id);
+ err_free_name:
+ kfree(rcu_dereference_raw(cgrp->name));
+ err_free_cgrp:
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index b4e8500afdb3..c59896c65ac3 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1080,7 +1080,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ next_seq = log_next_seq;
+
+ len = 0;
+- prev = 0;
+ while (len >= 0 && seq < next_seq) {
+ struct printk_log *msg = log_from_idx(idx);
+ int textlen;
+@@ -2790,7 +2789,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ next_idx = idx;
+
+ l = 0;
+- prev = 0;
+ while (seq < dumper->next_seq) {
+ struct printk_log *msg = log_from_idx(idx);
+
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index b03b1f897b5e..bc1bd20f7942 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -27,12 +27,6 @@
+
+ DEFINE_MUTEX(event_mutex);
+
+-DEFINE_MUTEX(event_storage_mutex);
+-EXPORT_SYMBOL_GPL(event_storage_mutex);
+-
+-char event_storage[EVENT_STORAGE_SIZE];
+-EXPORT_SYMBOL_GPL(event_storage);
+-
+ LIST_HEAD(ftrace_events);
+ static LIST_HEAD(ftrace_common_fields);
+
+diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
+index d21a74670088..d7d0b50b1b70 100644
+--- a/kernel/trace/trace_export.c
++++ b/kernel/trace/trace_export.c
+@@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \
+ #undef __array
+ #define __array(type, item, len) \
+ do { \
++ char *type_str = #type"["__stringify(len)"]"; \
+ BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
+- mutex_lock(&event_storage_mutex); \
+- snprintf(event_storage, sizeof(event_storage), \
+- "%s[%d]", #type, len); \
+- ret = trace_define_field(event_call, event_storage, #item, \
++ ret = trace_define_field(event_call, type_str, #item, \
+ offsetof(typeof(field), item), \
+ sizeof(field.item), \
+ is_signed_type(type), filter_type); \
+- mutex_unlock(&event_storage_mutex); \
+ if (ret) \
+ return ret; \
+ } while (0);
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 4a5df7b1cc9f..464303f61730 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -3126,7 +3126,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
+ INIT_LIST_HEAD(&m->data);
+
+ /* front */
+- m->front_max = front_len;
+ if (front_len) {
+ if (front_len > PAGE_CACHE_SIZE) {
+ m->front.iov_base = __vmalloc(front_len, flags,
+@@ -3143,7 +3142,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
+ } else {
+ m->front.iov_base = NULL;
+ }
+- m->front.iov_len = front_len;
++ m->front_alloc_len = m->front.iov_len = front_len;
+
+ dout("ceph_msg_new %p front %d\n", m, front_len);
+ return m;
+@@ -3301,8 +3300,8 @@ EXPORT_SYMBOL(ceph_msg_last_put);
+
+ void ceph_msg_dump(struct ceph_msg *msg)
+ {
+- pr_debug("msg_dump %p (front_max %d length %zd)\n", msg,
+- msg->front_max, msg->data_length);
++ pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
++ msg->front_alloc_len, msg->data_length);
+ print_hex_dump(KERN_DEBUG, "header: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ &msg->hdr, sizeof(msg->hdr), true);
+diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
+index 1fe25cd29d0e..2ac9ef35110b 100644
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -152,7 +152,7 @@ static int __open_session(struct ceph_mon_client *monc)
+ /* initiatiate authentication handshake */
+ ret = ceph_auth_build_hello(monc->auth,
+ monc->m_auth->front.iov_base,
+- monc->m_auth->front_max);
++ monc->m_auth->front_alloc_len);
+ __send_prepared_auth_request(monc, ret);
+ } else {
+ dout("open_session mon%d already open\n", monc->cur_mon);
+@@ -196,7 +196,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
+ int num;
+
+ p = msg->front.iov_base;
+- end = p + msg->front_max;
++ end = p + msg->front_alloc_len;
+
+ num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap;
+ ceph_encode_32(&p, num);
+@@ -897,7 +897,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
+ ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
+ msg->front.iov_len,
+ monc->m_auth->front.iov_base,
+- monc->m_auth->front_max);
++ monc->m_auth->front_alloc_len);
+ if (ret < 0) {
+ monc->client->auth_err = ret;
+ wake_up_all(&monc->client->auth_wq);
+@@ -939,7 +939,7 @@ static int __validate_auth(struct ceph_mon_client *monc)
+ return 0;
+
+ ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
+- monc->m_auth->front_max);
++ monc->m_auth->front_alloc_len);
+ if (ret <= 0)
+ return ret; /* either an error, or no need to authenticate */
+ __send_prepared_auth_request(monc, ret);
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index 2b4b32aaa893..e6b2db68b4fa 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -1232,6 +1232,22 @@ void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
+ EXPORT_SYMBOL(ceph_osdc_set_request_linger);
+
+ /*
++ * Returns whether a request should be blocked from being sent
++ * based on the current osdmap and osd_client settings.
++ *
++ * Caller should hold map_sem for read.
++ */
++static bool __req_should_be_paused(struct ceph_osd_client *osdc,
++ struct ceph_osd_request *req)
++{
++ bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
++ bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
++ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
++ return (req->r_flags & CEPH_OSD_FLAG_READ && pauserd) ||
++ (req->r_flags & CEPH_OSD_FLAG_WRITE && pausewr);
++}
++
++/*
+ * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
+ * (as needed), and set the request r_osd appropriately. If there is
+ * no up osd, set r_osd to NULL. Move the request to the appropriate list
+@@ -1248,6 +1264,7 @@ static int __map_request(struct ceph_osd_client *osdc,
+ int acting[CEPH_PG_MAX_SIZE];
+ int o = -1, num = 0;
+ int err;
++ bool was_paused;
+
+ dout("map_request %p tid %lld\n", req, req->r_tid);
+ err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
+@@ -1264,12 +1281,18 @@ static int __map_request(struct ceph_osd_client *osdc,
+ num = err;
+ }
+
++ was_paused = req->r_paused;
++ req->r_paused = __req_should_be_paused(osdc, req);
++ if (was_paused && !req->r_paused)
++ force_resend = 1;
++
+ if ((!force_resend &&
+ req->r_osd && req->r_osd->o_osd == o &&
+ req->r_sent >= req->r_osd->o_incarnation &&
+ req->r_num_pg_osds == num &&
+ memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
+- (req->r_osd == NULL && o == -1))
++ (req->r_osd == NULL && o == -1) ||
++ req->r_paused)
+ return 0; /* no change */
+
+ dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
+@@ -1613,14 +1636,17 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
+ *
+ * Caller should hold map_sem for read.
+ */
+-static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
++static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
++ bool force_resend_writes)
+ {
+ struct ceph_osd_request *req, *nreq;
+ struct rb_node *p;
+ int needmap = 0;
+ int err;
++ bool force_resend_req;
+
+- dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
++ dout("kick_requests %s %s\n", force_resend ? " (force resend)" : "",
++ force_resend_writes ? " (force resend writes)" : "");
+ mutex_lock(&osdc->request_mutex);
+ for (p = rb_first(&osdc->requests); p; ) {
+ req = rb_entry(p, struct ceph_osd_request, r_node);
+@@ -1645,7 +1671,10 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
+ continue;
+ }
+
+- err = __map_request(osdc, req, force_resend);
++ force_resend_req = force_resend ||
++ (force_resend_writes &&
++ req->r_flags & CEPH_OSD_FLAG_WRITE);
++ err = __map_request(osdc, req, force_resend_req);
+ if (err < 0)
+ continue; /* error */
+ if (req->r_osd == NULL) {
+@@ -1665,7 +1694,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
+ r_linger_item) {
+ dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
+
+- err = __map_request(osdc, req, force_resend);
++ err = __map_request(osdc, req,
++ force_resend || force_resend_writes);
+ dout("__map_request returned %d\n", err);
+ if (err == 0)
+ continue; /* no change and no osd was specified */
+@@ -1707,6 +1737,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
+ struct ceph_osdmap *newmap = NULL, *oldmap;
+ int err;
+ struct ceph_fsid fsid;
++ bool was_full;
+
+ dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
+ p = msg->front.iov_base;
+@@ -1720,6 +1751,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
+
+ down_write(&osdc->map_sem);
+
++ was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
++
+ /* incremental maps */
+ ceph_decode_32_safe(&p, end, nr_maps, bad);
+ dout(" %d inc maps\n", nr_maps);
+@@ -1744,7 +1777,10 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
+ ceph_osdmap_destroy(osdc->osdmap);
+ osdc->osdmap = newmap;
+ }
+- kick_requests(osdc, 0);
++ was_full = was_full ||
++ ceph_osdmap_flag(osdc->osdmap,
++ CEPH_OSDMAP_FULL);
++ kick_requests(osdc, 0, was_full);
+ } else {
+ dout("ignoring incremental map %u len %d\n",
+ epoch, maplen);
+@@ -1787,7 +1823,10 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
+ skipped_map = 1;
+ ceph_osdmap_destroy(oldmap);
+ }
+- kick_requests(osdc, skipped_map);
++ was_full = was_full ||
++ ceph_osdmap_flag(osdc->osdmap,
++ CEPH_OSDMAP_FULL);
++ kick_requests(osdc, skipped_map, was_full);
+ }
+ p += maplen;
+ nr_maps--;
+@@ -1804,7 +1843,9 @@ done:
+ * we find out when we are no longer full and stop returning
+ * ENOSPC.
+ */
+- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
++ if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
++ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
++ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR))
+ ceph_monc_request_next_osdmap(&osdc->client->monc);
+
+ mutex_lock(&osdc->request_mutex);
+@@ -2454,7 +2495,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
+ struct ceph_osd_client *osdc = osd->o_osdc;
+ struct ceph_msg *m;
+ struct ceph_osd_request *req;
+- int front = le32_to_cpu(hdr->front_len);
++ int front_len = le32_to_cpu(hdr->front_len);
+ int data_len = le32_to_cpu(hdr->data_len);
+ u64 tid;
+
+@@ -2474,12 +2515,13 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
+ req->r_reply, req->r_reply->con);
+ ceph_msg_revoke_incoming(req->r_reply);
+
+- if (front > req->r_reply->front.iov_len) {
++ if (front_len > req->r_reply->front_alloc_len) {
+ pr_warning("get_reply front %d > preallocated %d (%u#%llu)\n",
+- front, (int)req->r_reply->front.iov_len,
++ front_len, req->r_reply->front_alloc_len,
+ (unsigned int)con->peer_name.type,
+ le64_to_cpu(con->peer_name.num));
+- m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
++ m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
++ false);
+ if (!m)
+ goto out;
+ ceph_msg_put(req->r_reply);
+diff --git a/scripts/package/builddeb b/scripts/package/builddeb
+index 90e521fde35f..c1bb9be00fa0 100644
+--- a/scripts/package/builddeb
++++ b/scripts/package/builddeb
+@@ -41,9 +41,9 @@ create_package() {
+ parisc*)
+ debarch=hppa ;;
+ mips*)
+- debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el) ;;
++ debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;;
+ arm*)
+- debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el) ;;
++ debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el || true) ;;
+ *)
+ echo "" >&2
+ echo "** ** ** WARNING ** ** **" >&2
+@@ -62,7 +62,7 @@ create_package() {
+ fi
+
+ # Create the package
+- dpkg-gencontrol -isp $forcearch -p$pname -P"$pdir"
++ dpkg-gencontrol -isp $forcearch -Vkernel:debarch="${debarch:-$(dpkg --print-architecture)}" -p$pname -P"$pdir"
+ dpkg --build "$pdir" ..
+ }
+
+@@ -288,15 +288,14 @@ mkdir -p "$destdir"
+ (cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
+ ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
+ rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
+-arch=$(dpkg --print-architecture)
+
+ cat <<EOF >> debian/control
+
+ Package: $kernel_headers_packagename
+ Provides: linux-headers, linux-headers-2.6
+-Architecture: $arch
+-Description: Linux kernel headers for $KERNELRELEASE on $arch
+- This package provides kernel header files for $KERNELRELEASE on $arch
++Architecture: any
++Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch}
++ This package provides kernel header files for $KERNELRELEASE on \${kernel:debarch}
+ .
+ This is useful for people who need to build external modules
+ EOF
+diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
+index d9af6387f37c..dac296a7faad 100644
+--- a/sound/core/compress_offload.c
++++ b/sound/core/compress_offload.c
+@@ -133,7 +133,7 @@ static int snd_compr_open(struct inode *inode, struct file *f)
+ kfree(data);
+ }
+ snd_card_unref(compr->card);
+- return 0;
++ return ret;
+ }
+
+ static int snd_compr_free(struct inode *inode, struct file *f)
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index ccf5eb6b3d37..b5c4c2e4360b 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -4007,6 +4007,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+ { PCI_DEVICE(0x8086, 0x0d0c),
+ .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
++ /* Broadwell */
++ { PCI_DEVICE(0x8086, 0x160c),
++ .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+ /* 5 Series/3400 */
+ { PCI_DEVICE(0x8086, 0x3b56),
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index adb374babd18..23e0bc6d6568 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -45,6 +45,9 @@ module_param(static_hdmi_pcm, bool, 0644);
+ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
+
+ #define is_haswell(codec) ((codec)->vendor_id == 0x80862807)
++#define is_broadwell(codec) ((codec)->vendor_id == 0x80862808)
++#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec))
++
+ #define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
+
+ struct hdmi_spec_per_cvt {
+@@ -1014,7 +1017,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ if (!channels)
+ return;
+
+- if (is_haswell(codec))
++ if (is_haswell_plus(codec))
+ snd_hda_codec_write(codec, pin_nid, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE,
+ AMP_OUT_UNMUTE);
+@@ -1196,7 +1199,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
+ int pinctl;
+ int new_pinctl = 0;
+
+- if (is_haswell(codec))
++ if (is_haswell_plus(codec))
+ haswell_verify_D0(codec, cvt_nid, pin_nid);
+
+ if (snd_hda_query_pin_caps(codec, pin_nid) & AC_PINCAP_HBR) {
+@@ -1357,7 +1360,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ mux_idx);
+
+ /* configure unused pins to choose other converters */
+- if (is_haswell(codec) || is_valleyview(codec))
++ if (is_haswell_plus(codec) || is_valleyview(codec))
+ intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx);
+
+ snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
+@@ -1543,7 +1546,7 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
+ if (get_defcfg_connect(config) == AC_JACK_PORT_NONE)
+ return 0;
+
+- if (is_haswell(codec))
++ if (is_haswell_plus(codec))
+ intel_haswell_fixup_connect_list(codec, pin_nid);
+
+ pin_idx = spec->num_pins;
+@@ -2169,7 +2172,7 @@ static int patch_generic_hdmi(struct hda_codec *codec)
+ codec->spec = spec;
+ hdmi_array_init(spec, 4);
+
+- if (is_haswell(codec)) {
++ if (is_haswell_plus(codec)) {
+ intel_haswell_enable_all_pins(codec, true);
+ intel_haswell_fixup_enable_dp12(codec);
+ }
+@@ -2180,7 +2183,7 @@ static int patch_generic_hdmi(struct hda_codec *codec)
+ return -EINVAL;
+ }
+ codec->patch_ops = generic_hdmi_patch_ops;
+- if (is_haswell(codec)) {
++ if (is_haswell_plus(codec)) {
+ codec->patch_ops.set_power_state = haswell_set_power_state;
+ codec->dp_mst = true;
+ }
+@@ -2846,6 +2849,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x80862805, .name = "CougarPoint HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
++{ .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
+@@ -2901,6 +2905,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862804");
+ MODULE_ALIAS("snd-hda-codec-id:80862805");
+ MODULE_ALIAS("snd-hda-codec-id:80862806");
+ MODULE_ALIAS("snd-hda-codec-id:80862807");
++MODULE_ALIAS("snd-hda-codec-id:80862808");
+ MODULE_ALIAS("snd-hda-codec-id:80862880");
+ MODULE_ALIAS("snd-hda-codec-id:80862882");
+ MODULE_ALIAS("snd-hda-codec-id:808629fb");
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index 5f728808eed4..8bddf3f20a5e 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -336,6 +336,7 @@ static bool max98090_readable_register(struct device *dev, unsigned int reg)
+ case M98090_REG_RECORD_TDM_SLOT:
+ case M98090_REG_SAMPLE_RATE:
+ case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E:
++ case M98090_REG_REVISION_ID:
+ return true;
+ default:
+ return false;
diff --git a/1016_linux-3.12.17.patch b/1016_linux-3.12.17.patch
new file mode 100644
index 00000000..289ba79e
--- /dev/null
+++ b/1016_linux-3.12.17.patch
@@ -0,0 +1,1896 @@
+diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
+index d29dea0f3232..7b0dcdb57173 100644
+--- a/Documentation/i2c/busses/i2c-i801
++++ b/Documentation/i2c/busses/i2c-i801
+@@ -25,6 +25,7 @@ Supported adapters:
+ * Intel Avoton (SOC)
+ * Intel Wellsburg (PCH)
+ * Intel Coleto Creek (PCH)
++ * Intel Wildcat Point-LP (PCH)
+ Datasheets: Publicly available at the Intel website
+
+ On Intel Patsburg and later chipsets, both the normal host SMBus controller
+diff --git a/Makefile b/Makefile
+index 4aab3be88e9b..fbd1ee8afea8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
+index 5be8e472f57d..65fc3979c2f1 100644
+--- a/arch/s390/kernel/vdso32/clock_gettime.S
++++ b/arch/s390/kernel/vdso32/clock_gettime.S
+@@ -46,18 +46,13 @@ __kernel_clock_gettime:
+ jnm 3f
+ a %r0,__VDSO_TK_MULT(%r5)
+ 3: alr %r0,%r2
+- al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
+- al %r1,__VDSO_XTIME_NSEC+4(%r5)
+- brc 12,4f
+- ahi %r0,1
+-4: al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
++ al %r0,__VDSO_WTOM_NSEC(%r5)
+ al %r1,__VDSO_WTOM_NSEC+4(%r5)
+ brc 12,5f
+ ahi %r0,1
+ 5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ srdl %r0,0(%r2) /* >> tk->shift */
+- l %r2,__VDSO_XTIME_SEC+4(%r5)
+- al %r2,__VDSO_WTOM_SEC+4(%r5)
++ l %r2,__VDSO_WTOM_SEC+4(%r5)
+ cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
+ jne 1b
+ basr %r5,0
+diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
+index 0add1072ba30..c09cda31c894 100644
+--- a/arch/s390/kernel/vdso64/clock_gettime.S
++++ b/arch/s390/kernel/vdso64/clock_gettime.S
+@@ -35,13 +35,11 @@ __kernel_clock_gettime:
+ jnz 0b
+ stck 48(%r15) /* Store TOD clock */
+ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+- lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
+- alg %r0,__VDSO_WTOM_SEC(%r5) /* + wall_to_monotonic.sec */
++ lg %r0,__VDSO_WTOM_SEC(%r5)
+ lg %r1,48(%r15)
+ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
+- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
+- alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
++ alg %r1,__VDSO_WTOM_NSEC(%r5)
+ srlg %r1,%r1,0(%r2) /* >> tk->shift */
+ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
+ jne 0b
+diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
+index 50226c4b86ed..e52947f80e68 100644
+--- a/arch/x86/crypto/sha256_ssse3_glue.c
++++ b/arch/x86/crypto/sha256_ssse3_glue.c
+@@ -281,7 +281,7 @@ static int __init sha256_ssse3_mod_init(void)
+ /* allow AVX to override SSSE3, it's a little faster */
+ if (avx_usable()) {
+ #ifdef CONFIG_AS_AVX2
+- if (boot_cpu_has(X86_FEATURE_AVX2))
++ if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2))
+ sha256_transform_asm = sha256_transform_rorx;
+ else
+ #endif
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 5ad38ad07890..bbc8b12fa443 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -445,20 +445,10 @@ static inline int pte_same(pte_t a, pte_t b)
+ return a.pte == b.pte;
+ }
+
+-static inline int pteval_present(pteval_t pteval)
+-{
+- /*
+- * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
+- * way clearly states that the intent is that protnone and numa
+- * hinting ptes are considered present for the purposes of
+- * pagetable operations like zapping, protection changes, gup etc.
+- */
+- return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
+-}
+-
+ static inline int pte_present(pte_t a)
+ {
+- return pteval_present(pte_flags(a));
++ return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
++ _PAGE_NUMA);
+ }
+
+ #define pte_accessible pte_accessible
+diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
+index d35f24e231cd..1306d117967d 100644
+--- a/arch/x86/include/asm/topology.h
++++ b/arch/x86/include/asm/topology.h
+@@ -119,9 +119,10 @@ static inline void setup_node_to_cpumask_map(void) { }
+
+ extern const struct cpumask *cpu_coregroup_mask(int cpu);
+
+-#ifdef ENABLE_TOPO_DEFINES
+ #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
+ #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
++
++#ifdef ENABLE_TOPO_DEFINES
+ #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
+ #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
+ #endif
+diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
+index 266ca912f62e..5ecf65117e6f 100644
+--- a/arch/x86/mm/srat.c
++++ b/arch/x86/mm/srat.c
+@@ -42,15 +42,25 @@ static __init inline int srat_disabled(void)
+ return acpi_numa < 0;
+ }
+
+-/* Callback for SLIT parsing */
++/*
++ * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
++ * I/O localities since SRAT does not list them. I/O localities are
++ * not supported at this point.
++ */
+ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
+ {
+ int i, j;
+
+- for (i = 0; i < slit->locality_count; i++)
+- for (j = 0; j < slit->locality_count; j++)
++ for (i = 0; i < slit->locality_count; i++) {
++ if (pxm_to_node(i) == NUMA_NO_NODE)
++ continue;
++ for (j = 0; j < slit->locality_count; j++) {
++ if (pxm_to_node(j) == NUMA_NO_NODE)
++ continue;
+ numa_set_distance(pxm_to_node(i), pxm_to_node(j),
+ slit->entry[slit->locality_count * i + j]);
++ }
++ }
+ }
+
+ /* Callback for Proximity Domain -> x2APIC mapping */
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index d5af43af64dc..fdc3ba28ca38 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+ /* Assume pteval_t is equivalent to all the other *val_t types. */
+ static pteval_t pte_mfn_to_pfn(pteval_t val)
+ {
+- if (pteval_present(val)) {
++ if (val & _PAGE_PRESENT) {
+ unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+ unsigned long pfn = mfn_to_pfn(mfn);
+
+@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
+
+ static pteval_t pte_pfn_to_mfn(pteval_t val)
+ {
+- if (pteval_present(val)) {
++ if (val & _PAGE_PRESENT) {
+ unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+ pteval_t flags = val & PTE_FLAGS_MASK;
+ unsigned long mfn;
+diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
+index f86dc5fcc4ad..d9351ee19f18 100644
+--- a/crypto/asymmetric_keys/x509_parser.h
++++ b/crypto/asymmetric_keys/x509_parser.h
+@@ -9,6 +9,7 @@
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
++#include <linux/time.h>
+ #include <crypto/public_key.h>
+
+ struct x509_certificate {
+diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
+index d2515435e23f..8fb295350efb 100644
+--- a/drivers/block/aoe/aoecmd.c
++++ b/drivers/block/aoe/aoecmd.c
+@@ -905,7 +905,7 @@ bio_pageinc(struct bio *bio)
+ /* Non-zero page count for non-head members of
+ * compound pages is no longer allowed by the kernel.
+ */
+- page = compound_trans_head(bv->bv_page);
++ page = compound_head(bv->bv_page);
+ atomic_inc(&page->_count);
+ }
+ }
+@@ -918,7 +918,7 @@ bio_pagedec(struct bio *bio)
+ int i;
+
+ bio_for_each_segment(bv, bio, i) {
+- page = compound_trans_head(bv->bv_page);
++ page = compound_head(bv->bv_page);
+ atomic_dec(&page->_count);
+ }
+ }
+diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
+index 1f7b4caefb6e..c7fa2e420d49 100644
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -536,7 +536,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ dev_priv->gtt.base.start / PAGE_SIZE,
+ dev_priv->gtt.base.total / PAGE_SIZE,
+- false);
++ true);
+ }
+
+ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+index 8160fbddbcfe..200e8564c59d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -66,6 +66,7 @@ bool nouveau_is_v1_dsm(void) {
+ #define NOUVEAU_DSM_HAS_MUX 0x1
+ #define NOUVEAU_DSM_HAS_OPT 0x2
+
++#ifdef CONFIG_VGA_SWITCHEROO
+ static const char nouveau_dsm_muid[] = {
+ 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
+ 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
+@@ -378,6 +379,11 @@ void nouveau_unregister_dsm_handler(void)
+ if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
+ vga_switcheroo_unregister_handler();
+ }
++#else
++void nouveau_register_dsm_handler(void) {}
++void nouveau_unregister_dsm_handler(void) {}
++void nouveau_switcheroo_optimus_dsm(void) {}
++#endif
+
+ /* retrieve the ROM in 4k blocks */
+ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index cd30d98ac510..4e901081e287 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -109,6 +109,7 @@ config I2C_I801
+ Avoton (SOC)
+ Wellsburg (PCH)
+ Coleto Creek (PCH)
++ Wildcat Point-LP (PCH)
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-i801.
+diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
+index b2b8aa9adc0e..c5121459196b 100644
+--- a/drivers/i2c/busses/i2c-cpm.c
++++ b/drivers/i2c/busses/i2c-cpm.c
+@@ -40,7 +40,9 @@
+ #include <linux/i2c.h>
+ #include <linux/io.h>
+ #include <linux/dma-mapping.h>
++#include <linux/of_address.h>
+ #include <linux/of_device.h>
++#include <linux/of_irq.h>
+ #include <linux/of_platform.h>
+ #include <sysdev/fsl_soc.h>
+ #include <asm/cpm.h>
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 4296d1721272..737e29866887 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -59,6 +59,7 @@
+ Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes
+ Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes
+ Coleto Creek (PCH) 0x23b0 32 hard yes yes yes
++ Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
+
+ Features supported by this driver:
+ Software PEC no
+@@ -177,6 +178,7 @@
+ #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e
+ #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
++#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2
+
+ struct i801_mux_config {
+ char *gpio_chip;
+@@ -819,6 +821,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
+ { 0, }
+ };
+
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index d47bb0f267f7..53235814ea0f 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -1,7 +1,7 @@
+ /*
+ * intel_idle.c - native hardware idle loop for modern Intel processors
+ *
+- * Copyright (c) 2010, Intel Corporation.
++ * Copyright (c) 2013, Intel Corporation.
+ * Len Brown <len.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+@@ -329,6 +329,22 @@ static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
+ {
+ .enter = NULL }
+ };
++static struct cpuidle_state avn_cstates[] __initdata = {
++ {
++ .name = "C1-AVN",
++ .desc = "MWAIT 0x00",
++ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
++ .exit_latency = 2,
++ .target_residency = 2,
++ .enter = &intel_idle },
++ {
++ .name = "C6-AVN",
++ .desc = "MWAIT 0x51",
++ .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++ .exit_latency = 15,
++ .target_residency = 45,
++ .enter = &intel_idle },
++};
+
+ /**
+ * intel_idle
+@@ -465,6 +481,11 @@ static const struct idle_cpu idle_cpu_hsw = {
+ .disable_promotion_to_c1e = true,
+ };
+
++static const struct idle_cpu idle_cpu_avn = {
++ .state_table = avn_cstates,
++ .disable_promotion_to_c1e = true,
++};
++
+ #define ICPU(model, cpu) \
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
+
+@@ -486,6 +507,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
+ ICPU(0x3f, idle_cpu_hsw),
+ ICPU(0x45, idle_cpu_hsw),
+ ICPU(0x46, idle_cpu_hsw),
++ ICPU(0x4D, idle_cpu_avn),
+ {}
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
+diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
+index 888a81a7ea3d..0aaea7ad6cee 100644
+--- a/drivers/input/mouse/cypress_ps2.c
++++ b/drivers/input/mouse/cypress_ps2.c
+@@ -410,7 +410,6 @@ static int cypress_set_input_params(struct input_dev *input,
+ __clear_bit(REL_X, input->relbit);
+ __clear_bit(REL_Y, input->relbit);
+
+- __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(BTN_RIGHT, input->keybit);
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 26386f9d2569..d8d49d10f9bb 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -265,11 +265,22 @@ static int synaptics_identify(struct psmouse *psmouse)
+ * Read touchpad resolution and maximum reported coordinates
+ * Resolution is left zero if touchpad does not support the query
+ */
++
++static const int *quirk_min_max;
++
+ static int synaptics_resolution(struct psmouse *psmouse)
+ {
+ struct synaptics_data *priv = psmouse->private;
+ unsigned char resp[3];
+
++ if (quirk_min_max) {
++ priv->x_min = quirk_min_max[0];
++ priv->x_max = quirk_min_max[1];
++ priv->y_min = quirk_min_max[2];
++ priv->y_max = quirk_min_max[3];
++ return 0;
++ }
++
+ if (SYN_ID_MAJOR(priv->identity) < 4)
+ return 0;
+
+@@ -1485,10 +1496,54 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
+ { }
+ };
+
++static const struct dmi_system_id min_max_dmi_table[] __initconst = {
++#if defined(CONFIG_DMI)
++ {
++ /* Lenovo ThinkPad Helix */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
++ },
++ .driver_data = (int []){1024, 5052, 2258, 4832},
++ },
++ {
++ /* Lenovo ThinkPad X240 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
++ },
++ .driver_data = (int []){1232, 5710, 1156, 4696},
++ },
++ {
++ /* Lenovo ThinkPad T440s */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
++ },
++ .driver_data = (int []){1024, 5112, 2024, 4832},
++ },
++ {
++ /* Lenovo ThinkPad T540p */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
++ },
++ .driver_data = (int []){1024, 5056, 2058, 4832},
++ },
++#endif
++ { }
++};
++
+ void __init synaptics_module_init(void)
+ {
++ const struct dmi_system_id *min_max_dmi;
++
+ impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
+ broken_olpc_ec = dmi_check_system(olpc_dmi_table);
++
++ min_max_dmi = dmi_first_match(min_max_dmi_table);
++ if (min_max_dmi)
++ quirk_min_max = min_max_dmi->driver_data;
+ }
+
+ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
+diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
+index 4c842c320c2e..b604564dec5c 100644
+--- a/drivers/input/mousedev.c
++++ b/drivers/input/mousedev.c
+@@ -67,7 +67,6 @@ struct mousedev {
+ struct device dev;
+ struct cdev cdev;
+ bool exist;
+- bool is_mixdev;
+
+ struct list_head mixdev_node;
+ bool opened_by_mixdev;
+@@ -77,6 +76,9 @@ struct mousedev {
+ int old_x[4], old_y[4];
+ int frac_dx, frac_dy;
+ unsigned long touch;
++
++ int (*open_device)(struct mousedev *mousedev);
++ void (*close_device)(struct mousedev *mousedev);
+ };
+
+ enum mousedev_emul {
+@@ -116,9 +118,6 @@ static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
+ static struct mousedev *mousedev_mix;
+ static LIST_HEAD(mousedev_mix_list);
+
+-static void mixdev_open_devices(void);
+-static void mixdev_close_devices(void);
+-
+ #define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
+ #define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
+
+@@ -428,9 +427,7 @@ static int mousedev_open_device(struct mousedev *mousedev)
+ if (retval)
+ return retval;
+
+- if (mousedev->is_mixdev)
+- mixdev_open_devices();
+- else if (!mousedev->exist)
++ if (!mousedev->exist)
+ retval = -ENODEV;
+ else if (!mousedev->open++) {
+ retval = input_open_device(&mousedev->handle);
+@@ -446,9 +443,7 @@ static void mousedev_close_device(struct mousedev *mousedev)
+ {
+ mutex_lock(&mousedev->mutex);
+
+- if (mousedev->is_mixdev)
+- mixdev_close_devices();
+- else if (mousedev->exist && !--mousedev->open)
++ if (mousedev->exist && !--mousedev->open)
+ input_close_device(&mousedev->handle);
+
+ mutex_unlock(&mousedev->mutex);
+@@ -459,21 +454,29 @@ static void mousedev_close_device(struct mousedev *mousedev)
+ * stream. Note that this function is called with mousedev_mix->mutex
+ * held.
+ */
+-static void mixdev_open_devices(void)
++static int mixdev_open_devices(struct mousedev *mixdev)
+ {
+- struct mousedev *mousedev;
++ int error;
++
++ error = mutex_lock_interruptible(&mixdev->mutex);
++ if (error)
++ return error;
+
+- if (mousedev_mix->open++)
+- return;
++ if (!mixdev->open++) {
++ struct mousedev *mousedev;
+
+- list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+- if (!mousedev->opened_by_mixdev) {
+- if (mousedev_open_device(mousedev))
+- continue;
++ list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
++ if (!mousedev->opened_by_mixdev) {
++ if (mousedev_open_device(mousedev))
++ continue;
+
+- mousedev->opened_by_mixdev = true;
++ mousedev->opened_by_mixdev = true;
++ }
+ }
+ }
++
++ mutex_unlock(&mixdev->mutex);
++ return 0;
+ }
+
+ /*
+@@ -481,19 +484,22 @@ static void mixdev_open_devices(void)
+ * device. Note that this function is called with mousedev_mix->mutex
+ * held.
+ */
+-static void mixdev_close_devices(void)
++static void mixdev_close_devices(struct mousedev *mixdev)
+ {
+- struct mousedev *mousedev;
++ mutex_lock(&mixdev->mutex);
+
+- if (--mousedev_mix->open)
+- return;
++ if (!--mixdev->open) {
++ struct mousedev *mousedev;
+
+- list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+- if (mousedev->opened_by_mixdev) {
+- mousedev->opened_by_mixdev = false;
+- mousedev_close_device(mousedev);
++ list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
++ if (mousedev->opened_by_mixdev) {
++ mousedev->opened_by_mixdev = false;
++ mousedev_close_device(mousedev);
++ }
+ }
+ }
++
++ mutex_unlock(&mixdev->mutex);
+ }
+
+
+@@ -522,7 +528,7 @@ static int mousedev_release(struct inode *inode, struct file *file)
+ mousedev_detach_client(mousedev, client);
+ kfree(client);
+
+- mousedev_close_device(mousedev);
++ mousedev->close_device(mousedev);
+
+ return 0;
+ }
+@@ -550,7 +556,7 @@ static int mousedev_open(struct inode *inode, struct file *file)
+ client->mousedev = mousedev;
+ mousedev_attach_client(mousedev, client);
+
+- error = mousedev_open_device(mousedev);
++ error = mousedev->open_device(mousedev);
+ if (error)
+ goto err_free_client;
+
+@@ -861,16 +867,21 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
+
+ if (mixdev) {
+ dev_set_name(&mousedev->dev, "mice");
++
++ mousedev->open_device = mixdev_open_devices;
++ mousedev->close_device = mixdev_close_devices;
+ } else {
+ int dev_no = minor;
+ /* Normalize device number if it falls into legacy range */
+ if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS)
+ dev_no -= MOUSEDEV_MINOR_BASE;
+ dev_set_name(&mousedev->dev, "mouse%d", dev_no);
++
++ mousedev->open_device = mousedev_open_device;
++ mousedev->close_device = mousedev_close_device;
+ }
+
+ mousedev->exist = true;
+- mousedev->is_mixdev = mixdev;
+ mousedev->handle.dev = input_get_device(dev);
+ mousedev->handle.name = dev_name(&mousedev->dev);
+ mousedev->handle.handler = handler;
+@@ -919,7 +930,7 @@ static void mousedev_destroy(struct mousedev *mousedev)
+ device_del(&mousedev->dev);
+ mousedev_cleanup(mousedev);
+ input_free_minor(MINOR(mousedev->dev.devt));
+- if (!mousedev->is_mixdev)
++ if (mousedev != mousedev_mix)
+ input_unregister_handle(&mousedev->handle);
+ put_device(&mousedev->dev);
+ }
+diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
+index a6debe13d5b9..44a1fb6fa4a0 100644
+--- a/drivers/input/tablet/wacom_sys.c
++++ b/drivers/input/tablet/wacom_sys.c
+@@ -722,7 +722,7 @@ static int wacom_led_control(struct wacom *wacom)
+ return -ENOMEM;
+
+ if (wacom->wacom_wac.features.type >= INTUOS5S &&
+- wacom->wacom_wac.features.type <= INTUOS5L) {
++ wacom->wacom_wac.features.type <= INTUOSPL) {
+ /*
+ * Touch Ring and crop mark LED luminance may take on
+ * one of four values:
+@@ -984,6 +984,9 @@ static int wacom_initialize_leds(struct wacom *wacom)
+ case INTUOS5S:
+ case INTUOS5:
+ case INTUOS5L:
++ case INTUOSPS:
++ case INTUOSPM:
++ case INTUOSPL:
+ wacom->led.select[0] = 0;
+ wacom->led.select[1] = 0;
+ wacom->led.llv = 32;
+@@ -1027,6 +1030,9 @@ static void wacom_destroy_leds(struct wacom *wacom)
+ case INTUOS5S:
+ case INTUOS5:
+ case INTUOS5L:
++ case INTUOSPS:
++ case INTUOSPM:
++ case INTUOSPL:
+ sysfs_remove_group(&wacom->intf->dev.kobj,
+ &intuos5_led_attr_group);
+ break;
+@@ -1305,7 +1311,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
+ * HID descriptor. If this is the touch interface (wMaxPacketSize
+ * of WACOM_PKGLEN_BBTOUCH3), override the table values.
+ */
+- if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
++ if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
+ if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) {
+ features->device_type = BTN_TOOL_FINGER;
+ features->pktlen = WACOM_PKGLEN_BBTOUCH3;
+diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
+index c59b797eeafa..0091bdedb240 100644
+--- a/drivers/input/tablet/wacom_wac.c
++++ b/drivers/input/tablet/wacom_wac.c
+@@ -621,14 +621,14 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
+ } else {
+ input_report_abs(input, ABS_MISC, 0);
+ }
+- } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
++ } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
+ int i;
+
+ /* Touch ring mode switch has no capacitive sensor */
+ input_report_key(input, BTN_0, (data[3] & 0x01));
+
+ /*
+- * ExpressKeys on Intuos5 have a capacitive sensor in
++ * ExpressKeys on Intuos5/Intuos Pro have a capacitive sensor in
+ * addition to the mechanical switch. Switch data is
+ * stored in data[4], capacitive data in data[5].
+ */
+@@ -716,7 +716,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
+ features->type == INTUOS4 ||
+ features->type == INTUOS4S ||
+ features->type == INTUOS5 ||
+- features->type == INTUOS5S)) {
++ features->type == INTUOS5S ||
++ features->type == INTUOSPM ||
++ features->type == INTUOSPS)) {
+
+ return 0;
+ }
+@@ -769,8 +771,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
+
+ } else if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
+ /* I4 mouse */
+- if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
+- (features->type >= INTUOS5S && features->type <= INTUOS5L)) {
++ if (features->type >= INTUOS4S && features->type <= INTUOSPL) {
+ input_report_key(input, BTN_LEFT, data[6] & 0x01);
+ input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
+ input_report_key(input, BTN_RIGHT, data[6] & 0x04);
+@@ -797,7 +798,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
+ }
+ }
+ } else if ((features->type < INTUOS3S || features->type == INTUOS3L ||
+- features->type == INTUOS4L || features->type == INTUOS5L) &&
++ features->type == INTUOS4L || features->type == INTUOS5L ||
++ features->type == INTUOSPL) &&
+ wacom->tool[idx] == BTN_TOOL_LENS) {
+ /* Lens cursor packets */
+ input_report_key(input, BTN_LEFT, data[8] & 0x01);
+@@ -1107,6 +1109,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
+
+ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
+ {
++ struct wacom_features *features = &wacom->features;
+ struct input_dev *input = wacom->input;
+ bool touch = data[1] & 0x80;
+ int slot = input_mt_get_slot_by_key(input, data[0]);
+@@ -1122,14 +1125,23 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
+ if (touch) {
+ int x = (data[2] << 4) | (data[4] >> 4);
+ int y = (data[3] << 4) | (data[4] & 0x0f);
+- int a = data[5];
++ int width, height;
+
+- // "a" is a scaled-down area which we assume is roughly
+- // circular and which can be described as: a=(pi*r^2)/C.
+- int x_res = input_abs_get_res(input, ABS_X);
+- int y_res = input_abs_get_res(input, ABS_Y);
+- int width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+- int height = width * y_res / x_res;
++ if (features->type >= INTUOSPS && features->type <= INTUOSPL) {
++ width = data[5];
++ height = data[6];
++ } else {
++ /*
++ * "a" is a scaled-down area which we assume is
++ * roughly circular and which can be described as:
++ * a=(pi*r^2)/C.
++ */
++ int a = data[5];
++ int x_res = input_abs_get_res(input, ABS_X);
++ int y_res = input_abs_get_res(input, ABS_Y);
++ width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
++ height = width * y_res / x_res;
++ }
+
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+@@ -1337,6 +1349,9 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
+ case INTUOS5S:
+ case INTUOS5:
+ case INTUOS5L:
++ case INTUOSPS:
++ case INTUOSPM:
++ case INTUOSPL:
+ if (len == WACOM_PKGLEN_BBTOUCH3)
+ sync = wacom_bpt3_touch(wacom_wac);
+ else
+@@ -1420,7 +1435,7 @@ void wacom_setup_device_quirks(struct wacom_features *features)
+
+ /* these device have multiple inputs */
+ if (features->type >= WIRELESS ||
+- (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
++ (features->type >= INTUOS5S && features->type <= INTUOSPL) ||
+ (features->oVid && features->oPid))
+ features->quirks |= WACOM_QUIRK_MULTI_INPUT;
+
+@@ -1627,6 +1642,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
+
+ case INTUOS5:
+ case INTUOS5L:
++ case INTUOSPM:
++ case INTUOSPL:
+ if (features->device_type == BTN_TOOL_PEN) {
+ __set_bit(BTN_7, input_dev->keybit);
+ __set_bit(BTN_8, input_dev->keybit);
+@@ -1634,6 +1651,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
+ /* fall through */
+
+ case INTUOS5S:
++ case INTUOSPS:
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ if (features->device_type == BTN_TOOL_PEN) {
+@@ -1952,6 +1970,18 @@ static const struct wacom_features wacom_features_0x29 =
+ static const struct wacom_features wacom_features_0x2A =
+ { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
+ 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
++static const struct wacom_features wacom_features_0x314 =
++ { "Wacom Intuos Pro S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
++ 63, INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
++ .touch_max = 16 };
++static const struct wacom_features wacom_features_0x315 =
++ { "Wacom Intuos Pro M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
++ 63, INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
++ .touch_max = 16 };
++static const struct wacom_features wacom_features_0x317 =
++ { "Wacom Intuos Pro L", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047,
++ 63, INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
++ .touch_max = 16 };
+ static const struct wacom_features wacom_features_0xF4 =
+ { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
+ 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+@@ -2259,6 +2289,9 @@ const struct usb_device_id wacom_ids[] = {
+ { USB_DEVICE_WACOM(0x300) },
+ { USB_DEVICE_WACOM(0x301) },
+ { USB_DEVICE_WACOM(0x304) },
++ { USB_DEVICE_DETAILED(0x314, USB_CLASS_HID, 0, 0) },
++ { USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) },
++ { USB_DEVICE_DETAILED(0x317, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_WACOM(0x4001) },
+ { USB_DEVICE_WACOM(0x47) },
+ { USB_DEVICE_WACOM(0xF4) },
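
/*
 * Editor's note: a minimal userspace sketch of the legacy contact-size math
 * that the wacom_bpt3_touch_msg() hunk above keeps for non-Intuos-Pro
 * devices (Intuos Pro now reports width/height directly in data[5]/data[6]).
 * CONTACT_AREA_SCALE is an assumed stand-in for the driver's
 * WACOM_CONTACT_AREA_SCALE; the 1/pi factor of a=(pi*r^2)/C is folded into
 * that constant. Illustration only, not driver code. Build with -lm.
 */
#include <math.h>
#include <stdio.h>

#define CONTACT_AREA_SCALE 2607        /* assumption, see note above */

static void legacy_touch_size(int a, int x_res, int y_res,
                              int *width, int *height)
{
        /* width = 2r with r ~= sqrt(a * C); the driver uses int_sqrt() */
        *width = 2 * (int)sqrt((double)a * CONTACT_AREA_SCALE);
        /* scale height by the Y/X resolution ratio (anisotropic tablets) */
        *height = *width * y_res / x_res;
}

int main(void)
{
        int w, h;

        legacy_touch_size(40, 200, 100, &w, &h);
        printf("width=%d height=%d\n", w, h);  /* width=644 height=322 */
        return 0;
}
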
+diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
+index dfc9e08e7f70..d6dec5800791 100644
+--- a/drivers/input/tablet/wacom_wac.h
++++ b/drivers/input/tablet/wacom_wac.h
+@@ -76,6 +76,9 @@ enum {
+ INTUOS5S,
+ INTUOS5,
+ INTUOS5L,
++ INTUOSPS,
++ INTUOSPM,
++ INTUOSPL,
+ WACOM_21UX2,
+ WACOM_22HD,
+ DTK,
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index e35bac7cfdf1..71d9cad02704 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -88,8 +88,9 @@
+ #define MVNETA_TX_IN_PRGRS BIT(1)
+ #define MVNETA_TX_FIFO_EMPTY BIT(8)
+ #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
+-#define MVNETA_SGMII_SERDES_CFG 0x24A0
++#define MVNETA_SERDES_CFG 0x24A0
+ #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
++#define MVNETA_RGMII_SERDES_PROTO 0x0667
+ #define MVNETA_TYPE_PRIO 0x24bc
+ #define MVNETA_FORCE_UNI BIT(21)
+ #define MVNETA_TXQ_CMD_1 0x24e4
+@@ -121,7 +122,7 @@
+ #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+ #define MVNETA_GMAC0_PORT_ENABLE BIT(0)
+ #define MVNETA_GMAC_CTRL_2 0x2c08
+-#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
++#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
+ #define MVNETA_GMAC2_PORT_RGMII BIT(4)
+ #define MVNETA_GMAC2_PORT_RESET BIT(6)
+ #define MVNETA_GMAC_STATUS 0x2c10
+@@ -665,35 +666,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+ }
+
+-
+-
+-/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
+-static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
+-{
+- u32 val;
+-
+- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+-
+- if (enable)
+- val |= MVNETA_GMAC2_PORT_RGMII;
+- else
+- val &= ~MVNETA_GMAC2_PORT_RGMII;
+-
+- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+-}
+-
+-/* Config SGMII port */
+-static void mvneta_port_sgmii_config(struct mvneta_port *pp)
+-{
+- u32 val;
+-
+- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+- val |= MVNETA_GMAC2_PSC_ENABLE;
+- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+-
+- mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+-}
+-
+ /* Start the Ethernet port RX and TX activity */
+ static void mvneta_port_up(struct mvneta_port *pp)
+ {
+@@ -2723,12 +2695,15 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
+
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII)
+- mvneta_port_sgmii_config(pp);
++ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
++ else
++ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
+
+- mvneta_gmac_rgmii_set(pp, 1);
++ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
++
++ val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+
+ /* Cancel Port Reset */
+- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ val &= ~MVNETA_GMAC2_PORT_RESET;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 4fb7a8f83c8a..54af4e933695 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -186,12 +186,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn)
+ if (pfn_valid(pfn)) {
+ bool reserved;
+ struct page *tail = pfn_to_page(pfn);
+- struct page *head = compound_trans_head(tail);
++ struct page *head = compound_head(tail);
+ reserved = !!(PageReserved(head));
+ if (head != tail) {
+ /*
+ * "head" is not a dangling pointer
+- * (compound_trans_head takes care of that)
++ * (compound_head takes care of that)
+ * but the hugepage may have been split
+ * from under us (and we may not hold a
+ * reference count on the head page so it can
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index dacaf74256a3..8659eb160b4d 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1916,6 +1916,9 @@ int fb_get_options(const char *name, char **option)
+ options = opt + name_len + 1;
+ }
+ }
++ /* No match, pass global option */
++ if (!options && option && fb_mode_option)
++ options = kstrdup(fb_mode_option, GFP_KERNEL);
+ if (options && !strncmp(options, "off", 3))
+ retval = 1;
+
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index b232908a6192..4c437efa9d91 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -405,11 +405,25 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
+ state = BP_EAGAIN;
+ break;
+ }
++ scrub_page(page);
+
+- pfn = page_to_pfn(page);
+- frame_list[i] = pfn_to_mfn(pfn);
++ frame_list[i] = page_to_pfn(page);
++ }
+
+- scrub_page(page);
++ /*
++ * Ensure that ballooned highmem pages don't have kmaps.
++ *
++ * Do this before changing the p2m as kmap_flush_unused()
++ * reads PTEs to obtain pages (and hence needs the original
++ * p2m entry).
++ */
++ kmap_flush_unused();
++
++ /* Update direct mapping, invalidate P2M, and add to balloon. */
++ for (i = 0; i < nr_pages; i++) {
++ pfn = frame_list[i];
++ frame_list[i] = pfn_to_mfn(pfn);
++ page = pfn_to_page(pfn);
+
+ /*
+ * Ballooned out frames are effectively replaced with
+@@ -433,11 +447,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
+ }
+ put_balloon_scratch_page();
+
+- balloon_append(pfn_to_page(pfn));
++ balloon_append(page);
+ }
+
+- /* Ensure that ballooned highmem pages don't have kmaps. */
+- kmap_flush_unused();
+ flush_tlb_all();
+
+ set_xen_guest_handle(reservation.extent_start, frame_list);
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 4021e0172602..30b38e23caa7 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2846,9 +2846,9 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
+ u32 dlen = ACCESS_ONCE(name->len);
+ char *p;
+
+- if (*buflen < dlen + 1)
+- return -ENAMETOOLONG;
+ *buflen -= dlen + 1;
++ if (*buflen < 0)
++ return -ENAMETOOLONG;
+ p = *buffer -= dlen + 1;
+ *p++ = '/';
+ while (dlen--) {
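
/*
 * Editor's demo of why the prepend_name() hunk above subtracts before
 * testing: with a signed buffer counter and an unsigned length,
 * `*buflen < dlen + 1` promotes the signed side to unsigned, so an
 * already-negative *buflen compares as a huge value and the
 * -ENAMETOOLONG check is silently skipped. Standalone illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static int old_check(int buflen, uint32_t dlen)
{
        return buflen < dlen + 1;       /* buflen converted to unsigned! */
}

static int new_check(int buflen, uint32_t dlen)
{
        buflen -= dlen + 1;             /* subtract first ... */
        return buflen < 0;              /* ... then test in signed arithmetic */
}

int main(void)
{
        /* a buffer that is already exhausted (negative remaining space) */
        printf("old: overflow detected = %d\n", old_check(-5, 3));  /* 0: bug */
        printf("new: overflow detected = %d\n", new_check(-5, 3));  /* 1 */
        return 0;
}
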
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index d90909ec6aa6..a5e34dd6a32c 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -649,6 +649,7 @@ static void process_sctp_notification(struct connection *con,
+ struct msghdr *msg, char *buf)
+ {
+ union sctp_notification *sn = (union sctp_notification *)buf;
++ struct linger linger;
+
+ switch (sn->sn_header.sn_type) {
+ case SCTP_SEND_FAILED:
+@@ -727,6 +728,13 @@ static void process_sctp_notification(struct connection *con,
+ }
+ add_sock(new_con->sock, new_con);
+
++ linger.l_onoff = 1;
++ linger.l_linger = 0;
++ ret = kernel_setsockopt(new_con->sock, SOL_SOCKET, SO_LINGER,
++ (char *)&linger, sizeof(linger));
++ if (ret < 0)
++ log_print("set socket option SO_LINGER failed");
++
+ log_print("connecting to %d sctp association %d",
+ nodeid, (int)sn->sn_assoc_change.sac_assoc_id);
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 55fe412b2410..786bf0708904 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -38,6 +38,7 @@
+ #include <linux/slab.h>
+ #include <linux/ratelimit.h>
+ #include <linux/aio.h>
++#include <linux/bitops.h>
+
+ #include "ext4_jbd2.h"
+ #include "xattr.h"
+@@ -3922,18 +3923,20 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
+ void ext4_set_inode_flags(struct inode *inode)
+ {
+ unsigned int flags = EXT4_I(inode)->i_flags;
++ unsigned int new_fl = 0;
+
+- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ if (flags & EXT4_SYNC_FL)
+- inode->i_flags |= S_SYNC;
++ new_fl |= S_SYNC;
+ if (flags & EXT4_APPEND_FL)
+- inode->i_flags |= S_APPEND;
++ new_fl |= S_APPEND;
+ if (flags & EXT4_IMMUTABLE_FL)
+- inode->i_flags |= S_IMMUTABLE;
++ new_fl |= S_IMMUTABLE;
+ if (flags & EXT4_NOATIME_FL)
+- inode->i_flags |= S_NOATIME;
++ new_fl |= S_NOATIME;
+ if (flags & EXT4_DIRSYNC_FL)
+- inode->i_flags |= S_DIRSYNC;
++ new_fl |= S_DIRSYNC;
++ set_mask_bits(&inode->i_flags,
++ S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
+ }
+
+ /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
+diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
+index e1959efad64f..b5ebc2d7d80d 100644
+--- a/fs/fscache/object-list.c
++++ b/fs/fscache/object-list.c
+@@ -50,6 +50,8 @@ void fscache_objlist_add(struct fscache_object *obj)
+ struct fscache_object *xobj;
+ struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
+
++ ASSERT(RB_EMPTY_NODE(&obj->objlist_link));
++
+ write_lock(&fscache_object_list_lock);
+
+ while (*p) {
+@@ -75,6 +77,9 @@ void fscache_objlist_add(struct fscache_object *obj)
+ */
+ void fscache_objlist_remove(struct fscache_object *obj)
+ {
++ if (RB_EMPTY_NODE(&obj->objlist_link))
++ return;
++
+ write_lock(&fscache_object_list_lock);
+
+ BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
+diff --git a/fs/fscache/object.c b/fs/fscache/object.c
+index 86d75a60b20c..fec41344dacc 100644
+--- a/fs/fscache/object.c
++++ b/fs/fscache/object.c
+@@ -314,6 +314,9 @@ void fscache_object_init(struct fscache_object *object,
+ object->cache = cache;
+ object->cookie = cookie;
+ object->parent = NULL;
++#ifdef CONFIG_FSCACHE_OBJECT_LIST
++ RB_CLEAR_NODE(&object->objlist_link);
++#endif
+
+ object->oob_event_mask = 0;
+ for (t = object->oob_table; t->events; t++)
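
/*
 * Editor's aside on the guards added above: fscache_object_init() now
 * RB_CLEAR_NODE()s the objlist link so fscache_objlist_remove() can detect
 * "never inserted" and return early. Below is a standalone model of that
 * convention -- the kernel marks a detached rb_node with a self-referencing
 * parent pointer; the types and names here are illustrative, not kernel ones.
 */
#include <stdbool.h>
#include <stdio.h>

struct node {
        struct node *parent;    /* self-pointer means "not inserted" */
};

static void node_clear(struct node *n)        { n->parent = n; }
static bool node_empty(const struct node *n)  { return n->parent == n; }

static void tree_remove(struct node *n)
{
        if (node_empty(n))      /* never added: nothing to unlink */
                return;
        /* ... real unlink from the tree would happen here ... */
        node_clear(n);          /* mark removed again */
}

int main(void)
{
        struct node n;

        node_clear(&n);         /* the object_init path */
        tree_remove(&n);        /* safe even though n was never inserted */
        printf("empty = %d\n", node_empty(&n));
        return 0;
}
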
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index e81a1cae81b5..140280623348 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -276,6 +276,15 @@ out_eof:
+ return -EBADCOOKIE;
+ }
+
++static bool
++nfs_readdir_inode_mapping_valid(struct nfs_inode *nfsi)
++{
++ if (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))
++ return false;
++ smp_rmb();
++ return !test_bit(NFS_INO_INVALIDATING, &nfsi->flags);
++}
++
+ static
+ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
+ {
+@@ -289,8 +298,8 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
+ struct nfs_open_dir_context *ctx = desc->file->private_data;
+
+ new_pos = desc->current_index + i;
+- if (ctx->attr_gencount != nfsi->attr_gencount
+- || (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))) {
++ if (ctx->attr_gencount != nfsi->attr_gencount ||
++ !nfs_readdir_inode_mapping_valid(nfsi)) {
+ ctx->duped = 0;
+ ctx->attr_gencount = nfsi->attr_gencount;
+ } else if (new_pos < desc->ctx->pos) {
+@@ -1139,7 +1148,13 @@ out_zap_parent:
+ if (inode && S_ISDIR(inode->i_mode)) {
+ /* Purge readdir caches. */
+ nfs_zap_caches(inode);
+- if (dentry->d_flags & DCACHE_DISCONNECTED)
++ /*
++ * We can't d_drop the root of a disconnected tree:
++ * its d_hash is on the s_anon list and d_drop() would hide
++ * it from shrink_dcache_for_unmount(), leading to busy
++ * inodes on unmount and further oopses.
++ */
++ if (IS_ROOT(dentry))
+ goto out_valid;
+ }
+ /* If we have submounts, don't unhash ! */
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 91ff089d3412..af5f3ffcb157 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -223,14 +223,31 @@ out:
+ * Synchronous I/O uses a stack-allocated iocb. Thus we can't trust
+ * the iocb is still valid here if this is a synchronous request.
+ */
+-static void nfs_direct_complete(struct nfs_direct_req *dreq)
++static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
+ {
++ struct inode *inode = dreq->inode;
++
++ if (dreq->iocb && write) {
++ loff_t pos = dreq->iocb->ki_pos + dreq->count;
++
++ spin_lock(&inode->i_lock);
++ if (i_size_read(inode) < pos)
++ i_size_write(inode, pos);
++ spin_unlock(&inode->i_lock);
++ }
++
++ if (write)
++ nfs_zap_mapping(inode, inode->i_mapping);
++
++ inode_dio_done(inode);
++
+ if (dreq->iocb) {
+ long res = (long) dreq->error;
+ if (!res)
+ res = (long) dreq->count;
+ aio_complete(dreq->iocb, res, 0);
+ }
++
+ complete_all(&dreq->completion);
+
+ nfs_direct_req_release(dreq);
+@@ -273,7 +290,7 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
+ }
+ out_put:
+ if (put_dreq(dreq))
+- nfs_direct_complete(dreq);
++ nfs_direct_complete(dreq, false);
+ hdr->release(hdr);
+ }
+
+@@ -403,6 +420,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
+ loff_t pos, bool uio)
+ {
+ struct nfs_pageio_descriptor desc;
++ struct inode *inode = dreq->inode;
+ ssize_t result = -EINVAL;
+ size_t requested_bytes = 0;
+ unsigned long seg;
+@@ -411,6 +429,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
+ &nfs_direct_read_completion_ops);
+ get_dreq(dreq);
+ desc.pg_dreq = dreq;
++ atomic_inc(&inode->i_dio_count);
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ const struct iovec *vec = &iov[seg];
+@@ -430,12 +449,13 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
+ * generic layer handle the completion.
+ */
+ if (requested_bytes == 0) {
++ inode_dio_done(inode);
+ nfs_direct_req_release(dreq);
+ return result < 0 ? result : -EIO;
+ }
+
+ if (put_dreq(dreq))
+- nfs_direct_complete(dreq);
++ nfs_direct_complete(dreq, false);
+ return 0;
+ }
+
+@@ -473,12 +493,6 @@ out:
+ return result;
+ }
+
+-static void nfs_inode_dio_write_done(struct inode *inode)
+-{
+- nfs_zap_mapping(inode, inode->i_mapping);
+- inode_dio_done(inode);
+-}
+-
+ #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
+ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+ {
+@@ -594,8 +608,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
+ nfs_direct_write_reschedule(dreq);
+ break;
+ default:
+- nfs_inode_dio_write_done(dreq->inode);
+- nfs_direct_complete(dreq);
++ nfs_direct_complete(dreq, true);
+ }
+ }
+
+@@ -611,8 +624,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
+
+ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
+ {
+- nfs_inode_dio_write_done(inode);
+- nfs_direct_complete(dreq);
++ nfs_direct_complete(dreq, true);
+ }
+ #endif
+
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 0ee22ab9ef97..fdeeb28f287b 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -988,11 +988,11 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
+ if (ret < 0)
+ return ret;
+ }
+- spin_lock(&inode->i_lock);
+- nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
+- if (S_ISDIR(inode->i_mode))
++ if (S_ISDIR(inode->i_mode)) {
++ spin_lock(&inode->i_lock);
+ memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
+- spin_unlock(&inode->i_lock);
++ spin_unlock(&inode->i_lock);
++ }
+ nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
+ nfs_fscache_wait_on_invalidate(inode);
+
+@@ -1018,6 +1018,7 @@ static bool nfs_mapping_need_revalidate_inode(struct inode *inode)
+ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
+ {
+ struct nfs_inode *nfsi = NFS_I(inode);
++ unsigned long *bitlock = &nfsi->flags;
+ int ret = 0;
+
+ /* swapfiles are not supposed to be shared. */
+@@ -1029,12 +1030,46 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
+ if (ret < 0)
+ goto out;
+ }
+- if (nfsi->cache_validity & NFS_INO_INVALID_DATA) {
+- trace_nfs_invalidate_mapping_enter(inode);
+- ret = nfs_invalidate_mapping(inode, mapping);
+- trace_nfs_invalidate_mapping_exit(inode, ret);
++
++ /*
++ * We must clear NFS_INO_INVALID_DATA first to ensure that
++ * invalidations that come in while we're shooting down the mappings
++ * are respected. But, that leaves a race window where one revalidator
++ * can clear the flag, and then another checks it before the mapping
++ * gets invalidated. Fix that by serializing access to this part of
++ * the function.
++ *
++ * At the same time, we need to allow other tasks to see whether we
++ * might be in the middle of invalidating the pages, so we only set
++ * the bit lock here if it looks like we're going to be doing that.
++ */
++ for (;;) {
++ ret = wait_on_bit(bitlock, NFS_INO_INVALIDATING,
++ nfs_wait_bit_killable, TASK_KILLABLE);
++ if (ret)
++ goto out;
++ spin_lock(&inode->i_lock);
++ if (test_bit(NFS_INO_INVALIDATING, bitlock)) {
++ spin_unlock(&inode->i_lock);
++ continue;
++ }
++ if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
++ break;
++ spin_unlock(&inode->i_lock);
++ goto out;
+ }
+
++ set_bit(NFS_INO_INVALIDATING, bitlock);
++ smp_wmb();
++ nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
++ spin_unlock(&inode->i_lock);
++ trace_nfs_invalidate_mapping_enter(inode);
++ ret = nfs_invalidate_mapping(inode, mapping);
++ trace_nfs_invalidate_mapping_exit(inode, ret);
++
++ clear_bit_unlock(NFS_INO_INVALIDATING, bitlock);
++ smp_mb__after_clear_bit();
++ wake_up_bit(bitlock, NFS_INO_INVALIDATING);
+ out:
+ return ret;
+ }
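
/*
 * Editor's analogue of the serialization introduced above, with the
 * NFS_INO_INVALIDATING bit lock and wait queue mapped onto a pthread mutex
 * and condition variable so it runs in userspace. This is a loose model of
 * the idea -- clear NFS_INO_INVALID_DATA only while holding an
 * "invalidating" marker, and make late arrivals wait -- not the kernel's
 * wait_on_bit()/clear_bit_unlock() mechanics themselves. Build with -lpthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool invalid_data = true;        /* ~ NFS_INO_INVALID_DATA */
static bool invalidating;               /* ~ NFS_INO_INVALIDATING */

static void invalidate_mapping(void)
{
        puts("shooting down page cache");   /* nfs_invalidate_mapping() */
}

static void revalidate_mapping(void)
{
        pthread_mutex_lock(&lock);
        while (invalidating)            /* someone else is mid-invalidation */
                pthread_cond_wait(&cond, &lock);
        if (!invalid_data) {            /* they already finished the job */
                pthread_mutex_unlock(&lock);
                return;
        }
        invalidating = true;            /* take the "bit lock" ... */
        invalid_data = false;           /* ... and clear the flag under it */
        pthread_mutex_unlock(&lock);

        invalidate_mapping();           /* the slow part, done unlocked */

        pthread_mutex_lock(&lock);
        invalidating = false;           /* clear_bit_unlock + wake_up_bit */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        revalidate_mapping();
        revalidate_mapping();   /* second call: flag already clear, no work */
        return 0;
}
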
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index 28842abafab4..fdfd59157dce 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -249,6 +249,7 @@ static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *ser
+ extern int nfs41_setup_sequence(struct nfs4_session *session,
+ struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
+ struct rpc_task *task);
++extern int nfs41_sequence_done(struct rpc_task *, struct nfs4_sequence_res *);
+ extern int nfs4_proc_create_session(struct nfs_client *, struct rpc_cred *);
+ extern int nfs4_proc_destroy_session(struct nfs4_session *, struct rpc_cred *);
+ extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
+diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
+index b86464ba25e1..394b0a0c54bf 100644
+--- a/fs/nfs/nfs4filelayout.c
++++ b/fs/nfs/nfs4filelayout.c
+@@ -335,8 +335,11 @@ static void filelayout_read_call_done(struct rpc_task *task, void *data)
+ dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
+
+ if (test_bit(NFS_IOHDR_REDO, &rdata->header->flags) &&
+- task->tk_status == 0)
++ task->tk_status == 0) {
++ if (rdata->res.seq_res.sr_slot != NULL)
++ nfs41_sequence_done(task, &rdata->res.seq_res);
+ return;
++ }
+
+ /* Note this may cause RPC to be resent */
+ rdata->header->mds_ops->rpc_call_done(task, data);
+@@ -442,8 +445,11 @@ static void filelayout_write_call_done(struct rpc_task *task, void *data)
+ struct nfs_write_data *wdata = data;
+
+ if (test_bit(NFS_IOHDR_REDO, &wdata->header->flags) &&
+- task->tk_status == 0)
++ task->tk_status == 0) {
++ if (wdata->res.seq_res.sr_slot != NULL)
++ nfs41_sequence_done(task, &wdata->res.seq_res);
+ return;
++ }
+
+ /* Note this may cause RPC to be resent */
+ wdata->header->mds_ops->rpc_call_done(task, data);
+@@ -1216,17 +1222,17 @@ static void filelayout_recover_commit_reqs(struct list_head *dst,
+ struct pnfs_commit_bucket *b;
+ int i;
+
+- /* NOTE cinfo->lock is NOT held, relying on fact that this is
+- * only called on single thread per dreq.
+- * Can't take the lock because need to do pnfs_put_lseg
+- */
++ spin_lock(cinfo->lock);
+ for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
+ if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
++ spin_unlock(cinfo->lock);
+ pnfs_put_lseg(b->wlseg);
+ b->wlseg = NULL;
++ spin_lock(cinfo->lock);
+ }
+ }
+ cinfo->ds->nwritten = 0;
++ spin_unlock(cinfo->lock);
+ }
+
+ static unsigned int
+diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
+index c7c295e556ed..efac602edb37 100644
+--- a/fs/nfs/nfs4filelayoutdev.c
++++ b/fs/nfs/nfs4filelayoutdev.c
+@@ -95,7 +95,7 @@ same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
+ b6 = (struct sockaddr_in6 *)addr2;
+
+ /* LINKLOCAL addresses must have matching scope_id */
+- if (ipv6_addr_scope(&a6->sin6_addr) ==
++ if (ipv6_addr_src_scope(&a6->sin6_addr) ==
+ IPV6_ADDR_SCOPE_LINKLOCAL &&
+ a6->sin6_scope_id != b6->sin6_scope_id)
+ return false;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 5a5fb98edb8a..bcd42fbc04e7 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -585,7 +585,7 @@ out_unlock:
+ nfs41_server_notify_highest_slotid_update(session->clp);
+ }
+
+-static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
++int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+ {
+ struct nfs4_session *session;
+ struct nfs4_slot *slot;
+@@ -685,6 +685,7 @@ out_retry:
+ rpc_delay(task, NFS4_POLL_RETRY_MAX);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(nfs41_sequence_done);
+
+ static int nfs4_sequence_done(struct rpc_task *task,
+ struct nfs4_sequence_res *res)
+@@ -7273,7 +7274,14 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
+ return;
+
+ server = NFS_SERVER(lrp->args.inode);
+- if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
++ switch (task->tk_status) {
++ default:
++ task->tk_status = 0;
++ case 0:
++ break;
++ case -NFS4ERR_DELAY:
++ if (nfs4_async_handle_error(task, server, NULL) != -EAGAIN)
++ break;
+ rpc_restart_call_prepare(task);
+ return;
+ }
+diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
+index 89fe741e58b1..59f838cdc009 100644
+--- a/fs/nfs/nfstrace.h
++++ b/fs/nfs/nfstrace.h
+@@ -36,6 +36,7 @@
+ __print_flags(v, "|", \
+ { 1 << NFS_INO_ADVISE_RDPLUS, "ADVISE_RDPLUS" }, \
+ { 1 << NFS_INO_STALE, "STALE" }, \
++ { 1 << NFS_INO_INVALIDATING, "INVALIDATING" }, \
+ { 1 << NFS_INO_FLUSHING, "FLUSHING" }, \
+ { 1 << NFS_INO_FSCACHE, "FSCACHE" }, \
+ { 1 << NFS_INO_COMMIT, "COMMIT" }, \
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 28466be64eeb..c6aa89f92558 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -909,9 +909,14 @@ bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx)
+ */
+ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
+ {
++ struct nfs_inode *nfsi = NFS_I(inode);
++
+ if (nfs_have_delegated_attributes(inode))
+ goto out;
+- if (NFS_I(inode)->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
++ if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
++ return false;
++ smp_rmb();
++ if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
+ return false;
+ out:
+ return PageUptodate(page) != 0;
+diff --git a/fs/proc/page.c b/fs/proc/page.c
+index b8730d9ebaee..2a8cc94bb641 100644
+--- a/fs/proc/page.c
++++ b/fs/proc/page.c
+@@ -121,7 +121,7 @@ u64 stable_page_flags(struct page *page)
+ * just checks PG_head/PG_tail, so we need to check PageLRU to make
+ * sure a given page is a thp, not a non-huge compound page.
+ */
+- else if (PageTransCompound(page) && PageLRU(compound_trans_head(page)))
++ else if (PageTransCompound(page) && PageLRU(compound_head(page)))
+ u |= 1 << KPF_THP;
+
+ /*
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index a3b6b82108b9..c1dde8e00d25 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -185,6 +185,21 @@ static inline unsigned long __ffs64(u64 word)
+
+ #ifdef __KERNEL__
+
++#ifndef set_mask_bits
++#define set_mask_bits(ptr, _mask, _bits) \
++({ \
++ const typeof(*ptr) mask = (_mask), bits = (_bits); \
++ typeof(*ptr) old, new; \
++ \
++ do { \
++ old = ACCESS_ONCE(*ptr); \
++ new = (old & ~mask) | bits; \
++ } while (cmpxchg(ptr, old, new) != old); \
++ \
++ new; \
++})
++#endif
++
+ #ifndef find_last_bit
+ /**
+ * find_last_bit - find the last set bit in a memory region
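
/*
 * Editor's userspace port of the set_mask_bits() helper just added: the
 * kernel's ACCESS_ONCE/cmpxchg are replaced by a volatile read and GCC's
 * __sync_val_compare_and_swap so this compiles standalone. It atomically
 * swaps the masked field of *ptr for `bits`, which is how the ext4 hunk
 * earlier rewrites i_flags without racing other i_flags updaters.
 */
#include <stdio.h>

#define set_mask_bits(ptr, _mask, _bits)                                \
({                                                                      \
        const __typeof__(*(ptr)) mask = (_mask), bits = (_bits);       \
        __typeof__(*(ptr)) old, new;                                    \
                                                                        \
        do {                                                            \
                old = *(volatile __typeof__(*(ptr)) *)(ptr);            \
                new = (old & ~mask) | bits;                             \
        } while (__sync_val_compare_and_swap(ptr, old, new) != old);    \
                                                                        \
        new;                                                            \
})

int main(void)
{
        unsigned long flags = 0xf0f0;

        /* replace the low byte atomically, leave every other bit alone */
        set_mask_bits(&flags, 0xffUL, 0x0aUL);
        printf("flags = 0x%lx\n", flags);       /* 0xf00a */
        return 0;
}
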
+diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
+index 3935428c57cf..a291552ab767 100644
+--- a/include/linux/huge_mm.h
++++ b/include/linux/huge_mm.h
+@@ -156,23 +156,6 @@ static inline int hpage_nr_pages(struct page *page)
+ return HPAGE_PMD_NR;
+ return 1;
+ }
+-static inline struct page *compound_trans_head(struct page *page)
+-{
+- if (PageTail(page)) {
+- struct page *head;
+- head = page->first_page;
+- smp_rmb();
+- /*
+- * head may be a dangling pointer.
+- * __split_huge_page_refcount clears PageTail before
+- * overwriting first_page, so if PageTail is still
+- * there it means the head pointer isn't dangling.
+- */
+- if (PageTail(page))
+- return head;
+- }
+- return page;
+-}
+
+ extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pmd_t pmd, pmd_t *pmdp);
+@@ -202,7 +185,6 @@ static inline int split_huge_page(struct page *page)
+ do { } while (0)
+ #define split_huge_page_pmd_mm(__mm, __address, __pmd) \
+ do { } while (0)
+-#define compound_trans_head(page) compound_head(page)
+ static inline int hugepage_madvise(struct vm_area_struct *vma,
+ unsigned long *vm_flags, int advice)
+ {
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 648bcb007eba..073734339583 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -371,8 +371,18 @@ static inline void compound_unlock_irqrestore(struct page *page,
+
+ static inline struct page *compound_head(struct page *page)
+ {
+- if (unlikely(PageTail(page)))
+- return page->first_page;
++ if (unlikely(PageTail(page))) {
++ struct page *head = page->first_page;
++
++ /*
++ * page->first_page may be a dangling pointer to an old
++ * compound page, so recheck that it is still a tail
++ * page before returning.
++ */
++ smp_rmb();
++ if (likely(PageTail(page)))
++ return head;
++ }
+ return page;
+ }
+
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 3ea4cde8701c..a632498d42fa 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -215,6 +215,7 @@ struct nfs_inode {
+ #define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */
+ #define NFS_INO_STALE (1) /* possible stale inode */
+ #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
++#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
+ #define NFS_INO_FLUSHING (4) /* inode is flushing out data */
+ #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
+ #define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 0bea2b262a47..c78fff1e9eae 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -444,7 +444,7 @@ static void break_cow(struct rmap_item *rmap_item)
+ static struct page *page_trans_compound_anon(struct page *page)
+ {
+ if (PageTransCompound(page)) {
+- struct page *head = compound_trans_head(page);
++ struct page *head = compound_head(page);
+ /*
+ * head may actually be splitted and freed from under
+ * us but it's ok here.
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index de476c2f8827..5ea3cf734138 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1659,7 +1659,7 @@ int soft_offline_page(struct page *page, int flags)
+ {
+ int ret;
+ unsigned long pfn = page_to_pfn(page);
+- struct page *hpage = compound_trans_head(page);
++ struct page *hpage = compound_head(page);
+
+ if (PageHWPoison(page)) {
+ pr_info("soft offline: %#lx page already poisoned\n", pfn);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 06f847933eeb..6fca39097766 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -369,9 +369,11 @@ void prep_compound_page(struct page *page, unsigned long order)
+ __SetPageHead(page);
+ for (i = 1; i < nr_pages; i++) {
+ struct page *p = page + i;
+- __SetPageTail(p);
+ set_page_count(p, 0);
+ p->first_page = page;
++ /* Make sure p->first_page is always valid for PageTail() */
++ smp_wmb();
++ __SetPageTail(p);
+ }
+ }
+
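
/*
 * Editor's C11 model of the barrier pairing these mm hunks establish:
 * prep_compound_page() must publish p->first_page before __SetPageTail()
 * (the smp_wmb), and compound_head() must re-check PageTail after reading
 * first_page (the smp_rmb) in case the compound page was split meanwhile.
 * Fences and a relaxed flag stand in for the kernel primitives; this is a
 * loose illustration of the ordering, not a formally race-free program.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
        struct fake_page *first_page;
        atomic_bool tail;               /* stands in for PageTail() */
};

static void make_tail(struct fake_page *p, struct fake_page *head)
{
        p->first_page = head;
        atomic_thread_fence(memory_order_release);      /* smp_wmb() */
        atomic_store_explicit(&p->tail, true, memory_order_relaxed);
}

static struct fake_page *fake_compound_head(struct fake_page *p)
{
        if (atomic_load_explicit(&p->tail, memory_order_relaxed)) {
                struct fake_page *head = p->first_page;

                atomic_thread_fence(memory_order_acquire); /* smp_rmb() */
                if (atomic_load_explicit(&p->tail, memory_order_relaxed))
                        return head;    /* still a tail: head is valid */
        }
        return p;                       /* not (or no longer) a tail page */
}

int main(void)
{
        struct fake_page h = { 0 }, t = { 0 };

        make_tail(&t, &h);
        printf("resolved to head: %d\n", fake_compound_head(&t) == &h);
        return 0;
}
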
+diff --git a/mm/swap.c b/mm/swap.c
+index 0c8f7a471925..aa4da5d9401d 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -84,7 +84,7 @@ static void put_compound_page(struct page *page)
+ {
+ if (unlikely(PageTail(page))) {
+ /* __split_huge_page_refcount can run under us */
+- struct page *page_head = compound_trans_head(page);
++ struct page *page_head = compound_head(page);
+
+ if (likely(page != page_head &&
+ get_page_unless_zero(page_head))) {
+@@ -222,7 +222,7 @@ bool __get_page_tail(struct page *page)
+ */
+ unsigned long flags;
+ bool got = false;
+- struct page *page_head = compound_trans_head(page);
++ struct page *page_head = compound_head(page);
+
+ if (likely(page != page_head && get_page_unless_zero(page_head))) {
+ /* Ref to put_compound_page() comment. */
+diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
+index 4c8e5c0aa1ab..d585626d7676 100644
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -871,11 +871,11 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
+ cp->protocol = p->protocol;
+ ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
+ cp->cport = p->cport;
+- ip_vs_addr_set(p->af, &cp->vaddr, p->vaddr);
+- cp->vport = p->vport;
+- /* proto should only be IPPROTO_IP if d_addr is a fwmark */
++ /* proto should only be IPPROTO_IP if p->vaddr is a fwmark */
+ ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
+- &cp->daddr, daddr);
++ &cp->vaddr, p->vaddr);
++ cp->vport = p->vport;
++ ip_vs_addr_set(p->af, &cp->daddr, daddr);
+ cp->dport = dport;
+ cp->flags = flags;
+ cp->fwmark = fwmark;
+diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
+index a99b6c3427b0..59359bec328a 100644
+--- a/net/netfilter/nf_conntrack_proto_dccp.c
++++ b/net/netfilter/nf_conntrack_proto_dccp.c
+@@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
+ const char *msg;
+ u_int8_t state;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ BUG_ON(dh == NULL);
+
+ state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
+@@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
+ u_int8_t type, old_state, new_state;
+ enum ct_dccp_roles role;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ BUG_ON(dh == NULL);
+ type = dh->dccph_type;
+
+@@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
+ unsigned int cscov;
+ const char *msg;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ if (dh == NULL) {
+ msg = "nf_ct_dccp: short packet ";
+ goto out_invalid;
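
/*
 * Editor's reduction of the bug class the three one-character fixes above
 * address: skb_header_pointer() may copy the header into caller-provided
 * storage, so it must be handed the buffer (&_dh), not the address of the
 * result pointer (&dh), or the copy scribbles over the pointer itself. The
 * helper below mimics that contract in plain C; names are illustrative.
 */
#include <stdio.h>
#include <string.h>

struct dccp_hdr { unsigned char dccph_type; };

/* returns buf with `need` bytes of the packet copied into it, or NULL */
static void *header_pointer(const void *pkt, size_t len,
                            size_t need, void *buf)
{
        if (len < need)
                return NULL;
        memcpy(buf, pkt, need);         /* pretend the copy path was taken */
        return buf;
}

int main(void)
{
        unsigned char pkt[4] = { 0x07, 0, 0, 0 };
        struct dccp_hdr _dh, *dh;

        /* WRONG (pre-patch): overwrites the pointer variable itself */
        /* dh = header_pointer(pkt, sizeof(pkt), sizeof(_dh), &dh); */

        /* RIGHT (post-patch): the copy lands in the stack buffer */
        dh = header_pointer(pkt, sizeof(pkt), sizeof(_dh), &_dh);
        printf("type = %u\n", dh ? dh->dccph_type : 0);
        return 0;
}
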
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index a7f9821d1760..bb035f8451c6 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -1508,7 +1508,7 @@ out:
+ static int
+ gss_refresh_null(struct rpc_task *task)
+ {
+- return -EACCES;
++ return 0;
+ }
+
+ static __be32 *
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 7d4ccfa48008..31da88bf6c1c 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -3264,7 +3264,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
+ mutex_unlock(&codec->control_mutex);
+ snd_hda_codec_flush_cache(codec); /* flush the updates */
+ if (err >= 0 && spec->cap_sync_hook)
+- spec->cap_sync_hook(codec, ucontrol);
++ spec->cap_sync_hook(codec, kcontrol, ucontrol);
+ return err;
+ }
+
+@@ -3385,7 +3385,7 @@ static int cap_single_sw_put(struct snd_kcontrol *kcontrol,
+ return ret;
+
+ if (spec->cap_sync_hook)
+- spec->cap_sync_hook(codec, ucontrol);
++ spec->cap_sync_hook(codec, kcontrol, ucontrol);
+
+ return ret;
+ }
+@@ -3790,7 +3790,7 @@ static int mux_select(struct hda_codec *codec, unsigned int adc_idx,
+ return 0;
+ snd_hda_activate_path(codec, path, true, false);
+ if (spec->cap_sync_hook)
+- spec->cap_sync_hook(codec, NULL);
++ spec->cap_sync_hook(codec, NULL, NULL);
+ path_power_down_sync(codec, old_path);
+ return 1;
+ }
+@@ -5233,7 +5233,7 @@ static void init_input_src(struct hda_codec *codec)
+ }
+
+ if (spec->cap_sync_hook)
+- spec->cap_sync_hook(codec, NULL);
++ spec->cap_sync_hook(codec, NULL, NULL);
+ }
+
+ /* set right pin controls for digital I/O */
+diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
+index 0929a06df812..a1095de808c8 100644
+--- a/sound/pci/hda/hda_generic.h
++++ b/sound/pci/hda/hda_generic.h
+@@ -274,6 +274,7 @@ struct hda_gen_spec {
+ void (*init_hook)(struct hda_codec *codec);
+ void (*automute_hook)(struct hda_codec *codec);
+ void (*cap_sync_hook)(struct hda_codec *codec,
++ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+
+ /* PCM hooks */
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 96f07ce56603..fde381d02afd 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3282,7 +3282,8 @@ static void cxt_update_headset_mode(struct hda_codec *codec)
+ }
+
+ static void cxt_update_headset_mode_hook(struct hda_codec *codec,
+- struct snd_ctl_elem_value *ucontrol)
++ struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
+ {
+ cxt_update_headset_mode(codec);
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index deddee9c1565..6a32c857f704 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -695,7 +695,8 @@ static void alc_inv_dmic_sync(struct hda_codec *codec, bool force)
+ }
+
+ static void alc_inv_dmic_hook(struct hda_codec *codec,
+- struct snd_ctl_elem_value *ucontrol)
++ struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
+ {
+ alc_inv_dmic_sync(codec, false);
+ }
+@@ -3141,7 +3142,8 @@ static void alc269_fixup_hp_gpio_mute_hook(void *private_data, int enabled)
+
+ /* turn on/off mic-mute LED per capture hook */
+ static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec,
+- struct snd_ctl_elem_value *ucontrol)
++ struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
+ {
+ struct alc_spec *spec = codec->spec;
+ unsigned int oldval = spec->gpio_led;
+@@ -3403,7 +3405,8 @@ static void alc_update_headset_mode(struct hda_codec *codec)
+ }
+
+ static void alc_update_headset_mode_hook(struct hda_codec *codec,
+- struct snd_ctl_elem_value *ucontrol)
++ struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
+ {
+ alc_update_headset_mode(codec);
+ }
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 6133423821d1..d761c0b879c9 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -195,7 +195,7 @@ struct sigmatel_spec {
+ int default_polarity;
+
+ unsigned int mic_mute_led_gpio; /* capture mute LED GPIO */
+- bool mic_mute_led_on; /* current mic mute state */
++ unsigned int mic_enabled; /* current mic mute state (bitmask) */
+
+ /* stream */
+ unsigned int stream_delay;
+@@ -325,19 +325,26 @@ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask,
+
+ /* hook for controlling mic-mute LED GPIO */
+ static void stac_capture_led_hook(struct hda_codec *codec,
+- struct snd_ctl_elem_value *ucontrol)
++ struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
+ {
+ struct sigmatel_spec *spec = codec->spec;
+- bool mute;
++ unsigned int mask;
++ bool cur_mute, prev_mute;
+
+- if (!ucontrol)
++ if (!kcontrol || !ucontrol)
+ return;
+
+- mute = !(ucontrol->value.integer.value[0] ||
+- ucontrol->value.integer.value[1]);
+- if (spec->mic_mute_led_on != mute) {
+- spec->mic_mute_led_on = mute;
+- if (mute)
++ mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
++ prev_mute = !spec->mic_enabled;
++ if (ucontrol->value.integer.value[0] ||
++ ucontrol->value.integer.value[1])
++ spec->mic_enabled |= mask;
++ else
++ spec->mic_enabled &= ~mask;
++ cur_mute = !spec->mic_enabled;
++ if (cur_mute != prev_mute) {
++ if (cur_mute)
+ spec->gpio_data |= spec->mic_mute_led_gpio;
+ else
+ spec->gpio_data &= ~spec->mic_mute_led_gpio;
+@@ -3974,7 +3981,7 @@ static void stac_setup_gpio(struct hda_codec *codec)
+ if (spec->mic_mute_led_gpio) {
+ spec->gpio_mask |= spec->mic_mute_led_gpio;
+ spec->gpio_dir |= spec->mic_mute_led_gpio;
+- spec->mic_mute_led_on = true;
++ spec->mic_enabled = 0;
+ spec->gpio_data |= spec->mic_mute_led_gpio;
+
+ spec->gen.cap_sync_hook = stac_capture_led_hook;
diff --git a/1017_linux-3.12.18.patch b/1017_linux-3.12.18.patch
new file mode 100644
index 00000000..d92029ca
--- /dev/null
+++ b/1017_linux-3.12.18.patch
@@ -0,0 +1,3245 @@
+diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+index 11ace3c3d805..4fc392763611 100644
+--- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
++++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+@@ -7,3 +7,4 @@ Required properties:
+
+ Optional properties:
+ - local-mac-address : Ethernet mac address to use
++- vdd-supply: supply for Ethernet mac
+diff --git a/Makefile b/Makefile
+index fbd1ee8afea8..fc0dcf63a8d9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 17
++SUBLEVEL = 18
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
+index ea16d782af58..4f31b2eb5cdf 100644
+--- a/arch/arc/boot/dts/nsimosci.dts
++++ b/arch/arc/boot/dts/nsimosci.dts
+@@ -11,13 +11,16 @@
+
+ / {
+ compatible = "snps,nsimosci";
+- clock-frequency = <80000000>; /* 80 MHZ */
++ clock-frequency = <20000000>; /* 20 MHZ */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+
+ chosen {
+- bootargs = "console=tty0 consoleblank=0";
++ /* this is for console on PGU */
++ /* bootargs = "console=tty0 consoleblank=0"; */
++ /* this is for console on serial */
++ bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
+ };
+
+ aliases {
+@@ -44,15 +47,14 @@
+ };
+
+ uart0: serial@c0000000 {
+- compatible = "snps,dw-apb-uart";
++ compatible = "ns8250";
+ reg = <0xc0000000 0x2000>;
+ interrupts = <11>;
+- #clock-frequency = <80000000>;
+ clock-frequency = <3686400>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+- status = "okay";
++ no-loopback-test = <1>;
+ };
+
+ pgu0: pgu@c9000000 {
+diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
+index 451af30914f6..c01ba35a4eff 100644
+--- a/arch/arc/configs/nsimosci_defconfig
++++ b/arch/arc/configs/nsimosci_defconfig
+@@ -54,6 +54,7 @@ CONFIG_SERIO_ARC_PS2=y
+ CONFIG_SERIAL_8250=y
+ CONFIG_SERIAL_8250_CONSOLE=y
+ CONFIG_SERIAL_8250_DW=y
++CONFIG_SERIAL_OF_PLATFORM=y
+ CONFIG_SERIAL_ARC=y
+ CONFIG_SERIAL_ARC_CONSOLE=y
+ # CONFIG_HW_RANDOM is not set
+diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
+index 311a300d48cc..ee121a0f5b00 100644
+--- a/arch/m68k/Kconfig
++++ b/arch/m68k/Kconfig
+@@ -16,6 +16,7 @@ config M68K
+ select FPU if MMU
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
++ select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
+ select HAVE_MOD_ARCH_SPECIFIC
+ select MODULES_USE_ELF_REL
+ select MODULES_USE_ELF_RELA
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 3e01afa21710..6671e8db1861 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -116,6 +116,7 @@ config S390
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_TRACE_MCOUNT_TEST
++ select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_KERNEL_BZIP2
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZ4
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
+index 586f41aac361..185fad49d86f 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
++++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
+@@ -24,10 +24,6 @@
+ .align 16
+ .Lbswap_mask:
+ .octa 0x000102030405060708090a0b0c0d0e0f
+-.Lpoly:
+- .octa 0xc2000000000000000000000000000001
+-.Ltwo_one:
+- .octa 0x00000001000000000000000000000001
+
+ #define DATA %xmm0
+ #define SHASH %xmm1
+@@ -134,28 +130,3 @@ ENTRY(clmul_ghash_update)
+ .Lupdate_just_ret:
+ ret
+ ENDPROC(clmul_ghash_update)
+-
+-/*
+- * void clmul_ghash_setkey(be128 *shash, const u8 *key);
+- *
+- * Calculate hash_key << 1 mod poly
+- */
+-ENTRY(clmul_ghash_setkey)
+- movaps .Lbswap_mask, BSWAP
+- movups (%rsi), %xmm0
+- PSHUFB_XMM BSWAP %xmm0
+- movaps %xmm0, %xmm1
+- psllq $1, %xmm0
+- psrlq $63, %xmm1
+- movaps %xmm1, %xmm2
+- pslldq $8, %xmm1
+- psrldq $8, %xmm2
+- por %xmm1, %xmm0
+- # reduction
+- pshufd $0b00100100, %xmm2, %xmm1
+- pcmpeqd .Ltwo_one, %xmm1
+- pand .Lpoly, %xmm1
+- pxor %xmm1, %xmm0
+- movups %xmm0, (%rdi)
+- ret
+-ENDPROC(clmul_ghash_setkey)
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+index 6759dd1135be..d785cf2c529c 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+@@ -30,8 +30,6 @@ void clmul_ghash_mul(char *dst, const be128 *shash);
+ void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
+ const be128 *shash);
+
+-void clmul_ghash_setkey(be128 *shash, const u8 *key);
+-
+ struct ghash_async_ctx {
+ struct cryptd_ahash *cryptd_tfm;
+ };
+@@ -58,13 +56,23 @@ static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+ {
+ struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
++ be128 *x = (be128 *)key;
++ u64 a, b;
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+- clmul_ghash_setkey(&ctx->shash, key);
++ /* perform multiplication by 'x' in GF(2^128) */
++ a = be64_to_cpu(x->a);
++ b = be64_to_cpu(x->b);
++
++ ctx->shash.a = (__be64)((b << 1) | (a >> 63));
++ ctx->shash.b = (__be64)((a << 1) | (b >> 63));
++
++ if (a >> 63)
++ ctx->shash.b ^= cpu_to_be64(0xc2);
+
+ return 0;
+ }
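
/*
 * Editor's standalone rendition of the new C setkey above, which replaces
 * the removed clmul_ghash_setkey assembly (the stated upstream motive was
 * that the assembly touched SSE registers outside a kernel_fpu_begin()
 * section). GHASH needs the hash key multiplied by x in GF(2^128): a
 * 128-bit left shift with carry plus a conditional reduction, whose 0xc2
 * byte comes from the field polynomial the old .Lpoly constant encoded
 * (0xc2000000000000000000000000000001). The half-swapped output layout the
 * CLMUL code expects is kept; be64 conversions are reduced to plain
 * uint64_t math here, so treat this as a model of the arithmetic only.
 */
#include <stdint.h>
#include <stdio.h>

struct shash128 { uint64_t a, b; };

static void ghash_setkey_mulx(struct shash128 *out, uint64_t a, uint64_t b)
{
        /* 128-bit left shift, halves stored swapped as the asm wants */
        out->a = (b << 1) | (a >> 63);
        out->b = (a << 1) | (b >> 63);
        if (a >> 63)
                out->b ^= (uint64_t)0xc2 << 56; /* cpu_to_be64(0xc2) on LE */
}

int main(void)
{
        struct shash128 h;

        ghash_setkey_mulx(&h, 0x8000000000000000ULL, 0ULL);
        printf("a=%016llx b=%016llx\n",
               (unsigned long long)h.a, (unsigned long long)h.b);
        return 0;
}
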
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 04ceb7e2fadd..690011de912a 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -3691,9 +3691,12 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
+ if (!(mode & FMODE_NDELAY)) {
+ if (mode & (FMODE_READ|FMODE_WRITE)) {
+ UDRS->last_checked = 0;
++ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ check_disk_change(bdev);
+ if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+ goto out;
++ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
++ goto out;
+ }
+ res = -EROFS;
+ if ((mode & FMODE_WRITE) &&
+@@ -3746,17 +3749,29 @@ static unsigned int floppy_check_events(struct gendisk *disk,
+ * a disk in the drive, and whether that disk is writable.
+ */
+
+-static void floppy_rb0_complete(struct bio *bio, int err)
++struct rb0_cbdata {
++ int drive;
++ struct completion complete;
++};
++
++static void floppy_rb0_cb(struct bio *bio, int err)
+ {
+- complete((struct completion *)bio->bi_private);
++ struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
++ int drive = cbdata->drive;
++
++ if (err) {
++ pr_info("floppy: error %d while reading block 0", err);
++ set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
++ }
++ complete(&cbdata->complete);
+ }
+
+-static int __floppy_read_block_0(struct block_device *bdev)
++static int __floppy_read_block_0(struct block_device *bdev, int drive)
+ {
+ struct bio bio;
+ struct bio_vec bio_vec;
+- struct completion complete;
+ struct page *page;
++ struct rb0_cbdata cbdata;
+ size_t size;
+
+ page = alloc_page(GFP_NOIO);
+@@ -3769,6 +3784,8 @@ static int __floppy_read_block_0(struct block_device *bdev)
+ if (!size)
+ size = 1024;
+
++ cbdata.drive = drive;
++
+ bio_init(&bio);
+ bio.bi_io_vec = &bio_vec;
+ bio_vec.bv_page = page;
+@@ -3779,13 +3796,14 @@ static int __floppy_read_block_0(struct block_device *bdev)
+ bio.bi_bdev = bdev;
+ bio.bi_sector = 0;
+ bio.bi_flags = (1 << BIO_QUIET);
+- init_completion(&complete);
+- bio.bi_private = &complete;
+- bio.bi_end_io = floppy_rb0_complete;
++ bio.bi_private = &cbdata;
++ bio.bi_end_io = floppy_rb0_cb;
+
+ submit_bio(READ, &bio);
+ process_fd_request();
+- wait_for_completion(&complete);
++
++ init_completion(&cbdata.complete);
++ wait_for_completion(&cbdata.complete);
+
+ __free_page(page);
+
+@@ -3827,7 +3845,7 @@ static int floppy_revalidate(struct gendisk *disk)
+ UDRS->generation++;
+ if (drive_no_geom(drive)) {
+ /* auto-sensing */
+- res = __floppy_read_block_0(opened_bdev[drive]);
++ res = __floppy_read_block_0(opened_bdev[drive], drive);
+ } else {
+ if (cf)
+ poll_drive(false, FD_RAW_NEED_DISK);
+diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
+index a22a7a502740..8156cafad11a 100644
+--- a/drivers/char/ipmi/ipmi_bt_sm.c
++++ b/drivers/char/ipmi/ipmi_bt_sm.c
+@@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
+
+ static inline int read_all_bytes(struct si_sm_data *bt)
+ {
+- unsigned char i;
++ unsigned int i;
+
+ /*
+ * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
+index 85f1c8c25ddc..4fe6521c30d5 100644
+--- a/drivers/cpufreq/powernow-k6.c
++++ b/drivers/cpufreq/powernow-k6.c
+@@ -26,41 +26,108 @@
+ static unsigned int busfreq; /* FSB, in 10 kHz */
+ static unsigned int max_multiplier;
+
++static unsigned int param_busfreq = 0;
++static unsigned int param_max_multiplier = 0;
++
++module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
++MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
++
++module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
++MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
+
+ /* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
+ static struct cpufreq_frequency_table clock_ratio[] = {
+- {45, /* 000 -> 4.5x */ 0},
++ {60, /* 110 -> 6.0x */ 0},
++ {55, /* 011 -> 5.5x */ 0},
+ {50, /* 001 -> 5.0x */ 0},
++ {45, /* 000 -> 4.5x */ 0},
+ {40, /* 010 -> 4.0x */ 0},
+- {55, /* 011 -> 5.5x */ 0},
+- {20, /* 100 -> 2.0x */ 0},
+- {30, /* 101 -> 3.0x */ 0},
+- {60, /* 110 -> 6.0x */ 0},
+ {35, /* 111 -> 3.5x */ 0},
++ {30, /* 101 -> 3.0x */ 0},
++ {20, /* 100 -> 2.0x */ 0},
+ {0, CPUFREQ_TABLE_END}
+ };
+
++static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
++static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
++
++static const struct {
++ unsigned freq;
++ unsigned mult;
++} usual_frequency_table[] = {
++ { 400000, 40 }, // 100 * 4
++ { 450000, 45 }, // 100 * 4.5
++ { 475000, 50 }, // 95 * 5
++ { 500000, 50 }, // 100 * 5
++ { 506250, 45 }, // 112.5 * 4.5
++ { 533500, 55 }, // 97 * 5.5
++ { 550000, 55 }, // 100 * 5.5
++ { 562500, 50 }, // 112.5 * 5
++ { 570000, 60 }, // 95 * 6
++ { 600000, 60 }, // 100 * 6
++ { 618750, 55 }, // 112.5 * 5.5
++ { 660000, 55 }, // 120 * 5.5
++ { 675000, 60 }, // 112.5 * 6
++ { 720000, 60 }, // 120 * 6
++};
++
++#define FREQ_RANGE 3000
+
+ /**
+ * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
+ *
+- * Returns the current setting of the frequency multiplier. Core clock
++ * Returns the current setting of the frequency multiplier. Core clock
+ * speed is frequency of the Front-Side Bus multiplied with this value.
+ */
+ static int powernow_k6_get_cpu_multiplier(void)
+ {
+- u64 invalue = 0;
++ unsigned long invalue = 0;
+ u32 msrval;
+
++ local_irq_disable();
++
+ msrval = POWERNOW_IOPORT + 0x1;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+ invalue = inl(POWERNOW_IOPORT + 0x8);
+ msrval = POWERNOW_IOPORT + 0x0;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+
+- return clock_ratio[(invalue >> 5)&7].driver_data;
++ local_irq_enable();
++
++ return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data;
+ }
+
++static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
++{
++ unsigned long outvalue, invalue;
++ unsigned long msrval;
++ unsigned long cr0;
++
++ /* we now need to transform best_i to the BVC format, see AMD#23446 */
++
++ /*
++ * The processor doesn't respond to inquiry cycles while changing the
++ * frequency, so we must disable cache.
++ */
++ local_irq_disable();
++ cr0 = read_cr0();
++ write_cr0(cr0 | X86_CR0_CD);
++ wbinvd();
++
++ outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
++
++ msrval = POWERNOW_IOPORT + 0x1;
++ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
++ invalue = inl(POWERNOW_IOPORT + 0x8);
++ invalue = invalue & 0x1f;
++ outvalue = outvalue | invalue;
++ outl(outvalue, (POWERNOW_IOPORT + 0x8));
++ msrval = POWERNOW_IOPORT + 0x0;
++ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
++
++ write_cr0(cr0);
++ local_irq_enable();
++}
+
+ /**
+ * powernow_k6_set_state - set the PowerNow! multiplier
+@@ -71,8 +138,6 @@ static int powernow_k6_get_cpu_multiplier(void)
+ static void powernow_k6_set_state(struct cpufreq_policy *policy,
+ unsigned int best_i)
+ {
+- unsigned long outvalue = 0, invalue = 0;
+- unsigned long msrval;
+ struct cpufreq_freqs freqs;
+
+ if (clock_ratio[best_i].driver_data > max_multiplier) {
+@@ -85,18 +150,7 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+- /* we now need to transform best_i to the BVC format, see AMD#23446 */
+-
+- outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
+-
+- msrval = POWERNOW_IOPORT + 0x1;
+- wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+- invalue = inl(POWERNOW_IOPORT + 0x8);
+- invalue = invalue & 0xf;
+- outvalue = outvalue | invalue;
+- outl(outvalue , (POWERNOW_IOPORT + 0x8));
+- msrval = POWERNOW_IOPORT + 0x0;
+- wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
++ powernow_k6_set_cpu_multiplier(best_i);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+@@ -141,18 +195,57 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
+ return 0;
+ }
+
+-
+ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
+ {
+ unsigned int i, f;
+ int result;
++ unsigned khz;
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+- /* get frequencies */
+- max_multiplier = powernow_k6_get_cpu_multiplier();
+- busfreq = cpu_khz / max_multiplier;
++ max_multiplier = 0;
++ khz = cpu_khz;
++ for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
++ if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
++ khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
++ khz = usual_frequency_table[i].freq;
++ max_multiplier = usual_frequency_table[i].mult;
++ break;
++ }
++ }
++ if (param_max_multiplier) {
++ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
++ if (clock_ratio[i].driver_data == param_max_multiplier) {
++ max_multiplier = param_max_multiplier;
++ goto have_max_multiplier;
++ }
++ }
++ printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
++ return -EINVAL;
++ }
++
++ if (!max_multiplier) {
++ printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
++ printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
++ return -EOPNOTSUPP;
++ }
++
++have_max_multiplier:
++ param_max_multiplier = max_multiplier;
++
++ if (param_busfreq) {
++ if (param_busfreq >= 50000 && param_busfreq <= 150000) {
++ busfreq = param_busfreq / 10;
++ goto have_busfreq;
++ }
++ printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
++ return -EINVAL;
++ }
++
++ busfreq = khz / max_multiplier;
++have_busfreq:
++ param_busfreq = busfreq * 10;
+
+ /* table init */
+ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+@@ -164,7 +257,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
+ }
+
+ /* cpuinfo and default policy values */
+- policy->cpuinfo.transition_latency = 200000;
++ policy->cpuinfo.transition_latency = 500000;
+ policy->cur = busfreq * max_multiplier;
+
+ result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
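
/*
 * Editor's extract of the nominal-frequency matching introduced above:
 * the measured cpu_khz is snapped to a known FSB*multiplier product when it
 * falls within +/- FREQ_RANGE (3 MHz), which is how the driver now derives
 * the maximum multiplier instead of trusting the hardware readback. The
 * table below is abbreviated from the patch; standalone illustration only.
 */
#include <stdio.h>

#define FREQ_RANGE 3000

static const struct { unsigned freq, mult; } table[] = {
        { 400000, 40 }, { 450000, 45 }, { 500000, 50 },
        { 550000, 55 }, { 600000, 60 },
};

static unsigned match_multiplier(unsigned khz)
{
        unsigned i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (khz >= table[i].freq - FREQ_RANGE &&
                    khz <= table[i].freq + FREQ_RANGE)
                        return table[i].mult;   /* 10x: 45 means 4.5x */
        return 0;       /* unknown: the driver falls back to module params */
}

int main(void)
{
        printf("499 MHz -> mult %u\n", match_multiplier(499000));  /* 50 */
        printf("520 MHz -> mult %u\n", match_multiplier(520000));  /* 0 */
        return 0;
}
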
+diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+index 86d779a9c245..32bbba0a787b 100644
+--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
++++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+@@ -233,6 +233,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
+ info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = cdev->mc.vram_size;
+
++ info->fix.smem_start = cdev->dev->mode_config.fb_base;
++ info->fix.smem_len = cdev->mc.vram_size;
++
+ info->screen_base = sysram;
+ info->screen_size = size;
+
+diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
+index 379a47ea99f6..3592616d484b 100644
+--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
++++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
+@@ -494,13 +494,12 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
+
+ int cirrus_vga_get_modes(struct drm_connector *connector)
+ {
+- /* Just add a static list of modes */
+- drm_add_modes_noedid(connector, 640, 480);
+- drm_add_modes_noedid(connector, 800, 600);
+- drm_add_modes_noedid(connector, 1024, 768);
+- drm_add_modes_noedid(connector, 1280, 1024);
++ int count;
+
+- return 4;
++ /* Just add a static list of modes */
++ count = drm_add_modes_noedid(connector, 1280, 1024);
++ drm_set_preferred_mode(connector, 1024, 768);
++ return count;
+ }
+
+ static int cirrus_vga_mode_valid(struct drm_connector *connector,
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index d0d3eae05a1a..1cb50268a224 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -3296,6 +3296,19 @@ int drm_add_modes_noedid(struct drm_connector *connector,
+ }
+ EXPORT_SYMBOL(drm_add_modes_noedid);
+
++void drm_set_preferred_mode(struct drm_connector *connector,
++ int hpref, int vpref)
++{
++ struct drm_display_mode *mode;
++
++ list_for_each_entry(mode, &connector->probed_modes, head) {
++ if (drm_mode_width(mode) == hpref &&
++ drm_mode_height(mode) == vpref)
++ mode->type |= DRM_MODE_TYPE_PREFERRED;
++ }
++}
++EXPORT_SYMBOL(drm_set_preferred_mode);
++
+ /**
+ * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
+ * data from a DRM display mode
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 3d13ca6e257f..49557c957be8 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -1163,6 +1163,7 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
+ {
+ struct drm_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode = NULL;
++ bool prefer_non_interlace;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ if (cmdline_mode->specified == false)
+@@ -1174,6 +1175,8 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
++ prefer_non_interlace = !cmdline_mode->interlace;
++ again:
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+@@ -1188,10 +1191,18 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
++ } else if (prefer_non_interlace) {
++ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++ continue;
+ }
+ return mode;
+ }
+
++ if (prefer_non_interlace) {
++ prefer_non_interlace = false;
++ goto again;
++ }
++
+ create_mode:
+ mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
+ cmdline_mode);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 9dcf34f9a22d..5aa836e6e190 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -10073,8 +10073,7 @@ static struct intel_quirk intel_quirks[] = {
+ /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
+ { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
+
+- /* 830/845 need to leave pipe A & dpll A up */
+- { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
++ /* 830 needs to leave pipe A & dpll A up */
+ { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+
+ /* Lenovo U160 cannot use SSC on LVDS */
+diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
+index 2a2879e53bd5..bbcd2dd653a3 100644
+--- a/drivers/gpu/drm/radeon/dce6_afmt.c
++++ b/drivers/gpu/drm/radeon/dce6_afmt.c
+@@ -226,13 +226,15 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev)
+ return !ASIC_IS_NODCE(rdev);
+ }
+
+-static void dce6_audio_enable(struct radeon_device *rdev,
+- struct r600_audio_pin *pin,
+- bool enable)
++void dce6_audio_enable(struct radeon_device *rdev,
++ struct r600_audio_pin *pin,
++ bool enable)
+ {
++ if (!pin)
++ return;
++
+ WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
+ enable ? AUDIO_ENABLED : 0);
+- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
+ }
+
+ static const u32 pin_offsets[7] =
+@@ -269,7 +271,8 @@ int dce6_audio_init(struct radeon_device *rdev)
+ rdev->audio.pin[i].connected = false;
+ rdev->audio.pin[i].offset = pin_offsets[i];
+ rdev->audio.pin[i].id = i;
+- dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
++ /* disable audio. it will be set up later */
++ dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+index b347fffa4519..da4e504b78a4 100644
+--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
++++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+@@ -257,6 +257,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
+ return;
+ offset = dig->afmt->offset;
+
++ /* disable audio prior to setting up hw */
++ if (ASIC_IS_DCE6(rdev)) {
++ dig->afmt->pin = dce6_audio_get_pin(rdev);
++ dce6_audio_enable(rdev, dig->afmt->pin, false);
++ } else {
++ dig->afmt->pin = r600_audio_get_pin(rdev);
++ r600_audio_enable(rdev, dig->afmt->pin, false);
++ }
++
+ evergreen_audio_set_dto(encoder, mode->clock);
+
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+@@ -358,12 +367,16 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
+ WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
++
++	/* enable audio after setting up hw */
++ if (ASIC_IS_DCE6(rdev))
++ dce6_audio_enable(rdev, dig->afmt->pin, true);
++ else
++ r600_audio_enable(rdev, dig->afmt->pin, true);
+ }
+
+ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
+ {
+- struct drm_device *dev = encoder->dev;
+- struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+@@ -376,15 +389,6 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
+ if (!enable && !dig->afmt->enabled)
+ return;
+
+- if (enable) {
+- if (ASIC_IS_DCE6(rdev))
+- dig->afmt->pin = dce6_audio_get_pin(rdev);
+- else
+- dig->afmt->pin = r600_audio_get_pin(rdev);
+- } else {
+- dig->afmt->pin = NULL;
+- }
+-
+ dig->afmt->enabled = enable;
+
+ DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
+index 47fc2b886979..bffac10c4296 100644
+--- a/drivers/gpu/drm/radeon/r600_audio.c
++++ b/drivers/gpu/drm/radeon/r600_audio.c
+@@ -142,12 +142,15 @@ void r600_audio_update_hdmi(struct work_struct *work)
+ }
+
+ /* enable the audio stream */
+-static void r600_audio_enable(struct radeon_device *rdev,
+- struct r600_audio_pin *pin,
+- bool enable)
++void r600_audio_enable(struct radeon_device *rdev,
++ struct r600_audio_pin *pin,
++ bool enable)
+ {
+ u32 value = 0;
+
++ if (!pin)
++ return;
++
+ if (ASIC_IS_DCE4(rdev)) {
+ if (enable) {
+ value |= 0x81000000; /* Required to enable audio */
+@@ -158,7 +161,6 @@ static void r600_audio_enable(struct radeon_device *rdev,
+ WREG32_P(R600_AUDIO_ENABLE,
+ enable ? 0x81000000 : 0x0, ~0x81000000);
+ }
+- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
+ }
+
+ /*
+@@ -178,8 +180,8 @@ int r600_audio_init(struct radeon_device *rdev)
+ rdev->audio.pin[0].status_bits = 0;
+ rdev->audio.pin[0].category_code = 0;
+ rdev->audio.pin[0].id = 0;
+-
+- r600_audio_enable(rdev, &rdev->audio.pin[0], true);
++ /* disable audio. it will be set up later */
++ r600_audio_enable(rdev, &rdev->audio.pin[0], false);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
+index 7f3b0d9aaada..d38b725563e4 100644
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -329,9 +329,6 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+ u8 *sadb;
+ int sad_count;
+
+- /* XXX: setting this register causes hangs on some asics */
+- return;
+-
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+@@ -446,6 +443,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
+ return;
+ offset = dig->afmt->offset;
+
++ /* disable audio prior to setting up hw */
++ dig->afmt->pin = r600_audio_get_pin(rdev);
++ r600_audio_enable(rdev, dig->afmt->pin, false);
++
+ r600_audio_set_dto(encoder, mode->clock);
+
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+@@ -517,6 +518,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
+ WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
+
+ r600_hdmi_audio_workaround(encoder);
++
++	/* enable audio after setting up hw */
++ r600_audio_enable(rdev, dig->afmt->pin, true);
+ }
+
+ /*
+@@ -637,11 +641,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
+ if (!enable && !dig->afmt->enabled)
+ return;
+
+- if (enable)
+- dig->afmt->pin = r600_audio_get_pin(rdev);
+- else
+- dig->afmt->pin = NULL;
+-
+ /* Older chipsets require setting HDMI and routing manually */
+ if (!ASIC_IS_DCE3(rdev)) {
+ if (enable)
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index f44ca5853ff2..b11433f75578 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -2717,6 +2717,12 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
+ void r600_audio_update_hdmi(struct work_struct *work);
+ struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
+ struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
++void r600_audio_enable(struct radeon_device *rdev,
++ struct r600_audio_pin *pin,
++ bool enable);
++void dce6_audio_enable(struct radeon_device *rdev,
++ struct r600_audio_pin *pin,
++ bool enable);
+
+ /*
+ * R600 vram scratch functions
+diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
+index 02125e6a9109..5a4da94aefb0 100644
+--- a/drivers/isdn/isdnloop/isdnloop.c
++++ b/drivers/isdn/isdnloop/isdnloop.c
+@@ -518,9 +518,9 @@ static isdnloop_stat isdnloop_cmd_table[] =
+ static void
+ isdnloop_fake_err(isdnloop_card *card)
+ {
+- char buf[60];
++ char buf[64];
+
+- sprintf(buf, "E%s", card->omsg);
++ snprintf(buf, sizeof(buf), "E%s", card->omsg);
+ isdnloop_fake(card, buf, -1);
+ isdnloop_fake(card, "NAK", -1);
+ }
+@@ -903,6 +903,8 @@ isdnloop_parse_cmd(isdnloop_card *card)
+ case 7:
+ /* 0x;EAZ */
+ p += 3;
++ if (strlen(p) >= sizeof(card->eazlist[0]))
++ break;
+ strcpy(card->eazlist[ch - 1], p);
+ break;
+ case 8:
+@@ -1070,6 +1072,12 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
+ return -EBUSY;
+ if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
+ return -EFAULT;
++
++ for (i = 0; i < 3; i++) {
++ if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
++ return -EINVAL;
++ }
++
+ spin_lock_irqsave(&card->isdnloop_lock, flags);
+ switch (sdef.ptype) {
+ case ISDN_PTYPE_EURO:
+@@ -1127,7 +1135,7 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
+ {
+ ulong a;
+ int i;
+- char cbuf[60];
++ char cbuf[80];
+ isdn_ctrl cmd;
+ isdnloop_cdef cdef;
+
+@@ -1192,7 +1200,6 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
+ break;
+ if ((c->arg & 255) < ISDNLOOP_BCH) {
+ char *p;
+- char dial[50];
+ char dcode[4];
+
+ a = c->arg;
+@@ -1204,10 +1211,10 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
+ } else
+ /* Normal Dial */
+ strcpy(dcode, "CAL");
+- strcpy(dial, p);
+- sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
+- dcode, dial, c->parm.setup.si1,
+- c->parm.setup.si2, c->parm.setup.eazmsn);
++ snprintf(cbuf, sizeof(cbuf),
++ "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
++ dcode, p, c->parm.setup.si1,
++ c->parm.setup.si2, c->parm.setup.eazmsn);
+ i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
+ }
+ break;
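The three isdnloop changes above are all instances of one hardening rule: never let an externally supplied string length decide how many bytes land in a fixed buffer; either bound the copy with snprintf() or validate the length before a strcpy(). A standalone sketch of both forms, with a hypothetical destination size:

#include <stdio.h>
#include <string.h>

#define EAZ_LEN 16   /* hypothetical destination size */

/* Reject oversized input instead of overflowing, as the EAZ fix does. */
static int store_eaz(char dst[EAZ_LEN], const char *src)
{
	if (strlen(src) >= EAZ_LEN)
		return -1;
	strcpy(dst, src);	/* now provably in bounds */
	return 0;
}

int main(void)
{
	char cbuf[80], eaz[EAZ_LEN];
	const char *omsg = "status-message";

	/* snprintf() truncates at sizeof(cbuf) instead of overrunning. */
	snprintf(cbuf, sizeof(cbuf), "E%s", omsg);
	puts(cbuf);
	return store_eaz(eaz, "12345") ? 1 : 0;
}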
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index f428ef574372..71adb692e457 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -694,7 +694,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
+ client_info->ntt = 0;
+ }
+
+- if (!vlan_get_tag(skb, &client_info->vlan_id))
++ if (vlan_get_tag(skb, &client_info->vlan_id))
+ client_info->vlan_id = 0;
+
+ if (!client_info->assigned) {
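The one-line bonding fix above hinges on the usual kernel return convention: vlan_get_tag() returns 0 on success, so the fallback to VLAN id 0 must run on a non-zero return; the old code had the test inverted. The convention in miniature, with a hypothetical stand-in for vlan_get_tag():

#include <stdio.h>

/* Stand-in for vlan_get_tag(): 0 on success, negative when no tag. */
static int get_tag(int have_tag, unsigned short tag, unsigned short *out)
{
	if (!have_tag)
		return -1;
	*out = tag;
	return 0;
}

int main(void)
{
	unsigned short vid;

	if (get_tag(0, 100, &vid))	/* non-zero => untagged, use 0 */
		vid = 0;
	printf("vlan id %u\n", vid);
	return 0;
}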
+diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
+index e838a3f74b69..8f9e76d2dd8b 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -2490,6 +2490,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
+
+ bp->fw_wr_seq++;
+ msg_data |= bp->fw_wr_seq;
++ bp->fw_last_msg = msg_data;
+
+ bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
+
+@@ -3982,8 +3983,23 @@ bnx2_setup_wol(struct bnx2 *bp)
+ wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+ }
+
+- if (!(bp->flags & BNX2_FLAG_NO_WOL))
+- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
++ if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
++ u32 val;
++
++ wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
++ if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
++ bnx2_fw_sync(bp, wol_msg, 1, 0);
++ return;
++ }
++ /* Tell firmware not to power down the PHY yet, otherwise
++ * the chip will take a long time to respond to MMIO reads.
++ */
++ val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
++ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
++ val | BNX2_PORT_FEATURE_ASF_ENABLED);
++ bnx2_fw_sync(bp, wol_msg, 1, 0);
++ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
++ }
+
+ }
+
+@@ -4015,9 +4031,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+
+ if (bp->wol)
+ pci_set_power_state(bp->pdev, PCI_D3hot);
+- } else {
+- pci_set_power_state(bp->pdev, PCI_D3hot);
++ break;
++
++ }
++ if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
++ u32 val;
++
++ /* Tell firmware not to power down the PHY yet,
++ * otherwise the other port may not respond to
++ * MMIO reads.
++ */
++ val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
++ val &= ~BNX2_CONDITION_PM_STATE_MASK;
++ val |= BNX2_CONDITION_PM_STATE_UNPREP;
++ bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
+ }
++ pci_set_power_state(bp->pdev, PCI_D3hot);
+
+ /* No more memory access after this point until
+ * device is brought back to D0.
+diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
+index 18cb2d23e56b..0eb2a65c35b4 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.h
++++ b/drivers/net/ethernet/broadcom/bnx2.h
+@@ -6890,6 +6890,7 @@ struct bnx2 {
+
+ u16 fw_wr_seq;
+ u16 fw_drv_pulse_wr_seq;
++ u32 fw_last_msg;
+
+ int rx_max_ring;
+ int rx_ring_size;
+@@ -7396,6 +7397,10 @@ struct bnx2_rv2p_fw_file {
+ #define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
+ #define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
+ #define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
++#define BNX2_CONDITION_PM_STATE_MASK 0x00030000
++#define BNX2_CONDITION_PM_STATE_FULL 0x00030000
++#define BNX2_CONDITION_PM_STATE_PREP 0x00020000
++#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000
+
+ #define BNX2_BC_STATE_DEBUG_CMD 0x1dc
+ #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 14a50a11d72e..aae7ba66e7bb 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -17480,8 +17480,6 @@ static int tg3_init_one(struct pci_dev *pdev,
+
+ tg3_init_bufmgr_config(tp);
+
+- features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+-
+ /* 5700 B0 chips do not support checksumming correctly due
+ * to hardware bugs.
+ */
+@@ -17513,7 +17511,8 @@ static int tg3_init_one(struct pci_dev *pdev,
+ features |= NETIF_F_TSO_ECN;
+ }
+
+- dev->features |= features;
++ dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_CTAG_RX;
+ dev->vlan_features |= features;
+
+ /*
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 63090c0ddeb9..8672547a2a47 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -525,13 +525,6 @@ fec_restart(struct net_device *ndev, int duplex)
+ /* Clear any outstanding interrupt. */
+ writel(0xffc00000, fep->hwp + FEC_IEVENT);
+
+- /* Setup multicast filter. */
+- set_multicast_list(ndev);
+-#ifndef CONFIG_M5272
+- writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+- writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+-#endif
+-
+ /* Set maximum receive buffer size. */
+ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
+
+@@ -652,6 +645,13 @@ fec_restart(struct net_device *ndev, int duplex)
+
+ writel(rcntl, fep->hwp + FEC_R_CNTRL);
+
++ /* Setup multicast filter. */
++ set_multicast_list(ndev);
++#ifndef CONFIG_M5272
++ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
++ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
++#endif
++
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ /* enable ENET endian swap */
+ ecntl |= (1 << 8);
+diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
+index 727b546a9eb8..e0c92e0e5e1d 100644
+--- a/drivers/net/ethernet/micrel/ks8851.c
++++ b/drivers/net/ethernet/micrel/ks8851.c
+@@ -23,6 +23,7 @@
+ #include <linux/crc32.h>
+ #include <linux/mii.h>
+ #include <linux/eeprom_93cx6.h>
++#include <linux/regulator/consumer.h>
+
+ #include <linux/spi/spi.h>
+
+@@ -83,6 +84,7 @@ union ks8851_tx_hdr {
+ * @rc_rxqcr: Cached copy of KS_RXQCR.
+ * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
+ * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
++ * @vdd_reg: Optional regulator supplying the chip
+ *
+ * The @lock ensures that the chip is protected when certain operations are
+ * in progress. When the read or write packet transfer is in progress, most
+@@ -130,6 +132,7 @@ struct ks8851_net {
+ struct spi_transfer spi_xfer2[2];
+
+ struct eeprom_93cx6 eeprom;
++ struct regulator *vdd_reg;
+ };
+
+ static int msg_enable;
+@@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi)
+ ks->spidev = spi;
+ ks->tx_space = 6144;
+
++ ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
++ if (IS_ERR(ks->vdd_reg)) {
++ ret = PTR_ERR(ks->vdd_reg);
++ if (ret == -EPROBE_DEFER)
++ goto err_reg;
++ } else {
++ ret = regulator_enable(ks->vdd_reg);
++ if (ret) {
++ dev_err(&spi->dev, "regulator enable fail: %d\n",
++ ret);
++ goto err_reg_en;
++ }
++ }
++
++
+ mutex_init(&ks->lock);
+ spin_lock_init(&ks->statelock);
+
+@@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi)
+ err_netdev:
+ free_irq(ndev->irq, ks);
+
+-err_id:
+ err_irq:
++err_id:
++ if (!IS_ERR(ks->vdd_reg))
++ regulator_disable(ks->vdd_reg);
++err_reg_en:
++ if (!IS_ERR(ks->vdd_reg))
++ regulator_put(ks->vdd_reg);
++err_reg:
+ free_netdev(ndev);
+ return ret;
+ }
+@@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi)
+
+ unregister_netdev(priv->netdev);
+ free_irq(spi->irq, priv);
++ if (!IS_ERR(priv->vdd_reg)) {
++ regulator_disable(priv->vdd_reg);
++ regulator_put(priv->vdd_reg);
++ }
+ free_netdev(priv->netdev);
+
+ return 0;
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index a91fa49b81c3..1d4da74595f9 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -753,14 +753,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
+ // precondition: never called in_interrupt
+ static void usbnet_terminate_urbs(struct usbnet *dev)
+ {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
+ DECLARE_WAITQUEUE(wait, current);
+ int temp;
+
+ /* ensure there are no more active urbs */
+- add_wait_queue(&unlink_wakeup, &wait);
++ add_wait_queue(&dev->wait, &wait);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- dev->wait = &unlink_wakeup;
+ temp = unlink_urbs(dev, &dev->txq) +
+ unlink_urbs(dev, &dev->rxq);
+
+@@ -774,15 +772,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
+ "waited for %d urb completions\n", temp);
+ }
+ set_current_state(TASK_RUNNING);
+- dev->wait = NULL;
+- remove_wait_queue(&unlink_wakeup, &wait);
++ remove_wait_queue(&dev->wait, &wait);
+ }
+
+ int usbnet_stop (struct net_device *net)
+ {
+ struct usbnet *dev = netdev_priv(net);
+ struct driver_info *info = dev->driver_info;
+- int retval;
++ int retval, pm;
+
+ clear_bit(EVENT_DEV_OPEN, &dev->flags);
+ netif_stop_queue (net);
+@@ -792,6 +789,8 @@ int usbnet_stop (struct net_device *net)
+ net->stats.rx_packets, net->stats.tx_packets,
+ net->stats.rx_errors, net->stats.tx_errors);
+
++ /* to not race resume */
++ pm = usb_autopm_get_interface(dev->intf);
+ /* allow minidriver to stop correctly (wireless devices to turn off
+ * radio etc) */
+ if (info->stop) {
+@@ -818,6 +817,9 @@ int usbnet_stop (struct net_device *net)
+ dev->flags = 0;
+ del_timer_sync (&dev->delay);
+ tasklet_kill (&dev->bh);
++ if (!pm)
++ usb_autopm_put_interface(dev->intf);
++
+ if (info->manage_power &&
+ !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
+ info->manage_power(dev, 0);
+@@ -1438,11 +1440,12 @@ static void usbnet_bh (unsigned long param)
+ /* restart RX again after disabling due to high error rate */
+ clear_bit(EVENT_RX_KILL, &dev->flags);
+
+- // waiting for all pending urbs to complete?
+- if (dev->wait) {
+- if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
+- wake_up (dev->wait);
+- }
++ /* waiting for all pending urbs to complete?
++ * only then can we forgo submitting anew
++ */
++ if (waitqueue_active(&dev->wait)) {
++ if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
++ wake_up_all(&dev->wait);
+
+ // or are we maybe short a few urbs?
+ } else if (netif_running (dev->net) &&
+@@ -1581,6 +1584,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ dev->driver_name = name;
+ dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
+ | NETIF_MSG_PROBE | NETIF_MSG_LINK);
++ init_waitqueue_head(&dev->wait);
+ skb_queue_head_init (&dev->rxq);
+ skb_queue_head_init (&dev->txq);
+ skb_queue_head_init (&dev->done);
+@@ -1792,9 +1796,10 @@ int usbnet_resume (struct usb_interface *intf)
+ spin_unlock_irq(&dev->txq.lock);
+
+ if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+- /* handle remote wakeup ASAP */
+- if (!dev->wait &&
+- netif_device_present(dev->net) &&
++ /* handle remote wakeup ASAP
++ * we cannot race against stop
++ */
++ if (netif_device_present(dev->net) &&
+ !timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags))
+ rx_alloc_submit(dev, GFP_NOIO);
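The usbnet rework above replaces a wait queue that lived on the stack of usbnet_terminate_urbs(), published to wakers through the dev->wait pointer, with a wait queue embedded in struct usbnet itself, so a late waker can never dereference a dangling pointer and usbnet_bh() can test waitqueue_active() safely. A userspace analogue of the embedded-waiter design, sketched with a condition variable (illustrative only, not the kernel code; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

struct dev {
	pthread_mutex_t lock;
	pthread_cond_t wait;	/* embedded, like dev->wait after the patch */
	int pending;
};

static void *completer(void *arg)
{
	struct dev *d = arg;

	pthread_mutex_lock(&d->lock);
	d->pending = 0;
	pthread_cond_broadcast(&d->wait);	/* cf. wake_up_all(&dev->wait) */
	pthread_mutex_unlock(&d->lock);
	return NULL;
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1 };
	pthread_t t;

	pthread_create(&t, NULL, completer, &d);
	pthread_mutex_lock(&d.lock);
	while (d.pending)	/* cf. waiting for txq/rxq/done to drain */
		pthread_cond_wait(&d.wait, &d.lock);
	pthread_mutex_unlock(&d.lock);
	pthread_join(t, NULL);
	puts("all urbs completed");
	return 0;
}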
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 32c45c3d820d..4ecdf3c22bc6 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -781,6 +781,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ if (err)
+ return err;
+
++ if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
++ return -EAFNOSUPPORT;
++
+ spin_lock_bh(&vxlan->hash_lock);
+ err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
+ port, vni, ifindex, ndm->ndm_flags);
+@@ -1212,6 +1215,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
+
+ neigh_release(n);
+
++ if (reply == NULL)
++ goto out;
++
+ skb_reset_mac_header(reply);
+ __skb_pull(reply, skb_network_offset(reply));
+ reply->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -1233,15 +1239,103 @@ out:
+ }
+
+ #if IS_ENABLED(CONFIG_IPV6)
++
++static struct sk_buff *vxlan_na_create(struct sk_buff *request,
++ struct neighbour *n, bool isrouter)
++{
++ struct net_device *dev = request->dev;
++ struct sk_buff *reply;
++ struct nd_msg *ns, *na;
++ struct ipv6hdr *pip6;
++ u8 *daddr;
++ int na_olen = 8; /* opt hdr + ETH_ALEN for target */
++ int ns_olen;
++ int i, len;
++
++ if (dev == NULL)
++ return NULL;
++
++ len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
++ sizeof(*na) + na_olen + dev->needed_tailroom;
++ reply = alloc_skb(len, GFP_ATOMIC);
++ if (reply == NULL)
++ return NULL;
++
++ reply->protocol = htons(ETH_P_IPV6);
++ reply->dev = dev;
++ skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
++ skb_push(reply, sizeof(struct ethhdr));
++ skb_set_mac_header(reply, 0);
++
++ ns = (struct nd_msg *)skb_transport_header(request);
++
++ daddr = eth_hdr(request)->h_source;
++ ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
++ for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
++ if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
++ daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
++ break;
++ }
++ }
++
++ /* Ethernet header */
++ memcpy(eth_hdr(reply)->h_dest, daddr, ETH_ALEN);
++ memcpy(eth_hdr(reply)->h_source, n->ha, ETH_ALEN);
++ eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
++ reply->protocol = htons(ETH_P_IPV6);
++
++ skb_pull(reply, sizeof(struct ethhdr));
++ skb_set_network_header(reply, 0);
++ skb_put(reply, sizeof(struct ipv6hdr));
++
++ /* IPv6 header */
++
++ pip6 = ipv6_hdr(reply);
++ memset(pip6, 0, sizeof(struct ipv6hdr));
++ pip6->version = 6;
++ pip6->priority = ipv6_hdr(request)->priority;
++ pip6->nexthdr = IPPROTO_ICMPV6;
++ pip6->hop_limit = 255;
++ pip6->daddr = ipv6_hdr(request)->saddr;
++ pip6->saddr = *(struct in6_addr *)n->primary_key;
++
++ skb_pull(reply, sizeof(struct ipv6hdr));
++ skb_set_transport_header(reply, 0);
++
++ na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
++
++ /* Neighbor Advertisement */
++ memset(na, 0, sizeof(*na)+na_olen);
++ na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
++ na->icmph.icmp6_router = isrouter;
++ na->icmph.icmp6_override = 1;
++ na->icmph.icmp6_solicited = 1;
++ na->target = ns->target;
++ memcpy(&na->opt[2], n->ha, ETH_ALEN);
++ na->opt[0] = ND_OPT_TARGET_LL_ADDR;
++ na->opt[1] = na_olen >> 3;
++
++ na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
++ &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
++ csum_partial(na, sizeof(*na)+na_olen, 0));
++
++ pip6->payload_len = htons(sizeof(*na)+na_olen);
++
++ skb_push(reply, sizeof(struct ipv6hdr));
++
++ reply->ip_summed = CHECKSUM_UNNECESSARY;
++
++ return reply;
++}
++
+ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+- struct neighbour *n;
+- union vxlan_addr ipa;
++ struct nd_msg *msg;
+ const struct ipv6hdr *iphdr;
+ const struct in6_addr *saddr, *daddr;
+- struct nd_msg *msg;
+- struct inet6_dev *in6_dev = NULL;
++ struct neighbour *n;
++ struct inet6_dev *in6_dev;
+
+ in6_dev = __in6_dev_get(dev);
+ if (!in6_dev)
+@@ -1254,19 +1348,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+ saddr = &iphdr->saddr;
+ daddr = &iphdr->daddr;
+
+- if (ipv6_addr_loopback(daddr) ||
+- ipv6_addr_is_multicast(daddr))
+- goto out;
+-
+ msg = (struct nd_msg *)skb_transport_header(skb);
+ if (msg->icmph.icmp6_code != 0 ||
+ msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
+ goto out;
+
+- n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
++ if (ipv6_addr_loopback(daddr) ||
++ ipv6_addr_is_multicast(&msg->target))
++ goto out;
++
++ n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
+
+ if (n) {
+ struct vxlan_fdb *f;
++ struct sk_buff *reply;
+
+ if (!(n->nud_state & NUD_CONNECTED)) {
+ neigh_release(n);
+@@ -1280,13 +1375,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+ goto out;
+ }
+
+- ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
+- !!in6_dev->cnf.forwarding,
+- true, false, false);
++ reply = vxlan_na_create(skb, n,
++ !!(f ? f->flags & NTF_ROUTER : 0));
++
+ neigh_release(n);
++
++ if (reply == NULL)
++ goto out;
++
++ if (netif_rx_ni(reply) == NET_RX_DROP)
++ dev->stats.rx_dropped++;
++
+ } else if (vxlan->flags & VXLAN_F_L3MISS) {
+- ipa.sin6.sin6_addr = *daddr;
+- ipa.sa.sa_family = AF_INET6;
++ union vxlan_addr ipa = {
++ .sin6.sin6_addr = msg->target,
++ .sa.sa_family = AF_INET6,
++ };
++
+ vxlan_ip_miss(dev, &ipa);
+ }
+
+@@ -2383,9 +2488,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
+ vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+ dst->remote_vni = vni;
+
++ /* Unless IPv6 is explicitly requested, assume IPv4 */
++ dst->remote_ip.sa.sa_family = AF_INET;
+ if (data[IFLA_VXLAN_GROUP]) {
+ dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+- dst->remote_ip.sa.sa_family = AF_INET;
+ } else if (data[IFLA_VXLAN_GROUP6]) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 400fea1de080..a7501cb9b53b 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -102,6 +102,11 @@ struct xenvif {
+ domid_t domid;
+ unsigned int handle;
+
++ /* Is this interface disabled? True when backend discovers
++ * frontend is rogue.
++ */
++ bool disabled;
++
+ /* Use NAPI for guest TX */
+ struct napi_struct napi;
+ /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 459935a6bfae..adfe46068581 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -66,6 +66,15 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
+ struct xenvif *vif = container_of(napi, struct xenvif, napi);
+ int work_done;
+
++	/* This vif is rogue, we pretend there is nothing to do
++ * for this vif to deschedule it from NAPI. But this interface
++ * will be turned off in thread context later.
++ */
++ if (unlikely(vif->disabled)) {
++ napi_complete(napi);
++ return 0;
++ }
++
+ work_done = xenvif_tx_action(vif, budget);
+
+ if (work_done < budget) {
+@@ -309,6 +318,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
+ vif->csum = 1;
+ vif->dev = dev;
+
++ vif->disabled = false;
++
+ vif->credit_bytes = vif->remaining_credit = ~0UL;
+ vif->credit_usec = 0UL;
+ init_timer(&vif->credit_timeout);
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 625585034ef4..a1186533cee8 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -206,8 +206,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+ * into multiple copies tend to give large frags their
+ * own buffers as before.
+ */
+- if ((offset + size > MAX_BUFFER_OFFSET) &&
+- (size <= MAX_BUFFER_OFFSET) && offset && !head)
++ BUG_ON(size > MAX_BUFFER_OFFSET);
++ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
+ return true;
+
+ return false;
+@@ -731,7 +731,8 @@ static void xenvif_tx_err(struct xenvif *vif,
+ static void xenvif_fatal_tx_err(struct xenvif *vif)
+ {
+ netdev_err(vif->dev, "fatal error; disabling device\n");
+- xenvif_carrier_off(vif);
++ vif->disabled = true;
++ xenvif_kick_thread(vif);
+ }
+
+ static int xenvif_count_requests(struct xenvif *vif,
+@@ -1242,7 +1243,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
+ vif->tx.sring->req_prod, vif->tx.req_cons,
+ XEN_NETIF_TX_RING_SIZE);
+ xenvif_fatal_tx_err(vif);
+- continue;
++ break;
+ }
+
+ RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+@@ -1642,7 +1643,18 @@ int xenvif_kthread(void *data)
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(vif->wq,
+ rx_work_todo(vif) ||
++ vif->disabled ||
+ kthread_should_stop());
++
++ /* This frontend is found to be rogue, disable it in
++ * kthread context. Currently this is only set when
++		 * netback finds out the frontend sent a malformed packet,
++ * but we cannot disable the interface in softirq
++ * context so we defer it here.
++ */
++ if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
++ xenvif_carrier_off(vif);
++
+ if (kthread_should_stop())
+ break;
+
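The xen-netback fix above defers the actual carrier-off to the per-vif kthread: xenvif_fatal_tx_err() can run in softirq context, where turning the interface off is not safe, so it only sets vif->disabled and kicks the thread, while the NAPI poller reports zero work for a disabled vif. The flag-then-kick pattern in a userspace sketch (pthreads standing in for the kthread and wait queue; build with cc -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static bool disabled, stop;

/* "Softirq" side: may not sleep, so only set a flag and kick the thread. */
static void fatal_tx_err(void)
{
	pthread_mutex_lock(&lock);
	disabled = true;
	pthread_cond_signal(&wq);	/* cf. xenvif_kick_thread() */
	pthread_mutex_unlock(&lock);
}

/* Thread side: a context where it is safe to turn the interface off. */
static void *kthread(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!stop) {
		if (disabled) {
			puts("carrier off (done in thread context)");
			stop = true;
			break;
		}
		pthread_cond_wait(&wq, &lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, kthread, NULL);
	fatal_tx_err();
	pthread_join(t, NULL);
	return 0;
}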
+diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
+index 1953c1680986..8efd11dafd44 100644
+--- a/drivers/pci/host/pci-mvebu.c
++++ b/drivers/pci/host/pci-mvebu.c
+@@ -866,11 +866,23 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
+ continue;
+ }
+
++ port->clk = of_clk_get_by_name(child, NULL);
++ if (IS_ERR(port->clk)) {
++ dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
++ port->port, port->lane);
++ continue;
++ }
++
++ ret = clk_prepare_enable(port->clk);
++ if (ret)
++ continue;
++
+ port->base = mvebu_pcie_map_registers(pdev, child, port);
+ if (IS_ERR(port->base)) {
+ dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
+ port->port, port->lane);
+ port->base = NULL;
++ clk_disable_unprepare(port->clk);
+ continue;
+ }
+
+@@ -886,22 +898,9 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
+ port->port, port->lane);
+ }
+
+- port->clk = of_clk_get_by_name(child, NULL);
+- if (IS_ERR(port->clk)) {
+- dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
+- port->port, port->lane);
+- iounmap(port->base);
+- port->haslink = 0;
+- continue;
+- }
+-
+ port->dn = child;
+-
+- clk_prepare_enable(port->clk);
+ spin_lock_init(&port->conf_lock);
+-
+ mvebu_sw_pci_bridge_init(port);
+-
+ i++;
+ }
+
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index b12176f2013c..5264d839474a 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -501,9 +501,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
+ r = -ENOBUFS;
+ goto err;
+ }
+- d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
++ r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+ ARRAY_SIZE(vq->iov) - seg, &out,
+ &in, log, log_num);
++ if (unlikely(r < 0))
++ goto err;
++
++ d = r;
+ if (d == vq->num) {
+ r = 0;
+ goto err;
+@@ -528,6 +532,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
+ *iovcount = seg;
+ if (unlikely(log))
+ *log_num = nlogs;
++
++ /* Detect overrun */
++ if (unlikely(datalen > 0)) {
++ r = UIO_MAXIOV + 1;
++ goto err;
++ }
+ return headcount;
+ err:
+ vhost_discard_vq_desc(vq, headcount);
+@@ -583,6 +593,14 @@ static void handle_rx(struct vhost_net *net)
+ /* On error, stop handling until the next kick. */
+ if (unlikely(headcount < 0))
+ break;
++ /* On overrun, truncate and discard */
++ if (unlikely(headcount > UIO_MAXIOV)) {
++ msg.msg_iovlen = 1;
++ err = sock->ops->recvmsg(NULL, sock, &msg,
++ 1, MSG_DONTWAIT | MSG_TRUNC);
++ pr_debug("Discarded rx packet: len %zd\n", sock_len);
++ continue;
++ }
+ /* OK, now we need to know about added descriptors. */
+ if (!headcount) {
+ if (unlikely(vhost_enable_notify(&net->dev, vq))) {
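The vhost change above reports "descriptor chain too small for this packet" by returning a value strictly above the largest legal headcount, UIO_MAXIOV + 1, which handle_rx() can tell apart from both errors (negative) and success before truncating and discarding the packet. The above-range-sentinel idiom in a standalone sketch, with a hypothetical limit:

#include <stdio.h>

#define MAXIOV 8	/* hypothetical stand-in for UIO_MAXIOV */

/* Returns <0 on error, 0..MAXIOV on success, MAXIOV+1 on overrun. */
static int get_bufs(int avail, int needed)
{
	if (avail < 0)
		return -1;
	if (needed > avail)
		return MAXIOV + 1;	/* buffers too small for the data */
	return needed;
}

int main(void)
{
	int n = get_bufs(4, 6);

	if (n < 0)
		puts("error: stop until next kick");
	else if (n > MAXIOV)
		puts("overrun: truncate and discard the packet");
	else
		printf("got %d buffers\n", n);
	return 0;
}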
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index 8659eb160b4d..b6d5008f361f 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1108,14 +1108,16 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ case FBIOPUT_VSCREENINFO:
+ if (copy_from_user(&var, argp, sizeof(var)))
+ return -EFAULT;
+- if (!lock_fb_info(info))
+- return -ENODEV;
+ console_lock();
++ if (!lock_fb_info(info)) {
++ console_unlock();
++ return -ENODEV;
++ }
+ info->flags |= FBINFO_MISC_USEREVENT;
+ ret = fb_set_var(info, &var);
+ info->flags &= ~FBINFO_MISC_USEREVENT;
+- console_unlock();
+ unlock_fb_info(info);
++ console_unlock();
+ if (!ret && copy_to_user(argp, &var, sizeof(var)))
+ ret = -EFAULT;
+ break;
+@@ -1144,12 +1146,14 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ case FBIOPAN_DISPLAY:
+ if (copy_from_user(&var, argp, sizeof(var)))
+ return -EFAULT;
+- if (!lock_fb_info(info))
+- return -ENODEV;
+ console_lock();
++ if (!lock_fb_info(info)) {
++ console_unlock();
++ return -ENODEV;
++ }
+ ret = fb_pan_display(info, &var);
+- console_unlock();
+ unlock_fb_info(info);
++ console_unlock();
+ if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
+ return -EFAULT;
+ break;
+@@ -1184,23 +1188,27 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ break;
+ }
+ event.data = &con2fb;
+- if (!lock_fb_info(info))
+- return -ENODEV;
+ console_lock();
++ if (!lock_fb_info(info)) {
++ console_unlock();
++ return -ENODEV;
++ }
+ event.info = info;
+ ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
+- console_unlock();
+ unlock_fb_info(info);
++ console_unlock();
+ break;
+ case FBIOBLANK:
+- if (!lock_fb_info(info))
+- return -ENODEV;
+ console_lock();
++ if (!lock_fb_info(info)) {
++ console_unlock();
++ return -ENODEV;
++ }
+ info->flags |= FBINFO_MISC_USEREVENT;
+ ret = fb_blank(info, arg);
+ info->flags &= ~FBINFO_MISC_USEREVENT;
+- console_unlock();
+ unlock_fb_info(info);
++ console_unlock();
+ break;
+ default:
+ if (!lock_fb_info(info))
+@@ -1569,10 +1577,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena,
+ static int do_unregister_framebuffer(struct fb_info *fb_info);
+
+ #define VGA_FB_PHYS 0xA0000
+-static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
+- const char *name, bool primary)
++static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
++ const char *name, bool primary)
+ {
+- int i;
++ int i, ret;
+
+ /* check all firmware fbs and kick off if the base addr overlaps */
+ for (i = 0 ; i < FB_MAX; i++) {
+@@ -1588,25 +1596,31 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
+ (primary && gen_aper && gen_aper->count &&
+ gen_aper->ranges[0].base == VGA_FB_PHYS)) {
+
+- printk(KERN_INFO "fb: conflicting fb hw usage "
+- "%s vs %s - removing generic driver\n",
++ printk(KERN_INFO "fb: switching to %s from %s\n",
+ name, registered_fb[i]->fix.id);
+- do_unregister_framebuffer(registered_fb[i]);
++ ret = do_unregister_framebuffer(registered_fb[i]);
++ if (ret)
++ return ret;
+ }
+ }
++
++ return 0;
+ }
+
+ static int do_register_framebuffer(struct fb_info *fb_info)
+ {
+- int i;
++ int i, ret;
+ struct fb_event event;
+ struct fb_videomode mode;
+
+ if (fb_check_foreignness(fb_info))
+ return -ENOSYS;
+
+- do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
+- fb_is_primary_device(fb_info));
++ ret = do_remove_conflicting_framebuffers(fb_info->apertures,
++ fb_info->fix.id,
++ fb_is_primary_device(fb_info));
++ if (ret)
++ return ret;
+
+ if (num_registered_fb == FB_MAX)
+ return -ENXIO;
+@@ -1660,12 +1674,15 @@ static int do_register_framebuffer(struct fb_info *fb_info)
+ registered_fb[i] = fb_info;
+
+ event.info = fb_info;
+- if (!lock_fb_info(fb_info))
+- return -ENODEV;
+ console_lock();
++ if (!lock_fb_info(fb_info)) {
++ console_unlock();
++ return -ENODEV;
++ }
++
+ fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
+- console_unlock();
+ unlock_fb_info(fb_info);
++ console_unlock();
+ return 0;
+ }
+
+@@ -1678,13 +1695,16 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+ if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
+ return -EINVAL;
+
+- if (!lock_fb_info(fb_info))
+- return -ENODEV;
+ console_lock();
++ if (!lock_fb_info(fb_info)) {
++ console_unlock();
++ return -ENODEV;
++ }
++
+ event.info = fb_info;
+ ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
+- console_unlock();
+ unlock_fb_info(fb_info);
++ console_unlock();
+
+ if (ret)
+ return -EINVAL;
+@@ -1725,12 +1745,16 @@ int unlink_framebuffer(struct fb_info *fb_info)
+ }
+ EXPORT_SYMBOL(unlink_framebuffer);
+
+-void remove_conflicting_framebuffers(struct apertures_struct *a,
+- const char *name, bool primary)
++int remove_conflicting_framebuffers(struct apertures_struct *a,
++ const char *name, bool primary)
+ {
++ int ret;
++
+ mutex_lock(&registration_lock);
+- do_remove_conflicting_framebuffers(a, name, primary);
++ ret = do_remove_conflicting_framebuffers(a, name, primary);
+ mutex_unlock(&registration_lock);
++
++ return ret;
+ }
+ EXPORT_SYMBOL(remove_conflicting_framebuffers);
+
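Every hunk in fbmem.c above applies the same lock-order change: take console_lock() first, then lock_fb_info(), and drop them in reverse, so that all paths agree on a single hierarchy and the old ABBA deadlock between the two locks disappears. A userspace sketch of that discipline with two pthread mutexes (illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t fb_info_lock = PTHREAD_MUTEX_INITIALIZER;

/* Always console_lock -> fb_info lock, unlock in reverse order. */
static int fb_ioctl_path(void)
{
	pthread_mutex_lock(&console_lock);
	if (pthread_mutex_trylock(&fb_info_lock)) {	/* cf. lock_fb_info() failing */
		pthread_mutex_unlock(&console_lock);	/* back out in order */
		return -1;
	}
	/* ... operate on the framebuffer ... */
	pthread_mutex_unlock(&fb_info_lock);
	pthread_mutex_unlock(&console_lock);
	return 0;
}

int main(void)
{
	printf("path returned %d\n", fb_ioctl_path());
	return 0;
}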
+diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
+index ef476b02fbe5..53444ac19fe0 100644
+--- a/drivers/video/fbsysfs.c
++++ b/drivers/video/fbsysfs.c
+@@ -177,9 +177,12 @@ static ssize_t store_modes(struct device *device,
+ if (i * sizeof(struct fb_videomode) != count)
+ return -EINVAL;
+
+- if (!lock_fb_info(fb_info))
+- return -ENODEV;
+ console_lock();
++ if (!lock_fb_info(fb_info)) {
++ console_unlock();
++ return -ENODEV;
++ }
++
+ list_splice(&fb_info->modelist, &old_list);
+ fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
+ &fb_info->modelist);
+@@ -189,8 +192,8 @@ static ssize_t store_modes(struct device *device,
+ } else
+ fb_destroy_modelist(&old_list);
+
+- console_unlock();
+ unlock_fb_info(fb_info);
++ console_unlock();
+
+ return 0;
+ }
+@@ -404,12 +407,16 @@ static ssize_t store_fbstate(struct device *device,
+
+ state = simple_strtoul(buf, &last, 0);
+
+- if (!lock_fb_info(fb_info))
+- return -ENODEV;
+ console_lock();
++ if (!lock_fb_info(fb_info)) {
++ console_unlock();
++ return -ENODEV;
++ }
++
+ fb_set_suspend(fb_info, (int)state);
+- console_unlock();
++
+ unlock_fb_info(fb_info);
++ console_unlock();
+
+ return count;
+ }
+diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
+index 0264704a52be..45d031233253 100644
+--- a/drivers/video/sh_mobile_lcdcfb.c
++++ b/drivers/video/sh_mobile_lcdcfb.c
+@@ -574,8 +574,9 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
+ switch (event) {
+ case SH_MOBILE_LCDC_EVENT_DISPLAY_CONNECT:
+ /* HDMI plug in */
++ console_lock();
+ if (lock_fb_info(info)) {
+- console_lock();
++
+
+ ch->display.width = monspec->max_x * 10;
+ ch->display.height = monspec->max_y * 10;
+@@ -594,19 +595,20 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
+ fb_set_suspend(info, 0);
+ }
+
+- console_unlock();
++
+ unlock_fb_info(info);
+ }
++ console_unlock();
+ break;
+
+ case SH_MOBILE_LCDC_EVENT_DISPLAY_DISCONNECT:
+ /* HDMI disconnect */
++ console_lock();
+ if (lock_fb_info(info)) {
+- console_lock();
+ fb_set_suspend(info, 1);
+- console_unlock();
+ unlock_fb_info(info);
+ }
++ console_unlock();
+ break;
+
+ case SH_MOBILE_LCDC_EVENT_DISPLAY_MODE:
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 786bf0708904..f173ef12c97a 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4447,7 +4447,12 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
+ return -EIO;
+ }
+
+- if (wbc->sync_mode != WB_SYNC_ALL)
++ /*
++ * No need to force transaction in WB_SYNC_NONE mode. Also
++ * ext4_sync_fs() will force the commit after everything is
++ * written.
++ */
++ if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
+ return 0;
+
+ err = ext4_force_commit(inode->i_sb);
+@@ -4457,7 +4462,11 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
+ err = __ext4_get_inode_loc(inode, &iloc, 0);
+ if (err)
+ return err;
+- if (wbc->sync_mode == WB_SYNC_ALL)
++ /*
++ * sync(2) will flush the whole buffer cache. No need to do
++ * it here separately for each inode.
++ */
++ if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
+ sync_dirty_buffer(iloc.bh);
+ if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
+ EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
+diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
+index 4a1aafba6a20..4612291e7cc0 100644
+--- a/fs/nfs/nfs3acl.c
++++ b/fs/nfs/nfs3acl.c
+@@ -289,8 +289,8 @@ getout:
+ return acl;
+ }
+
+-static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+- struct posix_acl *dfacl)
++static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
++ struct posix_acl *dfacl)
+ {
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_fattr *fattr;
+@@ -373,6 +373,15 @@ out:
+ return status;
+ }
+
++int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
++ struct posix_acl *dfacl)
++{
++ int ret;
++ ret = __nfs3_proc_setacls(inode, acl, dfacl);
++ return (ret == -EOPNOTSUPP) ? 0 : ret;
++
++}
++
+ int nfs3_proc_setacl(struct inode *inode, int type, struct posix_acl *acl)
+ {
+ struct posix_acl *alloc = NULL, *dfacl = NULL;
+@@ -406,7 +415,7 @@ int nfs3_proc_setacl(struct inode *inode, int type, struct posix_acl *acl)
+ if (IS_ERR(alloc))
+ goto fail;
+ }
+- status = nfs3_proc_setacls(inode, acl, dfacl);
++ status = __nfs3_proc_setacls(inode, acl, dfacl);
+ posix_acl_release(alloc);
+ return status;
+
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index b2f842d0901b..1c2beb18a713 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -3405,7 +3405,7 @@ static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint
+ {
+ __be32 *p;
+
+- *res = ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL;
++ *res = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) {
+diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
+index 24f499569a2f..ec5d737f93c5 100644
+--- a/include/drm/drm_crtc.h
++++ b/include/drm/drm_crtc.h
+@@ -1108,6 +1108,8 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
+ int GTF_2C, int GTF_K, int GTF_2J);
+ extern int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay);
++extern void drm_set_preferred_mode(struct drm_connector *connector,
++ int hpref, int vpref);
+
+ extern int drm_edid_header_is_valid(const u8 *raw_edid);
+ extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index ffac70aab3e9..8439a1600c1a 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -613,8 +613,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
+ extern int register_framebuffer(struct fb_info *fb_info);
+ extern int unregister_framebuffer(struct fb_info *fb_info);
+ extern int unlink_framebuffer(struct fb_info *fb_info);
+-extern void remove_conflicting_framebuffers(struct apertures_struct *a,
+- const char *name, bool primary);
++extern int remove_conflicting_framebuffers(struct apertures_struct *a,
++ const char *name, bool primary);
+ extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
+ extern int fb_show_logo(struct fb_info *fb_info, int rotate);
+ extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
+diff --git a/include/linux/futex.h b/include/linux/futex.h
+index b0d95cac826e..6435f46d6e13 100644
+--- a/include/linux/futex.h
++++ b/include/linux/futex.h
+@@ -55,7 +55,11 @@ union futex_key {
+ #ifdef CONFIG_FUTEX
+ extern void exit_robust_list(struct task_struct *curr);
+ extern void exit_pi_state_list(struct task_struct *curr);
++#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
++#define futex_cmpxchg_enabled 1
++#else
+ extern int futex_cmpxchg_enabled;
++#endif
+ #else
+ static inline void exit_robust_list(struct task_struct *curr)
+ {
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index e303eef94dd5..0662e98fef72 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -30,7 +30,7 @@ struct usbnet {
+ struct driver_info *driver_info;
+ const char *driver_name;
+ void *driver_priv;
+- wait_queue_head_t *wait;
++ wait_queue_head_t wait;
+ struct mutex phy_mutex;
+ unsigned char suspend_count;
+ unsigned char pkt_cnt, pkt_err;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 808cbc2ec6c1..6e2c4901a477 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1459,6 +1459,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+ */
+ #define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
+
++static inline void sock_release_ownership(struct sock *sk)
++{
++ sk->sk_lock.owned = 0;
++}
++
+ /*
+ * Macro so as to not evaluate some arguments when
+ * lockdep is not enabled.
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 51dcc6faa561..31c48908ae32 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -484,20 +484,21 @@ extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ #ifdef CONFIG_SYN_COOKIES
+ #include <linux/ktime.h>
+
+-/* Syncookies use a monotonic timer which increments every 64 seconds.
++/* Syncookies use a monotonic timer which increments every 60 seconds.
+ * This counter is used both as a hash input and partially encoded into
+ * the cookie value. A cookie is only validated further if the delta
+ * between the current counter value and the encoded one is less than this,
+- * i.e. a sent cookie is valid only at most for 128 seconds (or less if
++ * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
+ * the counter advances immediately after a cookie is generated).
+ */
+ #define MAX_SYNCOOKIE_AGE 2
+
+ static inline u32 tcp_cookie_time(void)
+ {
+- struct timespec now;
+- getnstimeofday(&now);
+- return now.tv_sec >> 6; /* 64 seconds granularity */
++ u64 val = get_jiffies_64();
++
++ do_div(val, 60 * HZ);
++ return val;
+ }
+
+ extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
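The rewritten tcp_cookie_time() above derives its tick from the 64-bit jiffies counter rather than wall-clock time, so an NTP step can no longer invalidate outstanding cookies, and the granularity becomes exactly 60 seconds instead of 64. With HZ = 1000 the counter advances once per 60 * 1000 = 60000 jiffies, and MAX_SYNCOOKIE_AGE = 2 keeps a sent cookie valid for at most two ticks. A userspace check of the arithmetic, with plain division standing in for do_div():

#include <stdint.h>
#include <stdio.h>

#define HZ 1000u	/* assumed tick rate for the example */

static uint32_t cookie_time(uint64_t jiffies64)
{
	return (uint32_t)(jiffies64 / (60 * HZ));	/* one step per 60 s */
}

int main(void)
{
	uint64_t j = 5ull * 60 * HZ + 123;	/* ~5 minutes of uptime */

	printf("counter now: %u\n", cookie_time(j));			/* 5 */
	printf("counter 120 s later: %u\n", cookie_time(j + 120 * HZ));	/* 7 */
	return 0;
}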
+diff --git a/include/uapi/linux/fd.h b/include/uapi/linux/fd.h
+index f1f3dd5981b2..84c517cbce90 100644
+--- a/include/uapi/linux/fd.h
++++ b/include/uapi/linux/fd.h
+@@ -185,7 +185,8 @@ enum {
+ * to clear media change status */
+ FD_UNUSED_BIT,
+ FD_DISK_CHANGED_BIT, /* disk has been changed since last i/o */
+- FD_DISK_WRITABLE_BIT /* disk is writable */
++ FD_DISK_WRITABLE_BIT, /* disk is writable */
++ FD_OPEN_SHOULD_FAIL_BIT
+ };
+
+ #define FDSETDRVPRM _IOW(2, 0x90, struct floppy_drive_params)
+diff --git a/init/Kconfig b/init/Kconfig
+index 3ecd8a1178f1..d42dc7c6ba64 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1406,6 +1406,13 @@ config FUTEX
+ support for "fast userspace mutexes". The resulting kernel may not
+ run glibc-based applications correctly.
+
++config HAVE_FUTEX_CMPXCHG
++ bool
++ help
++ Architectures should select this if futex_atomic_cmpxchg_inatomic()
++ is implemented and always working. This removes a couple of runtime
++ checks.
++
+ config EPOLL
+ bool "Enable eventpoll support" if EXPERT
+ default y
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 231754863a87..d8347b7a064f 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -68,7 +68,9 @@
+
+ #include "rtmutex_common.h"
+
++#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
+ int __read_mostly futex_cmpxchg_enabled;
++#endif
+
+ #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
+
+@@ -2731,10 +2733,10 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+ return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
+ }
+
+-static int __init futex_init(void)
++static void __init futex_detect_cmpxchg(void)
+ {
++#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
+ u32 curval;
+- int i;
+
+ /*
+ * This will fail and we want it. Some arch implementations do
+@@ -2748,6 +2750,14 @@ static int __init futex_init(void)
+ */
+ if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
+ futex_cmpxchg_enabled = 1;
++#endif
++}
++
++static int __init futex_init(void)
++{
++ int i;
++
++ futex_detect_cmpxchg();
+
+ for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
+ plist_head_init(&futex_queues[i].chain);
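Making futex_cmpxchg_enabled a compile-time constant 1 on architectures that select HAVE_FUTEX_CMPXCHG, as the futex hunks above do, lets the compiler drop both the boot-time probe and every !futex_cmpxchg_enabled branch as dead code. The same trick in miniature:

#include <stdio.h>

/* Build with -DHAVE_CMPXCHG to get the compile-time constant variant. */
#ifdef HAVE_CMPXCHG
#define cmpxchg_enabled 1
#else
static int cmpxchg_enabled;	/* filled in by a runtime probe */

static void detect_cmpxchg(void)
{
	cmpxchg_enabled = 1;	/* pretend the probe succeeded */
}
#endif

int main(void)
{
#ifndef HAVE_CMPXCHG
	detect_cmpxchg();
#endif
	if (!cmpxchg_enabled)	/* provably dead when the macro is 1 */
		puts("falling back to slow path");
	else
		puts("using cmpxchg fast path");
	return 0;
}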
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index 18eca7809b08..fc6754720ced 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -303,9 +303,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
+ */
+ int nla_strcmp(const struct nlattr *nla, const char *str)
+ {
+- int len = strlen(str) + 1;
+- int d = nla_len(nla) - len;
++ int len = strlen(str);
++ char *buf = nla_data(nla);
++ int attrlen = nla_len(nla);
++ int d;
+
++ if (attrlen > 0 && buf[attrlen - 1] == '\0')
++ attrlen--;
++
++ d = attrlen - len;
+ if (d == 0)
+ d = memcmp(nla_data(nla), str, len);
+
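The nla_strcmp() fix above makes the comparison accept attributes sent both with and without a trailing NUL: one terminating '\0' is shaved off the attribute length before comparing, and because lengths are compared first, embedded NULs can no longer produce a false match. A standalone reimplementation of the corrected logic, for illustration:

#include <stdio.h>
#include <string.h>

/* Compare a length-delimited attribute against a C string,
 * tolerating an optional trailing NUL in the attribute. */
static int attr_strcmp(const char *buf, int attrlen, const char *str)
{
	int len = strlen(str);
	int d;

	if (attrlen > 0 && buf[attrlen - 1] == '\0')
		attrlen--;

	d = attrlen - len;
	if (d == 0)
		d = memcmp(buf, str, len);
	return d;
}

int main(void)
{
	printf("%d\n", attr_strcmp("eth0", 4, "eth0"));	/* 0: no NUL sent */
	printf("%d\n", attr_strcmp("eth0\0", 5, "eth0"));	/* 0: NUL sent */
	printf("%d\n", attr_strcmp("eth0\0x", 6, "eth0"));	/* != 0 */
	return 0;
}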
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 61fc573f1142..856499fdb10f 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -307,9 +307,11 @@ static void vlan_sync_address(struct net_device *dev,
+ static void vlan_transfer_features(struct net_device *dev,
+ struct net_device *vlandev)
+ {
++ struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
++
+ vlandev->gso_max_size = dev->gso_max_size;
+
+- if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
++ if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
+ vlandev->hard_header_len = dev->hard_header_len;
+ else
+ vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index edf44d079da7..d1537dcd4df8 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -557,6 +557,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
++ if (saddr == NULL)
++ saddr = dev->dev_addr;
++
+ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+ }
+
+@@ -608,7 +611,8 @@ static int vlan_dev_init(struct net_device *dev)
+ #endif
+
+ dev->needed_headroom = real_dev->needed_headroom;
+- if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
++ if (vlan_hw_offload_capable(real_dev->features,
++ vlan_dev_priv(dev)->vlan_proto)) {
+ dev->header_ops = &vlan_passthru_header_ops;
+ dev->hard_header_len = real_dev->hard_header_len;
+ } else {
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 1b148a3affa7..162d6c78ad05 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1129,9 +1129,10 @@ static void br_multicast_query_received(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct bridge_mcast_querier *querier,
+ int saddr,
++ bool is_general_query,
+ unsigned long max_delay)
+ {
+- if (saddr)
++ if (saddr && is_general_query)
+ br_multicast_update_querier_timer(br, querier, max_delay);
+ else if (timer_pending(&querier->timer))
+ return;
+@@ -1183,8 +1184,16 @@ static int br_ip4_multicast_query(struct net_bridge *br,
+ IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
+ }
+
++ /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
++ * all-systems destination addresses (224.0.0.1) for general queries
++	 * all-systems destination address (224.0.0.1) for general queries
++ if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
+- max_delay);
++ !group, max_delay);
+
+ if (!group)
+ goto out;
+@@ -1230,6 +1239,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ unsigned long max_delay;
+ unsigned long now = jiffies;
+ const struct in6_addr *group = NULL;
++ bool is_general_query;
+ int err = 0;
+ u16 vid = 0;
+
+@@ -1238,6 +1248,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ (port && port->state == BR_STATE_DISABLED))
+ goto out;
+
++ /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
++ if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ if (skb->len == sizeof(*mld)) {
+ if (!pskb_may_pull(skb, sizeof(*mld))) {
+ err = -EINVAL;
+@@ -1259,8 +1275,19 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
+ }
+
++ is_general_query = group && ipv6_addr_any(group);
++
++ /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
++ * all-nodes destination address (ff02::1) for general queries
++ */
++ if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ br_multicast_query_received(br, port, &br->ip6_querier,
+- !ipv6_addr_any(&ip6h->saddr), max_delay);
++ !ipv6_addr_any(&ip6h->saddr),
++ is_general_query, max_delay);
+
+ if (!group)
+ goto out;
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 462cdc97fad8..9b40f234b802 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -740,7 +740,7 @@ static bool pkt_is_ns(struct sk_buff *skb)
+ struct nd_msg *msg;
+ struct ipv6hdr *hdr;
+
+- if (skb->protocol != htons(ETH_P_ARP))
++ if (skb->protocol != htons(ETH_P_IPV6))
+ return false;
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
+ return false;
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 2a0e21de3060..37b492eaa4f8 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2014,12 +2014,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo);
+ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
+ struct net_device *dev,
+ u8 *addr, u32 pid, u32 seq,
+- int type, unsigned int flags)
++ int type, unsigned int flags,
++ int nlflags)
+ {
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
++ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+@@ -2057,7 +2058,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
+ if (!skb)
+ goto errout;
+
+- err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
++ err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto errout;
+@@ -2282,7 +2283,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
+
+ err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
+ portid, seq,
+- RTM_NEWNEIGH, NTF_SELF);
++ RTM_NEWNEIGH, NTF_SELF,
++ NLM_F_MULTI);
+ if (err < 0)
+ return err;
+ skip:
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 831a0d0af49f..ec228a30e7dc 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2359,10 +2359,13 @@ void release_sock(struct sock *sk)
+ if (sk->sk_backlog.tail)
+ __release_sock(sk);
+
++ /* Warning : release_cb() might need to release sk ownership,
++ * ie call sock_release_ownership(sk) before us.
++ */
+ if (sk->sk_prot->release_cb)
+ sk->sk_prot->release_cb(sk);
+
+- sk->sk_lock.owned = 0;
++ sock_release_ownership(sk);
+ if (waitqueue_active(&sk->sk_lock.wq))
+ wake_up(&sk->sk_lock.wq);
+ spin_unlock_bh(&sk->sk_lock.slock);
+diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
+index 736c9fc3ef93..0c0c1f09fd17 100644
+--- a/net/ipv4/gre_demux.c
++++ b/net/ipv4/gre_demux.c
+@@ -211,6 +211,14 @@ static int gre_cisco_rcv(struct sk_buff *skb)
+ int i;
+ bool csum_err = false;
+
++#ifdef CONFIG_NET_IPGRE_BROADCAST
++ if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
++ /* Looped back packet, drop it! */
++ if (rt_is_output_route(skb_rtable(skb)))
++ goto drop;
++ }
++#endif
++
+ if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+ goto drop;
+
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index c5313a9c019b..12b80fbfe767 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -211,7 +211,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
+ }
+
+ work = frag_mem_limit(nf) - nf->low_thresh;
+- while (work > 0) {
++ while (work > 0 || force) {
+ spin_lock(&nf->lru_lock);
+
+ if (list_empty(&nf->lru_list)) {
+@@ -281,9 +281,10 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
+
+ atomic_inc(&qp->refcnt);
+ hlist_add_head(&qp->list, &hb->chain);
++ inet_frag_lru_add(nf, qp);
+ spin_unlock(&hb->chain_lock);
+ read_unlock(&f->lock);
+- inet_frag_lru_add(nf, qp);
++
+ return qp;
+ }
+
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 995a0bb33a65..3bedb26cfb53 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -411,9 +411,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
+
+ #ifdef CONFIG_NET_IPGRE_BROADCAST
+ if (ipv4_is_multicast(iph->daddr)) {
+- /* Looped back packet, drop it! */
+- if (rt_is_output_route(skb_rtable(skb)))
+- goto drop;
+ tunnel->dev->stats.multicast++;
+ skb->pkt_type = PACKET_BROADCAST;
+ }
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index ba22cc3a5a53..c31e3ad98ef2 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -109,6 +109,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
+ secpath_reset(skb);
+ if (!skb->l4_rxhash)
+ skb->rxhash = 0;
++ skb_dst_drop(skb);
+ skb->vlan_tci = 0;
+ skb_set_queue_mapping(skb, 0);
+ skb->pkt_type = PACKET_HOST;
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 1672409f5ba5..6fbf3393d842 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -2253,13 +2253,14 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ }
+
+ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+- u32 portid, u32 seq, struct mfc_cache *c, int cmd)
++ u32 portid, u32 seq, struct mfc_cache *c, int cmd,
++ int flags)
+ {
+ struct nlmsghdr *nlh;
+ struct rtmsg *rtm;
+ int err;
+
+- nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
++ nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+@@ -2327,7 +2328,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
+ if (skb == NULL)
+ goto errout;
+
+- err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
++ err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
+ if (err < 0)
+ goto errout;
+
+@@ -2366,7 +2367,8 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+ if (ipmr_fill_mroute(mrt, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+- mfc, RTM_NEWROUTE) < 0)
++ mfc, RTM_NEWROUTE,
++ NLM_F_MULTI) < 0)
+ goto done;
+ next_entry:
+ e++;
+@@ -2380,7 +2382,8 @@ next_entry:
+ if (ipmr_fill_mroute(mrt, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+- mfc, RTM_NEWROUTE) < 0) {
++ mfc, RTM_NEWROUTE,
++ NLM_F_MULTI) < 0) {
+ spin_unlock_bh(&mfc_unres_lock);
+ goto done;
+ }
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index e088932bcfae..826fc6fab576 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -765,6 +765,17 @@ void tcp_release_cb(struct sock *sk)
+ if (flags & (1UL << TCP_TSQ_DEFERRED))
+ tcp_tsq_handler(sk);
+
++	/* Here begins the tricky part:
++	 * We are called from release_sock() with:
++	 * 1) BH disabled
++	 * 2) sk_lock.slock spinlock held
++	 * 3) socket owned by us (sk->sk_lock.owned == 1)
++	 *
++	 * But the following code is meant to be called from BH handlers,
++	 * so we should keep BH disabled and release socket ownership early.
++	 */
++ sock_release_ownership(sk);
++
+ if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
+ tcp_write_timer_handler(sk);
+ __sock_put(sk);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index cd3fb301da38..5dac9fd72465 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1079,8 +1079,11 @@ retry:
+ * Lifetime is greater than REGEN_ADVANCE time units. In particular,
+ * an implementation must not create a temporary address with a zero
+ * Preferred Lifetime.
++ * Use age calculation as in addrconf_verify to avoid unnecessary
++ * temporary addresses being generated.
+ */
+- if (tmp_prefered_lft <= regen_advance) {
++ age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
++ if (tmp_prefered_lft <= regen_advance + age) {
+ in6_ifa_put(ifp);
+ in6_dev_put(idev);
+ ret = -1;
+diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
+index cf77f3abfd06..447a7fbd1bb6 100644
+--- a/net/ipv6/exthdrs_offload.c
++++ b/net/ipv6/exthdrs_offload.c
+@@ -25,11 +25,11 @@ int __init ipv6_exthdrs_offload_init(void)
+ int ret;
+
+ ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING);
+- if (!ret)
++ if (ret)
+ goto out;
+
+ ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS);
+- if (!ret)
++ if (ret)
+ goto out_rt;
+
+ out:
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index eef8d945b362..e2c9ff840f63 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -516,7 +516,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+ np->tclass, NULL, &fl6, (struct rt6_info *)dst,
+ MSG_DONTWAIT, np->dontfrag);
+ if (err) {
+- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
++ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
+ ip6_flush_pending_frames(sk);
+ } else {
+ err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 68fd4918315c..516e136f15ca 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1088,21 +1088,19 @@ static void ip6_append_data_mtu(unsigned int *mtu,
+ unsigned int fragheaderlen,
+ struct sk_buff *skb,
+ struct rt6_info *rt,
+- bool pmtuprobe)
++ unsigned int orig_mtu)
+ {
+ if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+ if (skb == NULL) {
+ /* first fragment, reserve header_len */
+- *mtu = *mtu - rt->dst.header_len;
++ *mtu = orig_mtu - rt->dst.header_len;
+
+ } else {
+ /*
+ * this fragment is not first, the headers
+ * space is regarded as data space.
+ */
+- *mtu = min(*mtu, pmtuprobe ?
+- rt->dst.dev->mtu :
+- dst_mtu(rt->dst.path));
++ *mtu = orig_mtu;
+ }
+ *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ + fragheaderlen - sizeof(struct frag_hdr);
+@@ -1119,7 +1117,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct inet_cork *cork;
+ struct sk_buff *skb, *skb_prev = NULL;
+- unsigned int maxfraglen, fragheaderlen, mtu;
++ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
+ int exthdrlen;
+ int dst_exthdrlen;
+ int hh_len;
+@@ -1201,6 +1199,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ dst_exthdrlen = 0;
+ mtu = cork->fragsize;
+ }
++ orig_mtu = mtu;
+
+ hh_len = LL_RESERVED_SPACE(rt->dst.dev);
+
+@@ -1280,8 +1279,7 @@ alloc_new_skb:
+ if (skb == NULL || skb_prev == NULL)
+ ip6_append_data_mtu(&mtu, &maxfraglen,
+ fragheaderlen, skb, rt,
+- np->pmtudisc ==
+- IPV6_PMTUDISC_PROBE);
++ orig_mtu);
+
+ skb_prev = skb;
+
+@@ -1537,8 +1535,8 @@ int ip6_push_pending_frames(struct sock *sk)
+ if (proto == IPPROTO_ICMPV6) {
+ struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+
+- ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
+- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
++ ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
++ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+ }
+
+ err = ip6_local_out(skb);
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 0eb4038a4d63..8737400af0a0 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -2349,13 +2349,14 @@ int ip6mr_get_route(struct net *net,
+ }
+
+ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
+- u32 portid, u32 seq, struct mfc6_cache *c, int cmd)
++ u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
++ int flags)
+ {
+ struct nlmsghdr *nlh;
+ struct rtmsg *rtm;
+ int err;
+
+- nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
++ nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+@@ -2423,7 +2424,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
+ if (skb == NULL)
+ goto errout;
+
+- err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
++ err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
+ if (err < 0)
+ goto errout;
+
+@@ -2462,7 +2463,8 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+ if (ip6mr_fill_mroute(mrt, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+- mfc, RTM_NEWROUTE) < 0)
++ mfc, RTM_NEWROUTE,
++ NLM_F_MULTI) < 0)
+ goto done;
+ next_entry:
+ e++;
+@@ -2476,7 +2478,8 @@ next_entry:
+ if (ip6mr_fill_mroute(mrt, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+- mfc, RTM_NEWROUTE) < 0) {
++ mfc, RTM_NEWROUTE,
++ NLM_F_MULTI) < 0) {
+ spin_unlock_bh(&mfc_unres_lock);
+ goto done;
+ }
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index d18f9f903db6..d81abd5ba767 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1620,11 +1620,12 @@ static void mld_sendpack(struct sk_buff *skb)
+ dst_output);
+ out:
+ if (!err) {
+- ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
+- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+- IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
+- } else
+- IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
++ ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
++ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
++ IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
++ } else {
++ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
++ }
+
+ rcu_read_unlock();
+ return;
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 7856e962a3e6..6acab0bce9d8 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -182,8 +182,8 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ MSG_DONTWAIT, np->dontfrag);
+
+ if (err) {
+- ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev,
+- ICMP6_MIB_OUTERRORS);
++ ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
++ ICMP6_MIB_OUTERRORS);
+ ip6_flush_pending_frames(sk);
+ } else {
+ err = icmpv6_push_pending_frames(sk, &fl6,
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 0accb1321dd6..77f81beabbd3 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1500,7 +1500,7 @@ int ip6_route_add(struct fib6_config *cfg)
+ if (!table)
+ goto out;
+
+- rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
++ rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+
+ if (!rt) {
+ err = -ENOMEM;
+diff --git a/net/rds/iw.c b/net/rds/iw.c
+index 7826d46baa70..589935661d66 100644
+--- a/net/rds/iw.c
++++ b/net/rds/iw.c
+@@ -239,7 +239,8 @@ static int rds_iw_laddr_check(__be32 addr)
+ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+ /* due to this, we will claim to support IB devices unless we
+ check node_type. */
+- if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
++ if (ret || !cm_id->device ||
++ cm_id->device->node_type != RDMA_NODE_RNIC)
+ ret = -EADDRNOTAVAIL;
+
+ rdsdebug("addr %pI4 ret %d node type %d\n",
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index 2e55f8189502..52229f91b115 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -577,9 +577,11 @@ static void fq_rehash(struct fq_sched_data *q,
+ q->stat_gc_flows += fcnt;
+ }
+
+-static int fq_resize(struct fq_sched_data *q, u32 log)
++static int fq_resize(struct Qdisc *sch, u32 log)
+ {
++ struct fq_sched_data *q = qdisc_priv(sch);
+ struct rb_root *array;
++ void *old_fq_root;
+ u32 idx;
+
+ if (q->fq_root && log == q->fq_trees_log)
+@@ -592,13 +594,19 @@ static int fq_resize(struct fq_sched_data *q, u32 log)
+ for (idx = 0; idx < (1U << log); idx++)
+ array[idx] = RB_ROOT;
+
+- if (q->fq_root) {
+- fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
+- kfree(q->fq_root);
+- }
++ sch_tree_lock(sch);
++
++ old_fq_root = q->fq_root;
++ if (old_fq_root)
++ fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
++
+ q->fq_root = array;
+ q->fq_trees_log = log;
+
++ sch_tree_unlock(sch);
++
++ kfree(old_fq_root);
++
+ return 0;
+ }
+
+@@ -674,9 +682,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+ q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
+ }
+
+- if (!err)
+- err = fq_resize(q, fq_log);
+-
++ if (!err) {
++ sch_tree_unlock(sch);
++ err = fq_resize(sch, fq_log);
++ sch_tree_lock(sch);
++ }
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = fq_dequeue(sch);
+
+@@ -722,7 +732,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
+ if (opt)
+ err = fq_change(sch, opt);
+ else
+- err = fq_resize(q, q->fq_trees_log);
++ err = fq_resize(sch, q->fq_trees_log);
+
+ return err;
+ }
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index d244a23ab8d3..26be077b8267 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -1433,8 +1433,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
+ BUG_ON(!list_empty(&chunk->list));
+ list_del_init(&chunk->transmitted_list);
+
+- /* Free the chunk skb data and the SCTP_chunk stub itself. */
+- dev_kfree_skb(chunk->skb);
++ consume_skb(chunk->skb);
++ consume_skb(chunk->auth_chunk);
+
+ SCTP_DBG_OBJCNT_DEC(chunk);
+ kmem_cache_free(sctp_chunk_cachep, chunk);
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 56ebe71cfe13..0a5f0508c43a 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -761,7 +761,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
+
+ /* Make sure that we and the peer are AUTH capable */
+ if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
+- kfree_skb(chunk->auth_chunk);
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+@@ -776,10 +775,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
+ auth.transport = chunk->transport;
+
+ ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
+-
+- /* We can now safely free the auth_chunk clone */
+- kfree_skb(chunk->auth_chunk);
+-
+ if (ret != SCTP_IERROR_NO_ERROR) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+diff --git a/net/socket.c b/net/socket.c
+index e83c416708af..dc57dae20a9a 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1972,6 +1972,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
+ {
+ if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+ return -EFAULT;
++
++ if (kmsg->msg_namelen < 0)
++ return -EINVAL;
++
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ return 0;
+diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
+index 890a29912d5a..e860d4f7ed2a 100644
+--- a/net/sunrpc/backchannel_rqst.c
++++ b/net/sunrpc/backchannel_rqst.c
+@@ -64,7 +64,6 @@ static void xprt_free_allocation(struct rpc_rqst *req)
+ free_page((unsigned long)xbufp->head[0].iov_base);
+ xbufp = &req->rq_snd_buf;
+ free_page((unsigned long)xbufp->head[0].iov_base);
+- list_del(&req->rq_bc_pa_list);
+ kfree(req);
+ }
+
+@@ -168,8 +167,10 @@ out_free:
+ /*
+ * Memory allocation failed, free the temporary list
+ */
+- list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
++ list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
++ list_del(&req->rq_bc_pa_list);
+ xprt_free_allocation(req);
++ }
+
+ dprintk("RPC: setup backchannel transport failed\n");
+ return -ENOMEM;
+@@ -198,6 +199,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
+ xprt_dec_alloc_count(xprt, max_reqs);
+ list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
+ dprintk("RPC: req=%p\n", req);
++ list_del(&req->rq_bc_pa_list);
+ xprt_free_allocation(req);
+ if (--max_reqs == 0)
+ break;
+diff --git a/net/tipc/config.c b/net/tipc/config.c
+index c301a9a592d8..5afe633114e0 100644
+--- a/net/tipc/config.c
++++ b/net/tipc/config.c
+@@ -376,7 +376,6 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
+ struct tipc_cfg_msg_hdr *req_hdr;
+ struct tipc_cfg_msg_hdr *rep_hdr;
+ struct sk_buff *rep_buf;
+- int ret;
+
+ /* Validate configuration message header (ignore invalid message) */
+ req_hdr = (struct tipc_cfg_msg_hdr *)buf;
+@@ -398,12 +397,8 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
+ memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
+ rep_hdr->tcm_len = htonl(rep_buf->len);
+ rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
+-
+- ret = tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
+- rep_buf->len);
+- if (ret < 0)
+- pr_err("Sending cfg reply message failed, no memory\n");
+-
++ tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
++ rep_buf->len);
+ kfree_skb(rep_buf);
+ }
+ }
+diff --git a/net/tipc/handler.c b/net/tipc/handler.c
+index b36f0fcd9bdf..79b991e044a9 100644
+--- a/net/tipc/handler.c
++++ b/net/tipc/handler.c
+@@ -57,7 +57,6 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
+ struct queue_item *item;
+
+ if (!handler_enabled) {
+- pr_err("Signal request ignored by handler\n");
+ return -ENOPROTOOPT;
+ }
+
+diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
+index 09dcd54b04e1..299e45af7e4e 100644
+--- a/net/tipc/name_table.c
++++ b/net/tipc/name_table.c
+@@ -942,20 +942,51 @@ int tipc_nametbl_init(void)
+ return 0;
+ }
+
++/**
++ * tipc_purge_publications - remove all publications for a given type
++ *
++ * tipc_nametbl_lock must be held when calling this function
++ */
++static void tipc_purge_publications(struct name_seq *seq)
++{
++ struct publication *publ, *safe;
++ struct sub_seq *sseq;
++ struct name_info *info;
++
++ if (!seq->sseqs) {
++ nameseq_delete_empty(seq);
++ return;
++ }
++ sseq = seq->sseqs;
++ info = sseq->info;
++ list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
++ tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
++ publ->ref, publ->key);
++ }
++}
++
+ void tipc_nametbl_stop(void)
+ {
+ u32 i;
++ struct name_seq *seq;
++ struct hlist_head *seq_head;
++ struct hlist_node *safe;
+
+ if (!table.types)
+ return;
+
+- /* Verify name table is empty, then release it */
++	/* Verify that the name table is empty and purge any lingering
++	 * publications, then release the name table.
++	 */
+ write_lock_bh(&tipc_nametbl_lock);
+ for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
+ if (hlist_empty(&table.types[i]))
+ continue;
+- pr_err("nametbl_stop(): orphaned hash chain detected\n");
+- break;
++ seq_head = &table.types[i];
++ hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
++ tipc_purge_publications(seq);
++ }
++ continue;
+ }
+ kfree(table.types);
+ table.types = NULL;
+diff --git a/net/tipc/server.c b/net/tipc/server.c
+index fd3fa57a410e..bd2336aad0e4 100644
+--- a/net/tipc/server.c
++++ b/net/tipc/server.c
+@@ -87,7 +87,6 @@ static void tipc_clean_outqueues(struct tipc_conn *con);
+ static void tipc_conn_kref_release(struct kref *kref)
+ {
+ struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
+- struct tipc_server *s = con->server;
+
+ if (con->sock) {
+ tipc_sock_release_local(con->sock);
+@@ -95,10 +94,6 @@ static void tipc_conn_kref_release(struct kref *kref)
+ }
+
+ tipc_clean_outqueues(con);
+-
+- if (con->conid)
+- s->tipc_conn_shutdown(con->conid, con->usr_data);
+-
+ kfree(con);
+ }
+
+@@ -181,6 +176,9 @@ static void tipc_close_conn(struct tipc_conn *con)
+ struct tipc_server *s = con->server;
+
+ if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
++ if (con->conid)
++ s->tipc_conn_shutdown(con->conid, con->usr_data);
++
+ spin_lock_bh(&s->idr_lock);
+ idr_remove(&s->conn_idr, con->conid);
+ s->idr_in_use--;
+@@ -429,10 +427,12 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
+ list_add_tail(&e->list, &con->outqueue);
+ spin_unlock_bh(&con->outqueue_lock);
+
+- if (test_bit(CF_CONNECTED, &con->flags))
++ if (test_bit(CF_CONNECTED, &con->flags)) {
+ if (!queue_work(s->send_wq, &con->swork))
+ conn_put(con);
+-
++ } else {
++ conn_put(con);
++ }
+ return 0;
+ }
+
+diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
+index d38bb45d82e9..c2a37aa12498 100644
+--- a/net/tipc/subscr.c
++++ b/net/tipc/subscr.c
+@@ -96,20 +96,16 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
+ {
+ struct tipc_subscriber *subscriber = sub->subscriber;
+ struct kvec msg_sect;
+- int ret;
+
+ msg_sect.iov_base = (void *)&sub->evt;
+ msg_sect.iov_len = sizeof(struct tipc_event);
+-
+ sub->evt.event = htohl(event, sub->swap);
+ sub->evt.found_lower = htohl(found_lower, sub->swap);
+ sub->evt.found_upper = htohl(found_upper, sub->swap);
+ sub->evt.port.ref = htohl(port_ref, sub->swap);
+ sub->evt.port.node = htohl(node, sub->swap);
+- ret = tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL,
+- msg_sect.iov_base, msg_sect.iov_len);
+- if (ret < 0)
+- pr_err("Sending subscription event failed, no memory\n");
++ tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base,
++ msg_sect.iov_len);
+ }
+
+ /**
+@@ -153,14 +149,6 @@ static void subscr_timeout(struct tipc_subscription *sub)
+ /* The spin lock per subscriber is used to protect its members */
+ spin_lock_bh(&subscriber->lock);
+
+- /* Validate if the connection related to the subscriber is
+- * closed (in case subscriber is terminating)
+- */
+- if (subscriber->conid == 0) {
+- spin_unlock_bh(&subscriber->lock);
+- return;
+- }
+-
+ /* Validate timeout (in case subscription is being cancelled) */
+ if (sub->timeout == TIPC_WAIT_FOREVER) {
+ spin_unlock_bh(&subscriber->lock);
+@@ -215,9 +203,6 @@ static void subscr_release(struct tipc_subscriber *subscriber)
+
+ spin_lock_bh(&subscriber->lock);
+
+- /* Invalidate subscriber reference */
+- subscriber->conid = 0;
+-
+ /* Destroy any existing subscriptions for subscriber */
+ list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
+ subscription_list) {
+@@ -278,9 +263,9 @@ static void subscr_cancel(struct tipc_subscr *s,
+ *
+ * Called with subscriber lock held.
+ */
+-static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
+- struct tipc_subscriber *subscriber)
+-{
++static int subscr_subscribe(struct tipc_subscr *s,
++ struct tipc_subscriber *subscriber,
++ struct tipc_subscription **sub_p) {
+ struct tipc_subscription *sub;
+ int swap;
+
+@@ -291,23 +276,21 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
+ if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
+ s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
+ subscr_cancel(s, subscriber);
+- return NULL;
++ return 0;
+ }
+
+ /* Refuse subscription if global limit exceeded */
+ if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
+ pr_warn("Subscription rejected, limit reached (%u)\n",
+ TIPC_MAX_SUBSCRIPTIONS);
+- subscr_terminate(subscriber);
+- return NULL;
++ return -EINVAL;
+ }
+
+ /* Allocate subscription object */
+ sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
+ if (!sub) {
+ pr_warn("Subscription rejected, no memory\n");
+- subscr_terminate(subscriber);
+- return NULL;
++ return -ENOMEM;
+ }
+
+ /* Initialize subscription object */
+@@ -321,8 +304,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
+ (sub->seq.lower > sub->seq.upper)) {
+ pr_warn("Subscription rejected, illegal request\n");
+ kfree(sub);
+- subscr_terminate(subscriber);
+- return NULL;
++ return -EINVAL;
+ }
+ INIT_LIST_HEAD(&sub->nameseq_list);
+ list_add(&sub->subscription_list, &subscriber->subscription_list);
+@@ -335,8 +317,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
+ (Handler)subscr_timeout, (unsigned long)sub);
+ k_start_timer(&sub->timer, sub->timeout);
+ }
+-
+- return sub;
++ *sub_p = sub;
++ return 0;
+ }
+
+ /* Handle one termination request for the subscriber */
+@@ -350,10 +332,14 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
+ void *usr_data, void *buf, size_t len)
+ {
+ struct tipc_subscriber *subscriber = usr_data;
+- struct tipc_subscription *sub;
++ struct tipc_subscription *sub = NULL;
+
+ spin_lock_bh(&subscriber->lock);
+- sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber);
++ if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
++ spin_unlock_bh(&subscriber->lock);
++ subscr_terminate(subscriber);
++ return;
++ }
+ if (sub)
+ tipc_nametbl_subscribe(sub);
+ spin_unlock_bh(&subscriber->lock);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index d7c1ac621a90..c3975bcf725f 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1785,8 +1785,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
+ goto out;
+
+ err = mutex_lock_interruptible(&u->readlock);
+- if (err) {
+- err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
++ if (unlikely(err)) {
++		/* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
++		 * sk_rcvtimeo is not honored by mutex_lock_interruptible().
++		 */
++ err = noblock ? -EAGAIN : -ERESTARTSYS;
+ goto out;
+ }
+
+@@ -1911,6 +1914,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct unix_sock *u = unix_sk(sk);
+ struct sockaddr_un *sunaddr = msg->msg_name;
+ int copied = 0;
++ int noblock = flags & MSG_DONTWAIT;
+ int check_creds = 0;
+ int target;
+ int err = 0;
+@@ -1926,7 +1930,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ goto out;
+
+ target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+- timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
++ timeo = sock_rcvtimeo(sk, noblock);
+
+ /* Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+@@ -1938,8 +1942,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+
+ err = mutex_lock_interruptible(&u->readlock);
+- if (err) {
+- err = sock_intr_errno(timeo);
++ if (unlikely(err)) {
++		/* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
++		 * sk_rcvtimeo is not honored by mutex_lock_interruptible().
++		 */
++ err = noblock ? -EAGAIN : -ERESTARTSYS;
+ goto out;
+ }
+
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 25d5ebaf25f9..630b8adf0ce5 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -1386,15 +1386,33 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
+ isec->sid = sbsec->sid;
+
+ if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
+- if (opt_dentry) {
+- isec->sclass = inode_mode_to_security_class(inode->i_mode);
+- rc = selinux_proc_get_sid(opt_dentry,
+- isec->sclass,
+- &sid);
+- if (rc)
+- goto out_unlock;
+- isec->sid = sid;
+- }
++ /* We must have a dentry to determine the label on
++ * procfs inodes */
++ if (opt_dentry)
++ /* Called from d_instantiate or
++ * d_splice_alias. */
++ dentry = dget(opt_dentry);
++ else
++ /* Called from selinux_complete_init, try to
++ * find a dentry. */
++ dentry = d_find_alias(inode);
++ /*
++ * This can be hit on boot when a file is accessed
++ * before the policy is loaded. When we load policy we
++ * may find inodes that have no dentry on the
++ * sbsec->isec_head list. No reason to complain as
++ * these will get fixed up the next time we go through
++ * inode_doinit() with a dentry, before these inodes
++ * could be used again by userspace.
++ */
++ if (!dentry)
++ goto out_unlock;
++ isec->sclass = inode_mode_to_security_class(inode->i_mode);
++ rc = selinux_proc_get_sid(dentry, isec->sclass, &sid);
++ dput(dentry);
++ if (rc)
++ goto out_unlock;
++ isec->sid = sid;
+ }
+ break;
+ }
diff --git a/1018_linux-3.12.19.patch b/1018_linux-3.12.19.patch
new file mode 100644
index 00000000..abd1b099
--- /dev/null
+++ b/1018_linux-3.12.19.patch
@@ -0,0 +1,1760 @@
+diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
+index 7b0dcdb57173..babe2ef16139 100644
+--- a/Documentation/i2c/busses/i2c-i801
++++ b/Documentation/i2c/busses/i2c-i801
+@@ -26,6 +26,7 @@ Supported adapters:
+ * Intel Wellsburg (PCH)
+ * Intel Coleto Creek (PCH)
+ * Intel Wildcat Point-LP (PCH)
++ * Intel BayTrail (SOC)
+ Datasheets: Publicly available at the Intel website
+
+ On Intel Patsburg and later chipsets, both the normal host SMBus controller
+diff --git a/Makefile b/Makefile
+index fc0dcf63a8d9..cf5d97e60b39 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index 304661d21369..5e85ed371364 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
+ }
+
+ /*
+- * We don't use supersection mappings for mmap() on /dev/mem, which
+- * means that we can't map the memory area above the 4G barrier into
+- * userspace.
++ * Do not allow /dev/mem mappings beyond the supported physical range.
+ */
+ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
+ {
+- return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
++ return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
+ }
+
+ #ifdef CONFIG_STRICT_DEVMEM
+diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
+index 0c9107285e66..10a0c2aad8cf 100644
+--- a/arch/parisc/kernel/syscall_table.S
++++ b/arch/parisc/kernel/syscall_table.S
+@@ -392,7 +392,7 @@
+ ENTRY_COMP(vmsplice)
+ ENTRY_COMP(move_pages) /* 295 */
+ ENTRY_SAME(getcpu)
+- ENTRY_SAME(epoll_pwait)
++ ENTRY_COMP(epoll_pwait)
+ ENTRY_COMP(statfs64)
+ ENTRY_COMP(fstatfs64)
+ ENTRY_COMP(kexec_load) /* 300 */
+diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S
+index 5143228e3e5f..6636b1d7821b 100644
+--- a/arch/powerpc/boot/util.S
++++ b/arch/powerpc/boot/util.S
+@@ -71,18 +71,32 @@ udelay:
+ add r4,r4,r5
+ addi r4,r4,-1
+ divw r4,r4,r5 /* BUS ticks */
++#ifdef CONFIG_8xx
++1: mftbu r5
++ mftb r6
++ mftbu r7
++#else
+ 1: mfspr r5, SPRN_TBRU
+ mfspr r6, SPRN_TBRL
+ mfspr r7, SPRN_TBRU
++#endif
+ cmpw 0,r5,r7
+ bne 1b /* Get [synced] base time */
+ addc r9,r6,r4 /* Compute end time */
+ addze r8,r5
++#ifdef CONFIG_8xx
++2: mftbu r5
++#else
+ 2: mfspr r5, SPRN_TBRU
++#endif
+ cmpw 0,r5,r8
+ blt 2b
+ bgt 3f
++#ifdef CONFIG_8xx
++ mftb r6
++#else
+ mfspr r6, SPRN_TBRL
++#endif
+ cmpw 0,r6,r9
+ blt 2b
+ 3: blr
+diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
+index c2dcfaa51987..0d2d0f03163b 100644
+--- a/arch/powerpc/include/asm/ppc_asm.h
++++ b/arch/powerpc/include/asm/ppc_asm.h
+@@ -438,6 +438,8 @@ BEGIN_FTR_SECTION_NESTED(96); \
+ cmpwi dest,0; \
+ beq- 90b; \
+ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
++#elif defined(CONFIG_8xx)
++#define MFTB(dest) mftb dest
+ #else
+ #define MFTB(dest) mfspr dest, SPRN_TBRL
+ #endif
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 10d1ef016bf1..7ca729cac073 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -1154,12 +1154,19 @@
+
+ #else /* __powerpc64__ */
+
++#if defined(CONFIG_8xx)
++#define mftbl() ({unsigned long rval; \
++ asm volatile("mftbl %0" : "=r" (rval)); rval;})
++#define mftbu() ({unsigned long rval; \
++ asm volatile("mftbu %0" : "=r" (rval)); rval;})
++#else
+ #define mftbl() ({unsigned long rval; \
+ asm volatile("mfspr %0, %1" : "=r" (rval) : \
+ "i" (SPRN_TBRL)); rval;})
+ #define mftbu() ({unsigned long rval; \
+ asm volatile("mfspr %0, %1" : "=r" (rval) : \
+ "i" (SPRN_TBRU)); rval;})
++#endif
+ #endif /* !__powerpc64__ */
+
+ #define mttbl(v) asm volatile("mttbl %0":: "r"(v))
+diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
+index 18908caa1f3b..2cf846edb3fc 100644
+--- a/arch/powerpc/include/asm/timex.h
++++ b/arch/powerpc/include/asm/timex.h
+@@ -29,7 +29,11 @@ static inline cycles_t get_cycles(void)
+ ret = 0;
+
+ __asm__ __volatile__(
++#ifdef CONFIG_8xx
++ "97: mftb %0\n"
++#else
+ "97: mfspr %0, %2\n"
++#endif
+ "99:\n"
+ ".section __ftr_fixup,\"a\"\n"
+ ".align 2\n"
+@@ -41,7 +45,11 @@ static inline cycles_t get_cycles(void)
+ " .long 0\n"
+ " .long 0\n"
+ ".previous"
++#ifdef CONFIG_8xx
++ : "=r" (ret) : "i" (CPU_FTR_601));
++#else
+ : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL));
++#endif
+ return ret;
+ #endif
+ }
+diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
+index 6b1f2a6d5517..6b2b69616e77 100644
+--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
++++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
+@@ -232,9 +232,15 @@ __do_get_tspec:
+ lwz r6,(CFG_TB_ORIG_STAMP+4)(r9)
+
+ /* Get a stable TB value */
++#ifdef CONFIG_8xx
++2: mftbu r3
++ mftbl r4
++ mftbu r0
++#else
+ 2: mfspr r3, SPRN_TBRU
+ mfspr r4, SPRN_TBRL
+ mfspr r0, SPRN_TBRU
++#endif
+ cmplw cr0,r3,r0
+ bne- 2b
+
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 78c4fdb91bc5..4e5683877b93 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -25,7 +25,7 @@ config SPARC
+ select RTC_DRV_M48T59
+ select HAVE_DMA_ATTRS
+ select HAVE_DMA_API_DEBUG
+- select HAVE_ARCH_JUMP_LABEL
++ select HAVE_ARCH_JUMP_LABEL if SPARC64
+ select GENERIC_IRQ_SHOW
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select USE_GENERIC_SMP_HELPERS if SMP
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index baebab215492..b9cc9763faf4 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -57,9 +57,12 @@ void arch_cpu_idle(void)
+ {
+ if (tlb_type != hypervisor) {
+ touch_nmi_watchdog();
++ local_irq_enable();
+ } else {
+ unsigned long pstate;
+
++ local_irq_enable();
++
+ /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
+ * the cpu sleep hypervisor call.
+ */
+@@ -81,7 +84,6 @@ void arch_cpu_idle(void)
+ : "=&r" (pstate)
+ : "i" (PSTATE_IE));
+ }
+- local_irq_enable();
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index d950197a17e1..6dee79575791 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -189,7 +189,8 @@ linux_sparc_syscall32:
+ mov %i0, %l5 ! IEU1
+ 5: call %l7 ! CTI Group brk forced
+ srl %i5, 0, %o5 ! IEU1
+- ba,a,pt %xcc, 3f
++ ba,pt %xcc, 3f
++ sra %o0, 0, %o0
+
+ /* Linux native system calls enter here... */
+ .align 32
+@@ -217,7 +218,6 @@ linux_sparc_syscall:
+ 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+ ret_sys_call:
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
+- sra %o0, 0, %o0
+ mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
+ sllx %g2, 32, %g2
+
+diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
+index b3cd3ebae077..7eb30af8c7a2 100644
+--- a/arch/x86/kernel/early-quirks.c
++++ b/arch/x86/kernel/early-quirks.c
+@@ -203,18 +203,15 @@ static void __init intel_remapping_check(int num, int slot, int func)
+ revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
+
+ /*
+- * Revision 13 of all triggering devices id in this quirk have
+- * a problem draining interrupts when irq remapping is enabled,
+- * and should be flagged as broken. Additionally revisions 0x12
+- * and 0x22 of device id 0x3405 has this problem.
++	 * Revisions <= 13 of all device ids triggering this quirk
++	 * have a problem draining interrupts when irq remapping is
++	 * enabled, and should be flagged as broken. Additionally,
++	 * revision 0x22 of device id 0x3405 has this problem.
+ */
+- if (revision == 0x13)
++ if (revision <= 0x13)
+ set_irq_remapping_broken();
+- else if ((device == 0x3405) &&
+- ((revision == 0x12) ||
+- (revision == 0x22)))
++ else if (device == 0x3405 && revision == 0x22)
+ set_irq_remapping_broken();
+-
+ }
+
+ /*
+diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
+index 52dbf1e400dc..ff898bbf579d 100644
+--- a/arch/x86/kernel/quirks.c
++++ b/arch/x86/kernel/quirks.c
+@@ -571,3 +571,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
+ quirk_amd_nb_node);
+
+ #endif
++
++#ifdef CONFIG_PCI
++/*
++ * The processor does not ensure that the DRAM scrub read/write
++ * sequence is atomic with respect to accesses to the CC6 save state
++ * area. Therefore, if a concurrent scrub read/write accesses the
++ * same address, the entry may appear as if it was not written. This
++ * quirk applies to Fam16h models 00h-0Fh.
++ *
++ * See "Revision Guide" for AMD F16h models 00h-0fh,
++ * document 51810 rev. 3.04, Nov 2013
++ */
++static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
++{
++ u32 val;
++
++ /*
++ * Suggested workaround:
++ * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
++ */
++ pci_read_config_dword(dev, 0x58, &val);
++ if (val & 0x1F) {
++ val &= ~(0x1F);
++ pci_write_config_dword(dev, 0x58, val);
++ }
++
++ pci_read_config_dword(dev, 0x5C, &val);
++ if (val & BIT(0)) {
++ val &= ~BIT(0);
++ pci_write_config_dword(dev, 0x5c, val);
++ }
++}
++
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
++ amd_disable_seq_and_redirect_scrub);
++
++#endif
+diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
+index a55773801c5f..3aa89eb8dbbd 100644
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -302,6 +302,10 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
+ input_sync(input);
+
+ pm_wakeup_event(&device->dev, 0);
++ acpi_bus_generate_netlink_event(
++ device->pnp.device_class,
++ dev_name(&device->dev),
++ event, ++button->pushed);
+ }
+ break;
+ default:
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 3cc0b92e3544..51b700838f64 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -213,13 +213,13 @@ unlock:
+ spin_unlock_irqrestore(&ec->lock, flags);
+ }
+
+-static int acpi_ec_sync_query(struct acpi_ec *ec);
++static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
+
+ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
+ {
+ if (state & ACPI_EC_FLAG_SCI) {
+ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
+- return acpi_ec_sync_query(ec);
++ return acpi_ec_sync_query(ec, NULL);
+ }
+ return 0;
+ }
+@@ -471,10 +471,8 @@ acpi_handle ec_get_handle(void)
+
+ EXPORT_SYMBOL(ec_get_handle);
+
+-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
+-
+ /*
+- * Clears stale _Q events that might have accumulated in the EC.
++ * Process _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+ static void acpi_ec_clear(struct acpi_ec *ec)
+@@ -483,7 +481,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
+ u8 value = 0;
+
+ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+- status = acpi_ec_query_unlocked(ec, &value);
++ status = acpi_ec_sync_query(ec, &value);
+ if (status || !value)
+ break;
+ }
+@@ -610,13 +608,18 @@ static void acpi_ec_run(void *cxt)
+ kfree(handler);
+ }
+
+-static int acpi_ec_sync_query(struct acpi_ec *ec)
++static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
+ {
+ u8 value = 0;
+ int status;
+ struct acpi_ec_query_handler *handler, *copy;
+- if ((status = acpi_ec_query_unlocked(ec, &value)))
++
++ status = acpi_ec_query_unlocked(ec, &value);
++ if (data)
++ *data = value;
++ if (status)
+ return status;
++
+ list_for_each_entry(handler, &ec->list, node) {
+ if (value == handler->query_bit) {
+ /* have custom handler for this bit */
+@@ -639,7 +642,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
+ if (!ec)
+ return;
+ mutex_lock(&ec->mutex);
+- acpi_ec_sync_query(ec);
++ acpi_ec_sync_query(ec, NULL);
+ mutex_unlock(&ec->mutex);
+ }
+
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 99e5158456d8..c09e6f646fe4 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -675,11 +675,8 @@ static void acpi_hibernation_leave(void)
+ /* Reprogram control registers */
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4);
+ /* Check the hardware signature */
+- if (facs && s4_hardware_signature != facs->hardware_signature) {
+- printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
+- "cannot resume!\n");
+- panic("ACPI S4 hardware signature mismatch");
+- }
++ if (facs && s4_hardware_signature != facs->hardware_signature)
++ pr_crit("ACPI: Hardware changed while hibernated, success doubtful!\n");
+ /* Restore the NVS memory area */
+ suspend_nvs_restore();
+ /* Allow EC transactions to happen. */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index d593c99121c3..6e30356d3e42 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -223,6 +223,7 @@ static struct usb_device_id blacklist_table[] = {
+
+ /* Intel Bluetooth device */
+ { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
++ { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
+
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index 15e4a6031934..e5bdd1a2f541 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -249,6 +249,9 @@ struct smi_info {
+ /* The timer for this si. */
+ struct timer_list si_timer;
+
++	/* This flag is set if the timer is running (timer_pending() isn't enough) */
++ bool timer_running;
++
+ /* The time (in jiffies) the last timeout occurred at. */
+ unsigned long last_timeout_jiffies;
+
+@@ -435,6 +438,13 @@ static void start_clear_flags(struct smi_info *smi_info)
+ smi_info->si_state = SI_CLEARING_FLAGS;
+ }
+
++static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
++{
++ smi_info->last_timeout_jiffies = jiffies;
++ mod_timer(&smi_info->si_timer, new_val);
++ smi_info->timer_running = true;
++}
++
+ /*
+ * When we have a situtaion where we run out of memory and cannot
+ * allocate messages, we just leave them in the BMC and run the system
+@@ -447,8 +457,7 @@ static inline void disable_si_irq(struct smi_info *smi_info)
+ start_disable_irq(smi_info);
+ smi_info->interrupt_disabled = 1;
+ if (!atomic_read(&smi_info->stop_operation))
+- mod_timer(&smi_info->si_timer,
+- jiffies + SI_TIMEOUT_JIFFIES);
++ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+ }
+ }
+
+@@ -908,15 +917,7 @@ static void sender(void *send_info,
+ list_add_tail(&msg->link, &smi_info->xmit_msgs);
+
+ if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+- /*
+- * last_timeout_jiffies is updated here to avoid
+- * smi_timeout() handler passing very large time_diff
+- * value to smi_event_handler() that causes
+- * the send command to abort.
+- */
+- smi_info->last_timeout_jiffies = jiffies;
+-
+- mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
++ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (smi_info->thread)
+ wake_up_process(smi_info->thread);
+@@ -1005,6 +1006,17 @@ static int ipmi_thread(void *data)
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+ smi_result = smi_event_handler(smi_info, 0);
++
++ /*
++ * If the driver is doing something, there is a possible
++	 * race with the timer. If the timer handler sees idle,
++ * and the thread here sees something else, the timer
++ * handler won't restart the timer even though it is
++ * required. So start it here if necessary.
++ */
++ if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
++ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
++
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+ &busy_until);
+@@ -1074,10 +1086,6 @@ static void smi_timeout(unsigned long data)
+ * SI_USEC_PER_JIFFY);
+ smi_result = smi_event_handler(smi_info, time_diff);
+
+- spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+-
+- smi_info->last_timeout_jiffies = jiffies_now;
+-
+ if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+ /* Running with interrupts, only do long timeouts. */
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
+@@ -1099,7 +1107,10 @@ static void smi_timeout(unsigned long data)
+
+ do_mod_timer:
+ if (smi_result != SI_SM_IDLE)
+- mod_timer(&(smi_info->si_timer), timeout);
++ smi_mod_timer(smi_info, timeout);
++ else
++ smi_info->timer_running = false;
++ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ }
+
+ static irqreturn_t si_irq_handler(int irq, void *data)
+@@ -1147,8 +1158,7 @@ static int smi_start_processing(void *send_info,
+
+ /* Set up the timer that drives the interface. */
+ setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+- new_smi->last_timeout_jiffies = jiffies;
+- mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
++ smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
+
+ /*
+ * Check if the user forcefully enabled the daemon.
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index f895a8c8a244..d1f4675809f8 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -100,6 +100,9 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+ ssize_t read, sz;
+ char *ptr;
+
++ if (p != *ppos)
++ return 0;
++
+ if (!valid_phys_addr_range(p, count))
+ return -EFAULT;
+ read = 0;
+@@ -158,6 +161,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
+ unsigned long copied;
+ void *ptr;
+
++ if (p != *ppos)
++ return -EFBIG;
++
+ if (!valid_phys_addr_range(p, count))
+ return -EFAULT;
+
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 3c9e4e98c651..d43a6202a5c5 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -1238,9 +1238,17 @@ static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+ if (num_dcts_intlv == 2) {
+ select = (sys_addr >> 8) & 0x3;
+ channel = select ? 0x3 : 0;
+- } else if (num_dcts_intlv == 4)
+- channel = (sys_addr >> 8) & 0x7;
+-
++ } else if (num_dcts_intlv == 4) {
++ u8 intlv_addr = dct_sel_interleave_addr(pvt);
++ switch (intlv_addr) {
++ case 0x4:
++ channel = (sys_addr >> 8) & 0x3;
++ break;
++ case 0x5:
++ channel = (sys_addr >> 9) & 0x3;
++ break;
++ }
++ }
+ return channel;
+ }
+
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 4e901081e287..01892bdfa7b7 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -110,6 +110,7 @@ config I2C_I801
+ Wellsburg (PCH)
+ Coleto Creek (PCH)
+ Wildcat Point-LP (PCH)
++ BayTrail (SOC)
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-i801.
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 737e29866887..0444f7aa1046 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -60,6 +60,7 @@
+ Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes
+ Coleto Creek (PCH) 0x23b0 32 hard yes yes yes
+ Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
++ BayTrail (SOC) 0x0f12 32 hard yes yes yes
+
+ Features supported by this driver:
+ Software PEC no
+@@ -161,6 +162,7 @@
+ STATUS_ERROR_FLAGS)
+
+ /* Older devices have their ID defined in <linux/pci_ids.h> */
++#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
+ #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
+ #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
+ /* Patsburg also has three 'Integrated Device Function' SMBus controllers */
+@@ -822,6 +824,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
+ { 0, }
+ };
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 72531f008a5e..5d2edb4b60aa 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -982,10 +982,10 @@ static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
+ address &= ~(0xfffULL);
+
+ cmd->data[0] = devid;
+- cmd->data[0] |= (pasid & 0xff) << 16;
++ cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
+ cmd->data[0] |= (qdep & 0xff) << 24;
+ cmd->data[1] = devid;
+- cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
++ cmd->data[1] |= (pasid & 0xff) << 16;
+ cmd->data[2] = lower_32_bits(address);
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ cmd->data[3] = upper_32_bits(address);
+diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
+index 4e8cfa2ac803..779016068a82 100644
+--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
++++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
+@@ -1940,6 +1940,7 @@ static void tulip_remove_one(struct pci_dev *pdev)
+ free_netdev (dev);
+ pci_release_regions (pdev);
+ pci_set_drvdata (pdev, NULL);
++ pci_disable_device(pdev);
+
+ /* pci_power_off (pdev, -1); */
+ }
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 4ef786775acb..9cb400c4cbaa 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -2976,11 +2976,21 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
+ u32 rctl, rfctl;
+ u32 pages = 0;
+
+- /* Workaround Si errata on PCHx - configure jumbo frame flow */
+- if ((hw->mac.type >= e1000_pch2lan) &&
+- (adapter->netdev->mtu > ETH_DATA_LEN) &&
+- e1000_lv_jumbo_workaround_ich8lan(hw, true))
+- e_dbg("failed to enable jumbo frame workaround mode\n");
++ /* Workaround Si errata on PCHx - configure jumbo frame flow.
++	 * If jumbo frames are not set, program the related MAC/PHY
++	 * registers to h/w defaults.
++ */
++ if (hw->mac.type >= e1000_pch2lan) {
++ s32 ret_val;
++
++ if (adapter->netdev->mtu > ETH_DATA_LEN)
++ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
++ else
++ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
++
++ if (ret_val)
++ e_dbg("failed to enable|disable jumbo frame workaround mode\n");
++ }
+
+ /* Program MC offset vector base */
+ rctl = er32(RCTL);
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+index edc5d105ff98..03a56dfba2db 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+@@ -426,6 +426,12 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
+ bool blocked;
+ int err;
+
++ if (!wl->ucode.bcm43xx_bomminor) {
++ err = brcms_request_fw(wl, wl->wlc->hw->d11core);
++ if (err)
++ return -ENOENT;
++ }
++
+ ieee80211_wake_queues(hw);
+ spin_lock_bh(&wl->lock);
+ blocked = brcms_rfkill_set_hw_state(wl);
+@@ -433,14 +439,6 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
+ if (!blocked)
+ wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
+
+- if (!wl->ucode.bcm43xx_bomminor) {
+- err = brcms_request_fw(wl, wl->wlc->hw->d11core);
+- if (err) {
+- brcms_remove(wl->wlc->hw->d11core);
+- return -ENOENT;
+- }
+- }
+-
+ spin_lock_bh(&wl->lock);
+ /* avoid acknowledging frames before a non-monitor device is added */
+ wl->mute_tx = true;
+diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
+index c10e9ac9bbbc..510994a7eca0 100644
+--- a/drivers/pci/host/pcie-designware.c
++++ b/drivers/pci/host/pcie-designware.c
+@@ -268,13 +268,13 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
+- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
++ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ }
+
+ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
+@@ -283,7 +283,6 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
+- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
+@@ -291,6 +290,7 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
+ dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
+ PCIE_ATU_UPPER_TARGET);
++ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ }
+
+ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
+@@ -299,7 +299,6 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
+- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
+@@ -307,6 +306,7 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
+ dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
+ PCIE_ATU_UPPER_TARGET);
++ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ }
+
+ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+@@ -532,7 +532,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
+
+ /* setup RC BARs */
+ dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
+- dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_1);
++ dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
+
+ /* setup interrupt pins */
+ dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
+diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
+index c55f234b29e6..26321f9f5caf 100644
+--- a/drivers/staging/comedi/drivers/8255_pci.c
++++ b/drivers/staging/comedi/drivers/8255_pci.c
+@@ -56,6 +56,7 @@ Configuration Options: not applicable, uses PCI auto config
+ #include "../comedidev.h"
+
+ #include "8255.h"
++#include "mite.h"
+
+ enum pci_8255_boardid {
+ BOARD_ADLINK_PCI7224,
+@@ -79,6 +80,7 @@ struct pci_8255_boardinfo {
+ const char *name;
+ int dio_badr;
+ int n_8255;
++ unsigned int has_mite:1;
+ };
+
+ static const struct pci_8255_boardinfo pci_8255_boards[] = {
+@@ -126,36 +128,43 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
+ .name = "ni_pci-dio-96",
+ .dio_badr = 1,
+ .n_8255 = 4,
++ .has_mite = 1,
+ },
+ [BOARD_NI_PCIDIO96B] = {
+ .name = "ni_pci-dio-96b",
+ .dio_badr = 1,
+ .n_8255 = 4,
++ .has_mite = 1,
+ },
+ [BOARD_NI_PXI6508] = {
+ .name = "ni_pxi-6508",
+ .dio_badr = 1,
+ .n_8255 = 4,
++ .has_mite = 1,
+ },
+ [BOARD_NI_PCI6503] = {
+ .name = "ni_pci-6503",
+ .dio_badr = 1,
+ .n_8255 = 1,
++ .has_mite = 1,
+ },
+ [BOARD_NI_PCI6503B] = {
+ .name = "ni_pci-6503b",
+ .dio_badr = 1,
+ .n_8255 = 1,
++ .has_mite = 1,
+ },
+ [BOARD_NI_PCI6503X] = {
+ .name = "ni_pci-6503x",
+ .dio_badr = 1,
+ .n_8255 = 1,
++ .has_mite = 1,
+ },
+ [BOARD_NI_PXI_6503] = {
+ .name = "ni_pxi-6503",
+ .dio_badr = 1,
+ .n_8255 = 1,
++ .has_mite = 1,
+ },
+ };
+
+@@ -163,6 +172,25 @@ struct pci_8255_private {
+ void __iomem *mmio_base;
+ };
+
++static int pci_8255_mite_init(struct pci_dev *pcidev)
++{
++ void __iomem *mite_base;
++ u32 main_phys_addr;
++
++ /* ioremap the MITE registers (BAR 0) temporarily */
++ mite_base = pci_ioremap_bar(pcidev, 0);
++ if (!mite_base)
++ return -ENOMEM;
++
++ /* set data window to main registers (BAR 1) */
++ main_phys_addr = pci_resource_start(pcidev, 1);
++ writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
++
++ /* finished with MITE registers */
++ iounmap(mite_base);
++ return 0;
++}
++
+ static int pci_8255_mmio(int dir, int port, int data, unsigned long iobase)
+ {
+ void __iomem *mmio_base = (void __iomem *)iobase;
+@@ -201,6 +229,12 @@ static int pci_8255_auto_attach(struct comedi_device *dev,
+ if (ret)
+ return ret;
+
++ if (board->has_mite) {
++ ret = pci_8255_mite_init(pcidev);
++ if (ret)
++ return ret;
++ }
++
+ is_mmio = (pci_resource_flags(pcidev, board->dio_badr) &
+ IORESOURCE_MEM) != 0;
+ if (is_mmio) {
+diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
+index 8fd72ff9436e..d917a34e94bb 100644
+--- a/drivers/tty/ipwireless/tty.c
++++ b/drivers/tty/ipwireless/tty.c
+@@ -177,9 +177,6 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
+ ": %d chars not inserted to flip buffer!\n",
+ length - work);
+
+- /*
+- * This may sleep if ->low_latency is set
+- */
+ if (work)
+ tty_flip_buffer_push(&tty->port);
+ }
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index c043136fbe51..2b52d807934e 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -332,14 +332,11 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags);
+ * Takes any pending buffers and transfers their ownership to the
+ * ldisc side of the queue. It then schedules those characters for
+ * processing by the line discipline.
+- * Note that this function can only be used when the low_latency flag
+- * is unset. Otherwise the workqueue won't be flushed.
+ */
+
+ void tty_schedule_flip(struct tty_port *port)
+ {
+ struct tty_bufhead *buf = &port->buf;
+- WARN_ON(port->low_latency);
+
+ buf->tail->commit = buf->tail->used;
+ schedule_work(&buf->work);
+@@ -487,17 +484,15 @@ static void flush_to_ldisc(struct work_struct *work)
+ */
+ void tty_flush_to_ldisc(struct tty_struct *tty)
+ {
+- if (!tty->port->low_latency)
+- flush_work(&tty->port->buf.work);
++ flush_work(&tty->port->buf.work);
+ }
+
+ /**
+ * tty_flip_buffer_push - terminal
+ * @port: tty port to push
+ *
+- * Queue a push of the terminal flip buffers to the line discipline. This
+- * function must not be called from IRQ context if port->low_latency is
+- * set.
++ * Queue a push of the terminal flip buffers to the line discipline.
++ * Can be called from IRQ/atomic context.
+ *
+ * In the event of the queue being busy for flipping the work will be
+ * held off and retried later.
+@@ -505,14 +500,7 @@ void tty_flush_to_ldisc(struct tty_struct *tty)
+
+ void tty_flip_buffer_push(struct tty_port *port)
+ {
+- struct tty_bufhead *buf = &port->buf;
+-
+- buf->tail->commit = buf->tail->used;
+-
+- if (port->low_latency)
+- flush_to_ldisc(&buf->work);
+- else
+- schedule_work(&buf->work);
++ tty_schedule_flip(port);
+ }
+ EXPORT_SYMBOL(tty_flip_buffer_push);
+
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index c74a00ad7add..d3448a90f0f9 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1271,12 +1271,13 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p)
+ *
+ * Locking: None
+ */
+-static void tty_line_name(struct tty_driver *driver, int index, char *p)
++static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
+ {
+ if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
+- strcpy(p, driver->name);
++ return sprintf(p, "%s", driver->name);
+ else
+- sprintf(p, "%s%d", driver->name, index + driver->name_base);
++ return sprintf(p, "%s%d", driver->name,
++ index + driver->name_base);
+ }
+
+ /**
+@@ -3545,9 +3546,19 @@ static ssize_t show_cons_active(struct device *dev,
+ if (i >= ARRAY_SIZE(cs))
+ break;
+ }
+- while (i--)
+- count += sprintf(buf + count, "%s%d%c",
+- cs[i]->name, cs[i]->index, i ? ' ':'\n');
++ while (i--) {
++ int index = cs[i]->index;
++ struct tty_driver *drv = cs[i]->device(cs[i], &index);
++
++ /* don't resolve tty0 as some programs depend on it */
++ if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR))
++ count += tty_line_name(drv, index, buf + count);
++ else
++ count += sprintf(buf + count, "%s%d",
++ cs[i]->name, cs[i]->index);
++
++ count += sprintf(buf + count, "%c", i ? ' ':'\n');
++ }
+ console_unlock();
+
+ return count;
+diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
+index 5651231a7437..f3eecd967a8a 100644
+--- a/drivers/usb/atm/usbatm.h
++++ b/drivers/usb/atm/usbatm.h
+@@ -34,6 +34,7 @@
+ #include <linux/stringify.h>
+ #include <linux/usb.h>
+ #include <linux/mutex.h>
++#include <linux/ratelimit.h>
+
+ /*
+ #define VERBOSE_DEBUG
+@@ -59,13 +60,12 @@
+ atm_printk(KERN_INFO, instance , format , ## arg)
+ #define atm_warn(instance, format, arg...) \
+ atm_printk(KERN_WARNING, instance , format , ## arg)
+-#define atm_dbg(instance, format, arg...) \
+- dynamic_pr_debug("ATM dev %d: " format , \
+- (instance)->atm_dev->number , ## arg)
+-#define atm_rldbg(instance, format, arg...) \
+- if (printk_ratelimit()) \
+- atm_dbg(instance , format , ## arg)
+-
++#define atm_dbg(instance, format, ...) \
++ pr_debug("ATM dev %d: " format, \
++ (instance)->atm_dev->number, ##__VA_ARGS__)
++#define atm_rldbg(instance, format, ...) \
++ pr_debug_ratelimited("ATM dev %d: " format, \
++ (instance)->atm_dev->number, ##__VA_ARGS__)
+
+ /* flags, set by mini-driver in bind() */
+
+diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
+index b369292d4b90..ad0aca812002 100644
+--- a/drivers/usb/gadget/u_serial.c
++++ b/drivers/usb/gadget/u_serial.c
+@@ -549,8 +549,8 @@ static void gs_rx_push(unsigned long _port)
+ port->read_started--;
+ }
+
+- /* Push from tty to ldisc; without low_latency set this is handled by
+- * a workqueue, so we won't get callbacks and can hold port_lock
++ /* Push from tty to ldisc; this is handled by a workqueue,
++ * so we won't get callbacks and can hold port_lock
+ */
+ if (do_push)
+ tty_flip_buffer_push(&port->port);
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 1f572c00a1be..cfda0a6c07a7 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -312,6 +312,12 @@ static int balloon(void *_vballoon)
+ else if (diff < 0)
+ leak_balloon(vb, -diff);
+ update_balloon_size(vb);
++
++ /*
++ * For large balloon changes, we could spend a lot of time
++	 * and always have work to do. Be nice to the rest of the system if preemption is disabled.
++ */
++ cond_resched();
+ }
+ return 0;
+ }
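
Aside: the cond_resched() matters because balloon() runs in a kthread that can loop for a long time on large target changes; on a !CONFIG_PREEMPT kernel nothing else would get this CPU until the loop finished. A sketch of the general pattern (names hypothetical):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    /* Long-running kthread loop: offer a reschedule point on each pass
     * so a voluntary-preemption kernel cannot be starved by this thread.
     */
    static int example_worker(void *data)
    {
            while (!kthread_should_stop()) {
                    /* ... perform one bounded batch of work ... */
                    cond_resched();
            }
            return 0;
    }
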
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 62176ad89846..84d590a9e4ad 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3246,6 +3246,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
+ /* send down all the barriers */
+ head = &info->fs_devices->devices;
+ list_for_each_entry_rcu(dev, head, dev_list) {
++ if (dev->missing)
++ continue;
+ if (!dev->bdev) {
+ errors_send++;
+ continue;
+@@ -3260,6 +3262,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
+
+ /* wait for all the barriers */
+ list_for_each_entry_rcu(dev, head, dev_list) {
++ if (dev->missing)
++ continue;
+ if (!dev->bdev) {
+ errors_wait++;
+ continue;
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index b791cfb9a050..25d64e8e8e47 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -663,7 +663,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
+ int lock = (trans->type != TRANS_JOIN_NOLOCK);
+ int err = 0;
+
+- if (--trans->use_count) {
++ if (trans->use_count > 1) {
++ trans->use_count--;
+ trans->block_rsv = trans->orig_rsv;
+ return 0;
+ }
+@@ -711,17 +712,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
+ }
+
+ if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
+- if (throttle) {
+- /*
+- * We may race with somebody else here so end up having
+- * to call end_transaction on ourselves again, so inc
+- * our use_count.
+- */
+- trans->use_count++;
++ if (throttle)
+ return btrfs_commit_transaction(trans, root);
+- } else {
++ else
+ wake_up_process(info->transaction_kthread);
+- }
+ }
+
+ if (trans->type & __TRANS_FREEZABLE)
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 30b38e23caa7..8ef74f3d8fe5 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2893,6 +2893,7 @@ static int prepend_path(const struct path *path,
+ restart:
+ bptr = *buffer;
+ blen = *buflen;
++ error = 0;
+ dentry = path->dentry;
+ vfsmnt = path->mnt;
+ mnt = real_mount(vfsmnt);
+@@ -3121,19 +3122,22 @@ char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
+ /*
+ * Write full pathname from the root of the filesystem into the buffer.
+ */
+-static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
++static char *__dentry_path(struct dentry *d, char *buf, int buflen)
+ {
++ struct dentry *dentry;
+ char *end, *retval;
+ int len, seq = 0;
+ int error = 0;
+
++ if (buflen < 2)
++ goto Elong;
++
+ rcu_read_lock();
+ restart:
++ dentry = d;
+ end = buf + buflen;
+ len = buflen;
+ prepend(&end, &len, "\0", 1);
+- if (buflen < 1)
+- goto Elong;
+ /* Get '/' right */
+ retval = end-1;
+ *retval = '/';
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index e678549ec994..8dd96591b2f8 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2616,6 +2616,27 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ ex_ee_block = le32_to_cpu(ex->ee_block);
+ ex_ee_len = ext4_ext_get_actual_len(ex);
+
++ /*
++ * If we're starting with an extent other than the last one in the
++ * node, we need to see if it shares a cluster with the extent to
++ * the right (towards the end of the file). If its leftmost cluster
++ * is this extent's rightmost cluster and it is not cluster aligned,
++ * we'll mark it as a partial that is not to be deallocated.
++ */
++
++ if (ex != EXT_LAST_EXTENT(eh)) {
++ ext4_fsblk_t current_pblk, right_pblk;
++ long long current_cluster, right_cluster;
++
++ current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
++ current_cluster = (long long)EXT4_B2C(sbi, current_pblk);
++ right_pblk = ext4_ext_pblock(ex + 1);
++ right_cluster = (long long)EXT4_B2C(sbi, right_pblk);
++ if (current_cluster == right_cluster &&
++ EXT4_PBLK_COFF(sbi, right_pblk))
++ *partial_cluster = -right_cluster;
++ }
++
+ trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
+
+ while (ex >= EXT_FIRST_EXTENT(eh) &&
+@@ -2741,10 +2762,15 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ err = ext4_ext_correct_indexes(handle, inode, path);
+
+ /*
+- * Free the partial cluster only if the current extent does not
+- * reference it. Otherwise we might free used cluster.
++ * If there's a partial cluster and at least one extent remains in
++ * the leaf, free the partial cluster if it isn't shared with the
++ * current extent. If there's a partial cluster and no extents
++ * remain in the leaf, it can't be freed here. It can only be
++ * freed when it's possible to determine if it's not shared with
++ * any other extent - when the next leaf is processed or when space
++ * removal is complete.
+ */
+- if (*partial_cluster > 0 &&
++ if (*partial_cluster > 0 && eh->eh_entries &&
+ (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
+ *partial_cluster)) {
+ int flags = get_default_free_blocks_flags(inode);
+@@ -4159,7 +4185,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ struct ext4_extent newex, *ex, *ex2;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_fsblk_t newblock = 0;
+- int free_on_err = 0, err = 0, depth;
++ int free_on_err = 0, err = 0, depth, ret;
+ unsigned int allocated = 0, offset = 0;
+ unsigned int allocated_clusters = 0;
+ struct ext4_allocation_request ar;
+@@ -4220,9 +4246,13 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ if (!ext4_ext_is_uninitialized(ex))
+ goto out;
+
+- allocated = ext4_ext_handle_uninitialized_extents(
++ ret = ext4_ext_handle_uninitialized_extents(
+ handle, inode, map, path, flags,
+ allocated, newblock);
++ if (ret < 0)
++ err = ret;
++ else
++ allocated = ret;
+ goto out3;
+ }
+ }
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 3595180b62ac..5bbec31440a4 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -88,16 +88,29 @@ static inline struct inode *wb_inode(struct list_head *head)
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/writeback.h>
+
++static void bdi_wakeup_thread(struct backing_dev_info *bdi)
++{
++ spin_lock_bh(&bdi->wb_lock);
++ if (test_bit(BDI_registered, &bdi->state))
++ mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
++ spin_unlock_bh(&bdi->wb_lock);
++}
++
+ static void bdi_queue_work(struct backing_dev_info *bdi,
+ struct wb_writeback_work *work)
+ {
+ trace_writeback_queue(bdi, work);
+
+ spin_lock_bh(&bdi->wb_lock);
++ if (!test_bit(BDI_registered, &bdi->state)) {
++ if (work->done)
++ complete(work->done);
++ goto out_unlock;
++ }
+ list_add_tail(&work->list, &bdi->work_list);
+- spin_unlock_bh(&bdi->wb_lock);
+-
+ mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
++out_unlock:
++ spin_unlock_bh(&bdi->wb_lock);
+ }
+
+ static void
+@@ -113,7 +126,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ trace_writeback_nowork(bdi);
+- mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
++ bdi_wakeup_thread(bdi);
+ return;
+ }
+
+@@ -160,7 +173,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
+ * writeback as soon as there is no other work to do.
+ */
+ trace_writeback_wake_background(bdi);
+- mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
++ bdi_wakeup_thread(bdi);
+ }
+
+ /*
+@@ -1016,7 +1029,7 @@ void bdi_writeback_workfn(struct work_struct *work)
+ current->flags |= PF_SWAPWRITE;
+
+ if (likely(!current_is_workqueue_rescuer() ||
+- list_empty(&bdi->bdi_list))) {
++ !test_bit(BDI_registered, &bdi->state))) {
+ /*
+ * The normal path. Keep writing back @bdi until its
+ * work_list is empty. Note that this path is also taken
+@@ -1038,10 +1051,10 @@ void bdi_writeback_workfn(struct work_struct *work)
+ trace_writeback_pages_written(pages_written);
+ }
+
+- if (!list_empty(&bdi->work_list) ||
+- (wb_has_dirty_io(wb) && dirty_writeback_interval))
+- queue_delayed_work(bdi_wq, &wb->dwork,
+- msecs_to_jiffies(dirty_writeback_interval * 10));
++ if (!list_empty(&bdi->work_list))
++ mod_delayed_work(bdi_wq, &wb->dwork, 0);
++ else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
++ bdi_wakeup_thread_delayed(bdi);
+
+ current->flags &= ~PF_SWAPWRITE;
+ }
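
Aside: the writeback changes all reduce to one rule: wb.dwork may only be scheduled while BDI_registered is set, tested under wb_lock, the same lock bdi_wb_shutdown() holds while clearing the bit. A generic sketch of that shutdown-safe pattern (struct and names hypothetical):

    #include <linux/bitops.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct example_dev {
            spinlock_t lock;            /* protects state and dwork scheduling */
            unsigned long state;        /* bit 0 == registered */
            struct delayed_work dwork;
    };

    /* Schedule work only while the device is registered; teardown clears
     * the bit under the same lock and then drains the work, so no new
     * work can sneak in after the drain.
     */
    static void example_wakeup(struct example_dev *d)
    {
            spin_lock_bh(&d->lock);
            if (test_bit(0, &d->state))
                    mod_delayed_work(system_wq, &d->dwork, 0);
            spin_unlock_bh(&d->lock);
    }
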
+diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
+index 16a5047903a6..406d9cc84ba8 100644
+--- a/fs/jffs2/compr_rtime.c
++++ b/fs/jffs2/compr_rtime.c
+@@ -33,7 +33,7 @@ static int jffs2_rtime_compress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t *sourcelen, uint32_t *dstlen)
+ {
+- short positions[256];
++ unsigned short positions[256];
+ int outpos = 0;
+ int pos=0;
+
+@@ -74,7 +74,7 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t srclen, uint32_t destlen)
+ {
+- short positions[256];
++ unsigned short positions[256];
+ int outpos = 0;
+ int pos=0;
+
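
Aside: the rtime fix is purely a type change: an offset stored in a signed short goes negative past 32767 and then indexes the window backwards. A tiny userspace illustration of the wraparound (assuming the usual 16-bit short):

    #include <stdio.h>

    int main(void)
    {
            short s = (short)40000;     /* wraps negative in a 16-bit short */
            unsigned short u = 40000;   /* keeps the intended offset */

            printf("%d %u\n", s, u);    /* prints e.g. "-25536 40000" */
            return 0;
    }
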
+diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
+index e4619b00f7c5..fa35ff79ab35 100644
+--- a/fs/jffs2/nodelist.h
++++ b/fs/jffs2/nodelist.h
+@@ -231,7 +231,7 @@ struct jffs2_tmp_dnode_info
+ uint32_t version;
+ uint32_t data_crc;
+ uint32_t partial_crc;
+- uint16_t csize;
++ uint32_t csize;
+ uint16_t overlapped;
+ };
+
+diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
+index 03310721712f..b6bd4affd9ad 100644
+--- a/fs/jffs2/nodemgmt.c
++++ b/fs/jffs2/nodemgmt.c
+@@ -179,6 +179,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
+ spin_unlock(&c->erase_completion_lock);
+
+ schedule();
++ remove_wait_queue(&c->erase_wait, &wait);
+ } else
+ spin_unlock(&c->erase_completion_lock);
+ } else if (ret)
+@@ -211,20 +212,25 @@ out:
+ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
+ uint32_t *len, uint32_t sumsize)
+ {
+- int ret = -EAGAIN;
++ int ret;
+ minsize = PAD(minsize);
+
+ jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
+
+- spin_lock(&c->erase_completion_lock);
+- while(ret == -EAGAIN) {
++ while (true) {
++ spin_lock(&c->erase_completion_lock);
+ ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
+ if (ret) {
+ jffs2_dbg(1, "%s(): looping, ret is %d\n",
+ __func__, ret);
+ }
++ spin_unlock(&c->erase_completion_lock);
++
++ if (ret == -EAGAIN)
++ cond_resched();
++ else
++ break;
+ }
+- spin_unlock(&c->erase_completion_lock);
+ if (!ret)
+ ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
+
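
Aside: two fixes are folded in here: the wait-queue entry is now removed after schedule(), and the -EAGAIN retry loop takes and drops erase_completion_lock on each pass so that cond_resched() runs unlocked (sleeping under a spinlock is forbidden). The retry shape, generically (names hypothetical):

    #include <linux/errno.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    /* Retry an operation that reports -EAGAIN: hold the lock only for
     * the attempt itself, and yield between attempts with it dropped.
     */
    static int retry_until_done(spinlock_t *lock, int (*attempt)(void))
    {
            int ret;

            while (1) {
                    spin_lock(lock);
                    ret = attempt();
                    spin_unlock(lock);

                    if (ret != -EAGAIN)
                            return ret;
                    cond_resched();
            }
    }
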
+diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
+index 20bf8e8002d6..a6fcbd220f6b 100644
+--- a/fs/xfs/xfs_da_btree.c
++++ b/fs/xfs/xfs_da_btree.c
+@@ -1335,7 +1335,7 @@ xfs_da3_fixhashpath(
+ node = blk->bp->b_addr;
+ xfs_da3_node_hdr_from_disk(&nodehdr, node);
+ btree = xfs_da3_node_tree_p(node);
+- if (be32_to_cpu(btree->hashval) == lasthash)
++ if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
+ break;
+ blk->hashval = lasthash;
+ btree[blk->index].hashval = cpu_to_be32(lasthash);
+diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
+index 5f66d519a726..a4cf599ecfc8 100644
+--- a/include/linux/backing-dev.h
++++ b/include/linux/backing-dev.h
+@@ -95,7 +95,7 @@ struct backing_dev_info {
+ unsigned int max_ratio, max_prop_frac;
+
+ struct bdi_writeback wb; /* default writeback info for this bdi */
+- spinlock_t wb_lock; /* protects work_list */
++ spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */
+
+ struct list_head work_list;
+
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index e6131a782481..694925837a16 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -233,6 +233,8 @@ extern asmlinkage void dump_stack(void) __cold;
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+ #endif
+
++#include <linux/dynamic_debug.h>
++
+ /* If you are writing a driver, please use dev_dbg instead */
+ #if defined(CONFIG_DYNAMIC_DEBUG)
+ /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
+@@ -343,7 +345,19 @@ extern asmlinkage void dump_stack(void) __cold;
+ #endif
+
+ /* If you are writing a driver, please use dev_dbg instead */
+-#if defined(DEBUG)
++#if defined(CONFIG_DYNAMIC_DEBUG)
++/* descriptor check is first to prevent flooding with "callbacks suppressed" */
++#define pr_debug_ratelimited(fmt, ...) \
++do { \
++ static DEFINE_RATELIMIT_STATE(_rs, \
++ DEFAULT_RATELIMIT_INTERVAL, \
++ DEFAULT_RATELIMIT_BURST); \
++ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
++ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
++ __ratelimit(&_rs)) \
++ __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
++} while (0)
++#elif defined(DEBUG)
+ #define pr_debug_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+ #else
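
Aside: the new CONFIG_DYNAMIC_DEBUG branch checks the dynamic-debug descriptor before touching the ratelimit state, so call sites that are switched off no longer burn ratelimit budget or emit "callbacks suppressed" noise. Typical use (driver name hypothetical):

    #include <linux/printk.h>

    /* Silent unless this site is enabled, e.g.:
     *   echo 'file mydrv.c +p' > /sys/kernel/debug/dynamic_debug/control
     * and even then rate-limited to the default interval/burst.
     */
    static void mydrv_handle_error(int err)
    {
            pr_debug_ratelimited("mydrv: transient error %d, retrying\n", err);
    }
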
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 64f864651d86..96c23247a332 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -203,7 +203,7 @@ struct tty_port {
+ wait_queue_head_t delta_msr_wait; /* Modem status change */
+ unsigned long flags; /* TTY flags ASY_*/
+ unsigned char console:1, /* port is a console */
+- low_latency:1; /* direct buffer flush */
++ low_latency:1; /* optional: tune for latency */
+ struct mutex mutex; /* Locking */
+ struct mutex buf_mutex; /* Buffer alloc lock */
+ unsigned char *xmit_buf; /* Optional buffer */
+diff --git a/kernel/exit.c b/kernel/exit.c
+index a949819055d5..dcde2c4b61d0 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -559,9 +559,6 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
+ struct list_head *dead)
+ {
+ list_move_tail(&p->sibling, &p->real_parent->children);
+-
+- if (p->exit_state == EXIT_DEAD)
+- return;
+ /*
+ * If this is a threaded reparent there is no need to
+ * notify anyone anything has happened.
+@@ -569,9 +566,19 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
+ if (same_thread_group(p->real_parent, father))
+ return;
+
+- /* We don't want people slaying init. */
++ /*
++ * We don't want people slaying init.
++ *
++ * Note: we do this even if it is EXIT_DEAD, wait_task_zombie()
++ * can change ->exit_state to EXIT_ZOMBIE. If this is the final
++ * state, do_notify_parent() was already called and ->exit_signal
++ * doesn't matter.
++ */
+ p->exit_signal = SIGCHLD;
+
++ if (p->exit_state == EXIT_DEAD)
++ return;
++
+ /* If it has exited notify the new parent about this child's death. */
+ if (!p->ptrace &&
+ p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
+@@ -783,6 +790,8 @@ void do_exit(long code)
+ exit_shm(tsk);
+ exit_files(tsk);
+ exit_fs(tsk);
++ if (group_dead)
++ disassociate_ctty(1);
+ exit_task_namespaces(tsk);
+ exit_task_work(tsk);
+ check_stack_usage();
+@@ -798,13 +807,9 @@ void do_exit(long code)
+
+ cgroup_exit(tsk, 1);
+
+- if (group_dead)
+- disassociate_ctty(1);
+-
+ module_put(task_thread_info(tsk)->exec_domain->module);
+
+ proc_exit_connector(tsk);
+-
+ /*
+ * FIXME: do that only when needed, using sched_exit tracepoint
+ */
+diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
+index 42086551a24a..55e956056987 100644
+--- a/kernel/pid_namespace.c
++++ b/kernel/pid_namespace.c
+@@ -312,7 +312,9 @@ static void *pidns_get(struct task_struct *task)
+ struct pid_namespace *ns;
+
+ rcu_read_lock();
+- ns = get_pid_ns(task_active_pid_ns(task));
++ ns = task_active_pid_ns(task);
++ if (ns)
++ get_pid_ns(ns);
+ rcu_read_unlock();
+
+ return ns;
+diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
+index 13fb1134ba58..6991139e3303 100644
+--- a/kernel/user_namespace.c
++++ b/kernel/user_namespace.c
+@@ -146,7 +146,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
+
+ /* Find the matching extent */
+ extents = map->nr_extents;
+- smp_read_barrier_depends();
++ smp_rmb();
+ for (idx = 0; idx < extents; idx++) {
+ first = map->extent[idx].first;
+ last = first + map->extent[idx].count - 1;
+@@ -170,7 +170,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id)
+
+ /* Find the matching extent */
+ extents = map->nr_extents;
+- smp_read_barrier_depends();
++ smp_rmb();
+ for (idx = 0; idx < extents; idx++) {
+ first = map->extent[idx].first;
+ last = first + map->extent[idx].count - 1;
+@@ -193,7 +193,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
+
+ /* Find the matching extent */
+ extents = map->nr_extents;
+- smp_read_barrier_depends();
++ smp_rmb();
+ for (idx = 0; idx < extents; idx++) {
+ first = map->extent[idx].lower_first;
+ last = first + map->extent[idx].count - 1;
+@@ -609,9 +609,8 @@ static ssize_t map_write(struct file *file, const char __user *buf,
+ * were written before the count of the extents.
+ *
+ * To achieve this smp_wmb() is used on guarantee the write
+- * order and smp_read_barrier_depends() is guaranteed that we
+- * don't have crazy architectures returning stale data.
+- *
++ * order and smp_rmb() is guaranteed that we don't have crazy
++ * architectures returning stale data.
+ */
+ mutex_lock(&id_map_mutex);
+
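
Aside: smp_read_barrier_depends() only orders dependent loads, which is too weak here: the loop reads nr_extents and then independently loads extent fields, so a full read barrier is needed to pair with the writer's smp_wmb(). The pairing, in miniature (names hypothetical):

    #include <linux/compiler.h>
    #include <asm/barrier.h>

    static int extent_data[16];
    static int nr_extents;

    /* Writer: publish the data, then the count. */
    static void publish(int idx, int val)
    {
            extent_data[idx] = val;
            smp_wmb();                  /* data visible before count */
            nr_extents = idx + 1;
    }

    /* Reader: if the new count is seen, smp_rmb() guarantees the data
     * written before it is seen as well.
     */
    static int lookup(int idx)
    {
            int n = ACCESS_ONCE(nr_extents);

            smp_rmb();
            return idx < n ? extent_data[idx] : -1;
    }
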
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index ce682f7a4f29..09d9591b7708 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -288,13 +288,19 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
+ * Note, we wouldn't bother setting up the timer, but this function is on the
+ * fast-path (used by '__mark_inode_dirty()'), so we save few context switches
+ * by delaying the wake-up.
++ *
++ * We have to be careful not to postpone flush work if it is scheduled for
++ * earlier. Thus we use queue_delayed_work().
+ */
+ void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
+ {
+ unsigned long timeout;
+
+ timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
+- mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
++ spin_lock_bh(&bdi->wb_lock);
++ if (test_bit(BDI_registered, &bdi->state))
++ queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
++ spin_unlock_bh(&bdi->wb_lock);
+ }
+
+ /*
+@@ -307,9 +313,6 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
+ spin_unlock_bh(&bdi_lock);
+
+ synchronize_rcu_expedited();
+-
+- /* bdi_list is now unused, clear it to mark @bdi dying */
+- INIT_LIST_HEAD(&bdi->bdi_list);
+ }
+
+ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+@@ -360,6 +363,11 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
+ */
+ bdi_remove_from_list(bdi);
+
++ /* Make sure nobody queues further work */
++ spin_lock_bh(&bdi->wb_lock);
++ clear_bit(BDI_registered, &bdi->state);
++ spin_unlock_bh(&bdi->wb_lock);
++
+ /*
+ * Drain work list and shutdown the delayed_work. At this point,
+ * @bdi->bdi_list is empty telling bdi_Writeback_workfn() that @bdi
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 8db3e89fae35..a3af2b750e96 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3565,7 +3565,13 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+
+ hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+
+- if (ltk->type & HCI_SMP_STK) {
++ /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
++ * temporary key used to encrypt a connection following
++ * pairing. It is used during the Encrypted Session Setup to
++ * distribute the keys. Later, security can be re-established
++ * using a distributed LTK.
++ */
++ if (ltk->type == HCI_SMP_STK_SLAVE) {
+ list_del(&ltk->list);
+ kfree(ltk);
+ }
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index c482f7c7dd32..c211607b79b3 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -249,26 +249,33 @@ int ping_init_sock(struct sock *sk)
+ {
+ struct net *net = sock_net(sk);
+ kgid_t group = current_egid();
+- struct group_info *group_info = get_current_groups();
+- int i, j, count = group_info->ngroups;
++ struct group_info *group_info;
++ int i, j, count;
+ kgid_t low, high;
++ int ret = 0;
+
+ inet_get_ping_group_range_net(net, &low, &high);
+ if (gid_lte(low, group) && gid_lte(group, high))
+ return 0;
+
++ group_info = get_current_groups();
++ count = group_info->ngroups;
+ for (i = 0; i < group_info->nblocks; i++) {
+ int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
+ for (j = 0; j < cp_count; j++) {
+ kgid_t gid = group_info->blocks[i][j];
+ if (gid_lte(low, gid) && gid_lte(gid, high))
+- return 0;
++ goto out_release_group;
+ }
+
+ count -= cp_count;
+ }
+
+- return -EACCES;
++ ret = -EACCES;
++
++out_release_group:
++ put_group_info(group_info);
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(ping_init_sock);
+
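
Aside: the ping fix is reference-count discipline: get_current_groups() pins the group list, so the early "return 0" inside the scan leaked it. The invariant, reduced to a sketch (helper name hypothetical; GROUP_AT() is the 3.12-era accessor):

    #include <linux/cred.h>
    #include <linux/types.h>
    #include <linux/uidgid.h>

    /* Every path taken after get_current_groups() must put_group_info(). */
    static bool gid_is_supplementary(kgid_t gid)
    {
            struct group_info *gi = get_current_groups();
            bool found = false;
            int i;

            for (i = 0; i < gi->ngroups; i++) {
                    if (gid_eq(GROUP_AT(gi, i), gid)) {
                            found = true;
                            break;
                    }
            }
            put_group_info(gi);
            return found;
    }
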
+diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
+index c3235675f359..5c2dab276109 100644
+--- a/net/openvswitch/dp_notify.c
++++ b/net/openvswitch/dp_notify.c
+@@ -65,8 +65,7 @@ void ovs_dp_notify_wq(struct work_struct *work)
+ continue;
+
+ netdev_vport = netdev_vport_priv(vport);
+- if (netdev_vport->dev->reg_state == NETREG_UNREGISTERED ||
+- netdev_vport->dev->reg_state == NETREG_UNREGISTERING)
++ if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH))
+ dp_detach_port_notify(vport);
+ }
+ }
+@@ -88,6 +87,10 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
+ return NOTIFY_DONE;
+
+ if (event == NETDEV_UNREGISTER) {
++ /* upper_dev_unlink and decrement promisc immediately */
++ ovs_netdev_detach_dev(vport);
++
++ /* schedule vport destroy, dev_put and genl notification */
+ ovs_net = net_generic(dev_net(dev), ovs_net_id);
+ queue_work(system_wq, &ovs_net->dp_notify_work);
+ }
+diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
+index 09d93c13cfd6..d21f77d875ba 100644
+--- a/net/openvswitch/vport-netdev.c
++++ b/net/openvswitch/vport-netdev.c
+@@ -150,15 +150,25 @@ static void free_port_rcu(struct rcu_head *rcu)
+ ovs_vport_free(vport_from_priv(netdev_vport));
+ }
+
+-static void netdev_destroy(struct vport *vport)
++void ovs_netdev_detach_dev(struct vport *vport)
+ {
+ struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+
+- rtnl_lock();
++ ASSERT_RTNL();
+ netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
+ netdev_rx_handler_unregister(netdev_vport->dev);
+- netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
++ netdev_upper_dev_unlink(netdev_vport->dev,
++ netdev_master_upper_dev_get(netdev_vport->dev));
+ dev_set_promiscuity(netdev_vport->dev, -1);
++}
++
++static void netdev_destroy(struct vport *vport)
++{
++ struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
++
++ rtnl_lock();
++ if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
++ ovs_netdev_detach_dev(vport);
+ rtnl_unlock();
+
+ call_rcu(&netdev_vport->rcu, free_port_rcu);
+diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
+index dd298b5c5cdb..8df01c1127e5 100644
+--- a/net/openvswitch/vport-netdev.h
++++ b/net/openvswitch/vport-netdev.h
+@@ -39,5 +39,6 @@ netdev_vport_priv(const struct vport *vport)
+ }
+
+ const char *ovs_netdev_get_name(const struct vport *);
++void ovs_netdev_detach_dev(struct vport *);
+
+ #endif /* vport_netdev.h */
+diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
+index 2d682977ce82..39dc5bc742e0 100644
+--- a/virt/kvm/ioapic.c
++++ b/virt/kvm/ioapic.c
+@@ -306,7 +306,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
+ BUG_ON(ioapic->rtc_status.pending_eoi != 0);
+ ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
+ ioapic->rtc_status.dest_map);
+- ioapic->rtc_status.pending_eoi = ret;
++ ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
+ } else
+ ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
+
diff --git a/1019_linux-3.12.20.patch b/1019_linux-3.12.20.patch
new file mode 100644
index 00000000..44b5357d
--- /dev/null
+++ b/1019_linux-3.12.20.patch
@@ -0,0 +1,7196 @@
+diff --git a/Documentation/devicetree/bindings/spi/efm32-spi.txt b/Documentation/devicetree/bindings/spi/efm32-spi.txt
+index a590ca51be75..f762e11bfe11 100644
+--- a/Documentation/devicetree/bindings/spi/efm32-spi.txt
++++ b/Documentation/devicetree/bindings/spi/efm32-spi.txt
+@@ -3,7 +3,7 @@
+ Required properties:
+ - #address-cells: see spi-bus.txt
+ - #size-cells: see spi-bus.txt
+-- compatible: should be "efm32,spi"
++- compatible: should be "energymicro,efm32-spi"
+ - reg: Offset and length of the register set for the controller
+ - interrupts: pair specifying rx and tx irq
+ - clocks: phandle to the spi clock
+@@ -15,7 +15,7 @@ Example:
+ spi1: spi@0x4000c400 { /* USART1 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+- compatible = "efm32,spi";
++ compatible = "energymicro,efm32-spi";
+ reg = <0x4000c400 0x400>;
+ interrupts = <15 16>;
+ clocks = <&cmu 20>;
+diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
+index 1e6b6531bbcc..d2ba80bb7af5 100644
+--- a/Documentation/video4linux/gspca.txt
++++ b/Documentation/video4linux/gspca.txt
+@@ -55,6 +55,7 @@ zc3xx 0458:700f Genius VideoCam Web V2
+ sonixj 0458:7025 Genius Eye 311Q
+ sn9c20x 0458:7029 Genius Look 320s
+ sonixj 0458:702e Genius Slim 310 NB
++sn9c20x 0458:7045 Genius Look 1320 V2
+ sn9c20x 0458:704a Genius Slim 1320
+ sn9c20x 0458:704c Genius i-Look 1321
+ sn9c20x 045e:00f4 LifeCam VX-6000 (SN9C20x + OV9650)
+diff --git a/Makefile b/Makefile
+index cf5d97e60b39..d8adfdbe0344 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 19
++SUBLEVEL = 20
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
+index b908dde8a331..15588b0611e9 100644
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -610,11 +610,13 @@ resume_user_mode_begin:
+
+ resume_kernel_mode:
+
+-#ifdef CONFIG_PREEMPT
+-
+- ; This is a must for preempt_schedule_irq()
++ ; Disable Interrupts from this point on
++ ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
++ ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
+ IRQ_DISABLE r9
+
++#ifdef CONFIG_PREEMPT
++
+ ; Can't preempt if preemption disabled
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
+diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig
+index 295cefeb25d3..33058aa40e77 100644
+--- a/arch/arc/plat-arcfpga/Kconfig
++++ b/arch/arc/plat-arcfpga/Kconfig
+@@ -33,7 +33,6 @@ config ISS_SMP_EXTN
+ bool "ARC SMP Extensions (ISS Models only)"
+ default n
+ depends on SMP
+- select ARC_HAS_COH_RTSC
+ help
+ SMP Extensions to ARC700, in a "simulation only" Model, supported in
+ ARC ISS (Instruction Set Simulator).
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 1ad6fb6c094d..e47fcd1e9645 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -892,7 +892,7 @@ config ARCH_MULTI_V5
+ bool "ARMv5 based platforms (ARM926T, XSCALE, PJ1, ...)"
+ depends on !ARCH_MULTI_V6_V7
+ select ARCH_MULTI_V4_V5
+- select CPU_ARM926T if (!CPU_ARM946E || CPU_ARM1020 || \
++ select CPU_ARM926T if !(CPU_ARM946E || CPU_ARM1020 || \
+ CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || \
+ CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_FEROCEON)
+
+diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
+index 2f66deda9f5c..e6e952e32117 100644
+--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
++++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
+@@ -120,7 +120,7 @@
+ musb: usb@47400000 {
+ status = "okay";
+
+- control@44e10000 {
++ control@44e10620 {
+ status = "okay";
+ };
+
+@@ -141,7 +141,7 @@
+ dr_mode = "host";
+ };
+
+- dma-controller@07402000 {
++ dma-controller@47402000 {
+ status = "okay";
+ };
+ };
+diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
+index e8ec8756e498..b2f476911e13 100644
+--- a/arch/arm/boot/dts/am335x-evm.dts
++++ b/arch/arm/boot/dts/am335x-evm.dts
+@@ -174,7 +174,7 @@
+ musb: usb@47400000 {
+ status = "okay";
+
+- control@44e10000 {
++ control@44e10620 {
+ status = "okay";
+ };
+
+@@ -195,7 +195,7 @@
+ dr_mode = "host";
+ };
+
+- dma-controller@07402000 {
++ dma-controller@47402000 {
+ status = "okay";
+ };
+ };
+diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
+index 4f339fa91c57..521d92afc78a 100644
+--- a/arch/arm/boot/dts/am335x-evmsk.dts
++++ b/arch/arm/boot/dts/am335x-evmsk.dts
+@@ -211,7 +211,7 @@
+ musb: usb@47400000 {
+ status = "okay";
+
+- control@44e10000 {
++ control@44e10620 {
+ status = "okay";
+ };
+
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index f9c5da9c7fe1..e9b6775299d0 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -346,7 +346,7 @@
+ ti,hwmods = "usb_otg_hs";
+ status = "disabled";
+
+- ctrl_mod: control@44e10000 {
++ ctrl_mod: control@44e10620 {
+ compatible = "ti,am335x-usb-ctrl-module";
+ reg = <0x44e10620 0x10
+ 0x44e10648 0x4>;
+@@ -449,7 +449,7 @@
+ "tx14", "tx15";
+ };
+
+- cppi41dma: dma-controller@07402000 {
++ cppi41dma: dma-controller@47402000 {
+ compatible = "ti,am3359-cppi41";
+ reg = <0x47400000 0x1000
+ 0x47402000 0x1000
+diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
+index 364a63dce6c5..beae26c4f3fb 100644
+--- a/arch/arm/boot/dts/armada-370-xp.dtsi
++++ b/arch/arm/boot/dts/armada-370-xp.dtsi
+@@ -156,6 +156,7 @@
+ #size-cells = <0>;
+ compatible = "marvell,orion-mdio";
+ reg = <0x72004 0x4>;
++ clocks = <&gateclk 4>;
+ };
+
+ eth0: ethernet@70000 {
+diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
+index cee55fa33731..47fa5aba20a1 100644
+--- a/arch/arm/boot/dts/exynos5250-arndale.dts
++++ b/arch/arm/boot/dts/exynos5250-arndale.dts
+@@ -286,6 +286,7 @@
+ regulator-name = "vdd_g3d";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
++ regulator-always-on;
+ regulator-boot-on;
+ op_mode = <1>;
+ };
+diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
+index 191ada6e4d2d..662c7bd06108 100644
+--- a/arch/arm/include/asm/div64.h
++++ b/arch/arm/include/asm/div64.h
+@@ -156,7 +156,7 @@
+ /* Select the best insn combination to perform the */ \
+ /* actual __m * __n / (__p << 64) operation. */ \
+ if (!__c) { \
+- asm ( "umull %Q0, %R0, %1, %Q2\n\t" \
++ asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \
+ "mov %Q0, #0" \
+ : "=&r" (__res) \
+ : "r" (__m), "r" (__n) \
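
Aside: the div64 change fixes an inline-asm constraint, not the algorithm: __m is a 64-bit value, and plain %1 lets the compiler hand umull either register of the pair; %Q1 pins it to the low word, which is what the 32x32 multiply actually consumes. The modifiers in isolation (assuming 32-bit ARM):

    /* %Q selects the low register of a 64-bit pair, %R the high one. */
    static inline unsigned long long mul32x32(unsigned int a, unsigned int b)
    {
            unsigned long long res;

            asm("umull %Q0, %R0, %1, %2"
                : "=&r" (res)
                : "r" (a), "r" (b));
            return res;
    }
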
+diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
+index e42cf597f6e6..2aff798fbef4 100644
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -3,11 +3,6 @@
+
+ #ifdef __KERNEL__
+
+-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
+-/* ARM doesn't provide unprivileged exclusive memory accessors */
+-#include <asm-generic/futex.h>
+-#else
+-
+ #include <linux/futex.h>
+ #include <linux/uaccess.h>
+ #include <asm/errno.h>
+@@ -164,6 +159,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+ return ret;
+ }
+
+-#endif /* !(CPU_USE_DOMAINS && SMP) */
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_ARM_FUTEX_H */
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index f97ee02386ee..c98c9c89b95c 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -140,6 +140,7 @@
+ #define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
+ #define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
+ #define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
++#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
+ #define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
+
+ #ifndef __ASSEMBLY__
+diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
+index 141baa3f9a72..cbd61977c996 100644
+--- a/arch/arm/include/asm/unistd.h
++++ b/arch/arm/include/asm/unistd.h
+@@ -48,6 +48,5 @@
+ */
+ #define __IGNORE_fadvise64_64
+ #define __IGNORE_migrate_pages
+-#define __IGNORE_kcmp
+
+ #endif /* __ASM_ARM_UNISTD_H */
+diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
+index 57221e349a7c..faa3d4c41ece 100644
+--- a/arch/arm/kernel/machine_kexec.c
++++ b/arch/arm/kernel/machine_kexec.c
+@@ -181,3 +181,10 @@ void machine_kexec(struct kimage *image)
+
+ soft_restart(reboot_code_buffer_phys);
+ }
++
++void arch_crash_save_vmcoreinfo(void)
++{
++#ifdef CONFIG_ARM_LPAE
++ VMCOREINFO_CONFIG(ARM_LPAE);
++#endif
++}
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index cb79a5dd6d96..fe59e4a19022 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -41,6 +41,8 @@ static unsigned long hyp_idmap_start;
+ static unsigned long hyp_idmap_end;
+ static phys_addr_t hyp_idmap_vector;
+
++#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
++
+ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+ {
+ /*
+@@ -172,14 +174,14 @@ void free_boot_hyp_pgd(void)
+ if (boot_hyp_pgd) {
+ unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+ unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+- kfree(boot_hyp_pgd);
++ free_pages((unsigned long)boot_hyp_pgd, pgd_order);
+ boot_hyp_pgd = NULL;
+ }
+
+ if (hyp_pgd)
+ unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+
+- kfree(init_bounce_page);
++ free_page((unsigned long)init_bounce_page);
+ init_bounce_page = NULL;
+
+ mutex_unlock(&kvm_hyp_pgd_mutex);
+@@ -209,7 +211,7 @@ void free_hyp_pgds(void)
+ for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
+ unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+
+- kfree(hyp_pgd);
++ free_pages((unsigned long)hyp_pgd, pgd_order);
+ hyp_pgd = NULL;
+ }
+
+@@ -781,7 +783,7 @@ int kvm_mmu_init(void)
+ size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
+ phys_addr_t phys_base;
+
+- init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
++ init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!init_bounce_page) {
+ kvm_err("Couldn't allocate HYP init bounce page\n");
+ err = -ENOMEM;
+@@ -807,8 +809,9 @@ int kvm_mmu_init(void)
+ (unsigned long)phys_base);
+ }
+
+- hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+- boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
++ hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
++ boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
++
+ if (!hyp_pgd || !boot_hyp_pgd) {
+ kvm_err("Hyp mode PGD not allocated\n");
+ err = -ENOMEM;
+diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
+index e022a869bff2..6037a9a01ed5 100644
+--- a/arch/arm/mach-omap2/irq.c
++++ b/arch/arm/mach-omap2/irq.c
+@@ -222,6 +222,7 @@ void __init ti81xx_init_irq(void)
+ static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs *regs)
+ {
+ u32 irqnr;
++ int handled_irq = 0;
+
+ do {
+ irqnr = readl_relaxed(base_addr + 0x98);
+@@ -249,8 +250,15 @@ out:
+ if (irqnr) {
+ irqnr = irq_find_mapping(domain, irqnr);
+ handle_IRQ(irqnr, regs);
++ handled_irq = 1;
+ }
+ } while (irqnr);
++
++ /* If an irq is masked or deasserted while active, we will
++ * keep ending up here with no irq handled. So remove it from
++ * the INTC with an ack.*/
++	 * the INTC with an ack. */
++ omap_ack_irq(NULL);
+ }
+
+ asmlinkage void __exception_irq_entry omap2_intc_handle_irq(struct pt_regs *regs)
+diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+index 60f23440082e..857e76c38a15 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+@@ -1968,7 +1968,7 @@ static struct omap_hwmod_irq_info omap3xxx_usb_host_hs_irqs[] = {
+ static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
+ .name = "usb_host_hs",
+ .class = &omap3xxx_usb_host_hs_hwmod_class,
+- .clkdm_name = "l3_init_clkdm",
++ .clkdm_name = "usbhost_clkdm",
+ .mpu_irqs = omap3xxx_usb_host_hs_irqs,
+ .main_clk = "usbhost_48m_fck",
+ .prcm = {
+@@ -2053,7 +2053,7 @@ static struct omap_hwmod_irq_info omap3xxx_usb_tll_hs_irqs[] = {
+ static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = {
+ .name = "usb_tll_hs",
+ .class = &omap3xxx_usb_tll_hs_hwmod_class,
+- .clkdm_name = "l3_init_clkdm",
++ .clkdm_name = "core_l4_clkdm",
+ .mpu_irqs = omap3xxx_usb_tll_hs_irqs,
+ .main_clk = "usbtll_fck",
+ .prcm = {
+diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
+index 7bdd22afce69..d4d0fce325c7 100644
+--- a/arch/arm/mach-omap2/pm.h
++++ b/arch/arm/mach-omap2/pm.h
+@@ -103,7 +103,7 @@ static inline void enable_omap3630_toggle_l2_on_restore(void) { }
+
+ #define PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD (1 << 0)
+
+-#if defined(CONFIG_ARCH_OMAP4)
++#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
+ extern u16 pm44xx_errata;
+ #define IS_PM44XX_ERRATUM(id) (pm44xx_errata & (id))
+ #else
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index cd2c88e7a8f7..b3b1b883bd08 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -446,7 +446,6 @@ config CPU_32v5
+
+ config CPU_32v6
+ bool
+- select CPU_USE_DOMAINS if CPU_V6 && MMU
+ select TLS_REG_EMUL if !CPU_32v6K && !MMU
+
+ config CPU_32v6K
+@@ -671,7 +670,7 @@ config ARM_VIRT_EXT
+
+ config SWP_EMULATE
+ bool "Emulate SWP/SWPB instructions"
+- depends on !CPU_USE_DOMAINS && CPU_V7
++ depends on CPU_V7
+ default y if SMP
+ select HAVE_PROC_CPU if PROC_FS
+ help
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index 0222ba7603af..b7c987dbb604 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -464,6 +464,16 @@ static void __init build_mem_type_table(void)
+ s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
+
+ /*
++ * We don't use domains on ARMv6 (since this causes problems with
++ * v6/v7 kernels), so we must use a separate memory type for user
++ * r/o, kernel r/w to map the vectors page.
++ */
++#ifndef CONFIG_ARM_LPAE
++ if (cpu_arch == CPU_ARCH_ARMv6)
++ vecs_pgprot |= L_PTE_MT_VECTORS;
++#endif
++
++ /*
+ * ARMv6 and above have extended page tables.
+ */
+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
+index e3c48a3fe063..ee1d80593958 100644
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -112,13 +112,9 @@
+ * 100x 1 0 1 r/o no acc
+ * 10x0 1 0 1 r/o no acc
+ * 1011 0 0 1 r/w no acc
+- * 110x 0 1 0 r/w r/o
+- * 11x0 0 1 0 r/w r/o
+- * 1111 0 1 1 r/w r/w
+- *
+- * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
+ * 110x 1 1 1 r/o r/o
+ * 11x0 1 1 1 r/o r/o
++ * 1111 0 1 1 r/w r/w
+ */
+ .macro armv6_mt_table pfx
+ \pfx\()_mt_table:
+@@ -137,7 +133,7 @@
+ .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
+ .long 0x00 @ unused
+ .long 0x00 @ unused
+- .long 0x00 @ unused
++ .long PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX @ L_PTE_MT_VECTORS
+ .endm
+
+ .macro armv6_set_pte_ext pfx
+@@ -158,24 +154,21 @@
+
+ tst r1, #L_PTE_USER
+ orrne r3, r3, #PTE_EXT_AP1
+-#ifdef CONFIG_CPU_USE_DOMAINS
+- @ allow kernel read/write access to read-only user pages
+ tstne r3, #PTE_EXT_APX
+- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+-#endif
++
++ @ user read-only -> kernel read-only
++ bicne r3, r3, #PTE_EXT_AP0
+
+ tst r1, #L_PTE_XN
+ orrne r3, r3, #PTE_EXT_XN
+
+- orr r3, r3, r2
++ eor r3, r3, r2
+
+ tst r1, #L_PTE_YOUNG
+ tstne r1, #L_PTE_PRESENT
+ moveq r3, #0
+-#ifndef CONFIG_CPU_USE_DOMAINS
+ tstne r1, #L_PTE_NONE
+ movne r3, #0
+-#endif
+
+ str r3, [r0]
+ mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
+index bdd3be4be77a..1f52915f2b28 100644
+--- a/arch/arm/mm/proc-v7-2level.S
++++ b/arch/arm/mm/proc-v7-2level.S
+@@ -90,21 +90,14 @@ ENTRY(cpu_v7_set_pte_ext)
+
+ tst r1, #L_PTE_USER
+ orrne r3, r3, #PTE_EXT_AP1
+-#ifdef CONFIG_CPU_USE_DOMAINS
+- @ allow kernel read/write access to read-only user pages
+- tstne r3, #PTE_EXT_APX
+- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+-#endif
+
+ tst r1, #L_PTE_XN
+ orrne r3, r3, #PTE_EXT_XN
+
+ tst r1, #L_PTE_YOUNG
+ tstne r1, #L_PTE_VALID
+-#ifndef CONFIG_CPU_USE_DOMAINS
+ eorne r1, r1, #L_PTE_NONE
+ tstne r1, #L_PTE_NONE
+-#endif
+ moveq r3, #0
+
+ ARM( str r3, [r0, #2048]! )
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 82d95a7e9466..05fe332c1061 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -168,7 +168,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+ {
+ if (pte_valid_user(pte)) {
+- if (pte_exec(pte))
++ if (!pte_special(pte) && pte_exec(pte))
+ __sync_icache_dcache(pte, addr);
+ if (!pte_dirty(pte))
+ pte = pte_wrprotect(pte);
+@@ -253,11 +253,11 @@ static inline int has_transparent_hugepage(void)
+ * Mark the prot value as uncacheable and unbufferable.
+ */
+ #define pgprot_noncached(prot) \
+- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
++ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
+ #define pgprot_writecombine(prot) \
+- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
++ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+ #define pgprot_dmacoherent(prot) \
+- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
++ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+ #define __HAVE_PHYS_MEM_ACCESS_PROT
+ struct file;
+ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
+index 4b6274b47f33..e75ef8219caf 100644
+--- a/arch/mips/kvm/kvm_mips_emul.c
++++ b/arch/mips/kvm/kvm_mips_emul.c
+@@ -1571,17 +1571,17 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+ arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
+ #else
+ /* UserLocal not implemented */
+- er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
++ er = EMULATE_FAIL;
+ #endif
+ break;
+
+ default:
+- printk("RDHWR not supported\n");
++ kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
+ er = EMULATE_FAIL;
+ break;
+ }
+ } else {
+- printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
++ kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
+ er = EMULATE_FAIL;
+ }
+
+@@ -1590,6 +1590,7 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+ */
+ if (er == EMULATE_FAIL) {
+ vcpu->arch.pc = curr_pc;
++ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+ }
+ return er;
+ }
+diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
+index 7e0277a1048f..32a7c828f073 100644
+--- a/arch/mips/power/hibernate.S
++++ b/arch/mips/power/hibernate.S
+@@ -43,6 +43,7 @@ LEAF(swsusp_arch_resume)
+ bne t1, t3, 1b
+ PTR_L t0, PBE_NEXT(t0)
+ bnez t0, 0b
++ jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
+ PTR_LA t0, saved_regs
+ PTR_L ra, PT_R31(t0)
+ PTR_L sp, PT_R29(t0)
+diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
+index a613d2c82fd9..b142b8e0ed9e 100644
+--- a/arch/powerpc/include/asm/compat.h
++++ b/arch/powerpc/include/asm/compat.h
+@@ -8,7 +8,11 @@
+ #include <linux/sched.h>
+
+ #define COMPAT_USER_HZ 100
++#ifdef __BIG_ENDIAN__
+ #define COMPAT_UTS_MACHINE "ppc\0\0"
++#else
++#define COMPAT_UTS_MACHINE "ppcle\0\0"
++#endif
+
+ typedef u32 compat_size_t;
+ typedef s32 compat_ssize_t;
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 7ca729cac073..cb9c1740cee0 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -208,6 +208,7 @@
+ #define SPRN_ACOP 0x1F /* Available Coprocessor Register */
+ #define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
+ #define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */
++#define TEXASR_FS __MASK(63-36) /* Transaction Failure Summary */
+ #define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */
+ #define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
+ #define SPRN_CTRLF 0x088
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index aa75b2beba7d..f1106103634b 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -525,6 +525,31 @@ out_and_saveregs:
+ tm_save_sprs(thr);
+ }
+
++extern void __tm_recheckpoint(struct thread_struct *thread,
++ unsigned long orig_msr);
++
++void tm_recheckpoint(struct thread_struct *thread,
++ unsigned long orig_msr)
++{
++ unsigned long flags;
++
++ /* We really can't be interrupted here as the TEXASR registers can't
++ * change and later in the trecheckpoint code, we have a userspace R1.
++ * So let's hard disable over this region.
++ */
++ local_irq_save(flags);
++ hard_irq_disable();
++
++ /* The TM SPRs are restored here, so that TEXASR.FS can be set
++ * before the trecheckpoint and no explosion occurs.
++ */
++ tm_restore_sprs(thread);
++
++ __tm_recheckpoint(thread, orig_msr);
++
++ local_irq_restore(flags);
++}
++
+ static inline void tm_recheckpoint_new_task(struct task_struct *new)
+ {
+ unsigned long msr;
+@@ -543,13 +568,10 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
+ if (!new->thread.regs)
+ return;
+
+- /* The TM SPRs are restored here, so that TEXASR.FS can be set
+- * before the trecheckpoint and no explosion occurs.
+- */
+- tm_restore_sprs(&new->thread);
+-
+- if (!MSR_TM_ACTIVE(new->thread.regs->msr))
++	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
++ tm_restore_sprs(&new->thread);
+ return;
++ }
+ msr = new->thread.tm_orig_msr;
+ /* Recheckpoint to restore original checkpointed register state. */
+ TM_DEBUG("*** tm_recheckpoint of pid %d "
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index fea2dba1226c..50606e4261a1 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -872,6 +872,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
+ * transactional versions should be loaded.
+ */
+ tm_enable();
++ /* Make sure the transaction is marked as failed */
++ current->thread.tm_texasr |= TEXASR_FS;
+ /* This loads the checkpointed FP/VEC state, if used */
+ tm_recheckpoint(&current->thread, msr);
+ /* Get the top half of the MSR */
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 36b1d1daa236..4456779dba1c 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -519,6 +519,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
+ }
+ #endif
+ tm_enable();
++ /* Make sure the transaction is marked as failed */
++ current->thread.tm_texasr |= TEXASR_FS;
+ /* This loads the checkpointed FP/VEC state, if used */
+ tm_recheckpoint(&current->thread, msr);
+
+diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
+index cd809eaa8b5c..8b0518519f9f 100644
+--- a/arch/powerpc/kernel/tm.S
++++ b/arch/powerpc/kernel/tm.S
+@@ -306,7 +306,7 @@ dont_backup_fp:
+ * Call with IRQs off, stacks get all out of sync for
+ * some periods in here!
+ */
+-_GLOBAL(tm_recheckpoint)
++_GLOBAL(__tm_recheckpoint)
+ mfcr r5
+ mflr r0
+ std r5, 8(r1)
+diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
+index f201af8be580..31b5ca8f8c3d 100644
+--- a/arch/s390/include/asm/ccwdev.h
++++ b/arch/s390/include/asm/ccwdev.h
+@@ -219,7 +219,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
+ #define to_ccwdev(n) container_of(n, struct ccw_device, dev)
+ #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
+
+-extern struct ccw_device *ccw_device_probe_console(void);
++extern struct ccw_device *ccw_device_probe_console(struct ccw_driver *);
+ extern void ccw_device_wait_idle(struct ccw_device *);
+ extern int ccw_device_force_console(struct ccw_device *);
+
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 776dafe918db..48bb1c129963 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -788,7 +788,8 @@ rerun_vcpu:
+ if (rc)
+ break;
+ if (kvm_is_ucontrol(vcpu->kvm))
+- rc = -EOPNOTSUPP;
++ /* Don't exit for host interrupts. */
++ rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
+ else
+ rc = kvm_handle_sie_intercept(vcpu);
+ } while (!signal_pending(current) && !rc);
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 96a4b150f958..906fba63b66d 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -276,7 +276,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
+ case BPF_S_LD_W_IND:
+ case BPF_S_LD_H_IND:
+ case BPF_S_LD_B_IND:
+- case BPF_S_LDX_B_MSH:
+ case BPF_S_LD_IMM:
+ case BPF_S_LD_MEM:
+ case BPF_S_MISC_TXA:
+diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
+index b959f5592604..8dfe645bcc4b 100644
+--- a/arch/sh/kernel/dumpstack.c
++++ b/arch/sh/kernel/dumpstack.c
+@@ -115,7 +115,7 @@ static int print_trace_stack(void *data, char *name)
+ */
+ static void print_trace_address(void *data, unsigned long addr, int reliable)
+ {
+- printk(data);
++ printk("%s", (char *)data);
+ printk_address(addr, reliable);
+ }
+
+diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
+index b7388a425f09..9b883a89aad5 100644
+--- a/arch/x86/boot/compressed/eboot.c
++++ b/arch/x86/boot/compressed/eboot.c
+@@ -865,6 +865,9 @@ fail:
+ * Because the x86 boot code expects to be passed a boot_params we
+ * need to create one ourselves (usually the bootloader would create
+ * one for us).
++ *
++ * The caller is responsible for filling out ->code32_start in the
++ * returned boot_params.
+ */
+ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
+ {
+@@ -921,8 +924,6 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
+ hdr->vid_mode = 0xffff;
+ hdr->boot_flag = 0xAA55;
+
+- hdr->code32_start = (__u64)(unsigned long)image->image_base;
+-
+ hdr->type_of_loader = 0x21;
+
+ /* Convert unicode cmdline to ascii */
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index 5d6f6891b188..b1bd969e26aa 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -50,6 +50,13 @@ ENTRY(efi_pe_entry)
+ pushl %eax
+ pushl %esi
+ pushl %ecx
++
++ call reloc
++reloc:
++ popl %ecx
++ subl reloc, %ecx
++ movl %ecx, BP_code32_start(%eax)
++
+ sub $0x4, %esp
+
+ ENTRY(efi_stub_entry)
+@@ -63,12 +70,7 @@ ENTRY(efi_stub_entry)
+ hlt
+ jmp 1b
+ 2:
+- call 3f
+-3:
+- popl %eax
+- subl $3b, %eax
+- subl BP_pref_address(%esi), %eax
+- add BP_code32_start(%esi), %eax
++ movl BP_code32_start(%esi), %eax
+ leal preferred_addr(%eax), %eax
+ jmp *%eax
+
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index c337422b575d..a55840367359 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -215,6 +215,8 @@ ENTRY(efi_pe_entry)
+ cmpq $0,%rax
+ je 1f
+ mov %rax, %rdx
++ leaq startup_32(%rip), %rax
++ movl %eax, BP_code32_start(%rdx)
+ popq %rsi
+ popq %rdi
+
+@@ -228,12 +230,7 @@ ENTRY(efi_stub_entry)
+ hlt
+ jmp 1b
+ 2:
+- call 3f
+-3:
+- popq %rax
+- subq $3b, %rax
+- subq BP_pref_address(%rsi), %rax
+- add BP_code32_start(%esi), %eax
++ movl BP_code32_start(%esi), %eax
+ leaq preferred_addr(%rax), %rax
+ jmp *%rax
+
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index e6253195a301..1ffc32dbe450 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -659,8 +659,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+ ret = -EPERM;
+ goto out;
+ }
+- run_sync();
+ out:
++ run_sync();
+ return ret;
+
+ fail_update:
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index ebc987398923..af1d14a9ebda 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -229,6 +229,17 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ }
+ }
+
++ /*
++ * On x86-64 we do not support 16-bit segments due to
++ * IRET leaking the high bits of the kernel stack address.
++ */
++#ifdef CONFIG_X86_64
++ if (!ldt_info.seg_32bit) {
++ error = -EINVAL;
++ goto out_unlock;
++ }
++#endif
++
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
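
Aside: this is the stopgap that espfix64 later superseded upstream: on x86-64, IRET restores only 16 bits of the stack pointer when returning to a 16-bit stack segment, leaking the remaining bits of the kernel stack address, so write_ldt() now refuses such descriptors outright. From userspace the refusal looks like this (probe is hypothetical):

    #include <asm/ldt.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Hypothetical probe: installing a 16-bit segment now fails with
     * -EINVAL on x86-64 kernels carrying this patch.
     */
    static int install_16bit_desc(void)
    {
            struct user_desc d;

            memset(&d, 0, sizeof(d));
            d.entry_number = 0;
            d.limit = 0xffff;
            d.seg_32bit = 0;            /* 16-bit: rejected after this fix */
            return syscall(SYS_modify_ldt, 1, &d, sizeof(d));
    }
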
+diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
+index f8e71ea60319..b2d247356c7d 100644
+--- a/drivers/acpi/acpica/nsrepair.c
++++ b/drivers/acpi/acpica/nsrepair.c
+@@ -207,13 +207,30 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
+ * this predefined name. Either one return value is expected, or none,
+ * for both methods and other objects.
+ *
+- * Exit now if there is no return object. Warning if one was expected.
++ * Try to fix if there was no return object. Warning if failed to fix.
+ */
+ if (!return_object) {
+ if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
+- ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+- ACPI_WARN_ALWAYS,
+- "Missing expected return value"));
++ if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
++ ACPI_WARN_PREDEFINED((AE_INFO,
++ info->full_pathname,
++ ACPI_WARN_ALWAYS,
++ "Found unexpected NULL package element"));
++
++ status =
++ acpi_ns_repair_null_element(info,
++ expected_btypes,
++ package_index,
++ return_object_ptr);
++ if (ACPI_SUCCESS(status)) {
++ return (AE_OK); /* Repair was successful */
++ }
++ } else {
++ ACPI_WARN_PREDEFINED((AE_INFO,
++ info->full_pathname,
++ ACPI_WARN_ALWAYS,
++ "Missing expected return value"));
++ }
+
+ return (AE_AML_NO_RETURN_VALUE);
+ }
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index dc11b7a64376..b4bdb8859485 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1169,18 +1169,18 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
+ return rc;
+
+ for (i = 0; i < host->n_ports; i++) {
+- const char* desc;
+ struct ahci_port_priv *pp = host->ports[i]->private_data;
+
+- /* pp is NULL for dummy ports */
+- if (pp)
+- desc = pp->irq_desc;
+- else
+- desc = dev_driver_string(host->dev);
++ /* Do not receive interrupts sent by dummy ports */
++ if (!pp) {
++ disable_irq(irq + i);
++ continue;
++ }
+
+- rc = devm_request_threaded_irq(host->dev,
+- irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
+- desc, host->ports[i]);
++ rc = devm_request_threaded_irq(host->dev, irq + i,
++ ahci_hw_interrupt,
++ ahci_thread_fn, IRQF_SHARED,
++ pp->irq_desc, host->ports[i]);
+ if (rc)
+ goto out_free_irqs;
+ }
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index c5d056e974f1..97ae08de4b52 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4224,8 +4224,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+
+ /* devices that don't properly handle queued TRIM commands */
+- { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+- { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
++ { "Micron_M500*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, },
++ { "Crucial_CT???M500SSD*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, },
++ { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
++ { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
+ /*
+ * Some WD SATA-I drives spin up and down erratically when the link
+@@ -4792,21 +4794,26 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
+ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+ {
+ struct ata_queued_cmd *qc = NULL;
+- unsigned int i;
++ unsigned int i, tag;
+
+ /* no command while frozen */
+ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
+ return NULL;
+
+- /* the last tag is reserved for internal command. */
+- for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
+- if (!test_and_set_bit(i, &ap->qc_allocated)) {
+- qc = __ata_qc_from_tag(ap, i);
++ for (i = 0; i < ATA_MAX_QUEUE; i++) {
++ tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
++
++ /* the last tag is reserved for internal command. */
++ if (tag == ATA_TAG_INTERNAL)
++ continue;
++
++ if (!test_and_set_bit(tag, &ap->qc_allocated)) {
++ qc = __ata_qc_from_tag(ap, tag);
++ qc->tag = tag;
++ ap->last_tag = tag;
+ break;
+ }
+-
+- if (qc)
+- qc->tag = i;
++ }
+
+ return qc;
+ }
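
The rewritten allocator starts its scan one past the most recently issued tag instead of always at bit 0, so back-to-back commands no longer reuse tag 0, and the reserved internal tag is skipped explicitly. The same round-robin scheme in a self-contained sketch (plain C; the real code uses the atomic test_and_set_bit() on ap->qc_allocated):

    #define MAX_TAGS     32
    #define TAG_INTERNAL (MAX_TAGS - 1)     /* mirrors ATA_TAG_INTERNAL */

    static unsigned long tag_bitmap;
    static unsigned int last_tag;

    /* Returns a free tag, rotating through the map; -1 if all are busy. */
    static int alloc_tag_round_robin(void)
    {
            unsigned int i, tag;

            for (i = 0; i < MAX_TAGS; i++) {
                    tag = (i + last_tag + 1) % MAX_TAGS;
                    if (tag == TAG_INTERNAL)
                            continue;       /* reserved for internal commands */
                    if (!(tag_bitmap & (1UL << tag))) {
                            tag_bitmap |= 1UL << tag;
                            last_tag = tag;
                            return tag;
                    }
            }
            return -1;
    }
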
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 690011de912a..cf3e5042193c 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -3053,7 +3053,10 @@ static int raw_cmd_copyout(int cmd, void __user *param,
+ int ret;
+
+ while (ptr) {
+- ret = copy_to_user(param, ptr, sizeof(*ptr));
++ struct floppy_raw_cmd cmd = *ptr;
++ cmd.next = NULL;
++ cmd.kernel_data = NULL;
++ ret = copy_to_user(param, &cmd, sizeof(cmd));
+ if (ret)
+ return -EFAULT;
+ param += sizeof(struct floppy_raw_cmd);
+@@ -3107,10 +3110,11 @@ loop:
+ return -ENOMEM;
+ *rcmd = ptr;
+ ret = copy_from_user(ptr, param, sizeof(*ptr));
+- if (ret)
+- return -EFAULT;
+ ptr->next = NULL;
+ ptr->buffer_length = 0;
++ ptr->kernel_data = NULL;
++ if (ret)
++ return -EFAULT;
+ param += sizeof(struct floppy_raw_cmd);
+ if (ptr->cmd_count > 33)
+ /* the command may now also take up the space
+@@ -3126,7 +3130,6 @@ loop:
+ for (i = 0; i < 16; i++)
+ ptr->reply[i] = 0;
+ ptr->resultcode = 0;
+- ptr->kernel_data = NULL;
+
+ if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
+ if (ptr->length <= 0)
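
Both floppy hunks plug the same hole: struct floppy_raw_cmd carries kernel pointers in its next and kernel_data fields, so copying the live structure straight to user space leaked kernel addresses, and on the copy-in side those fields are now reset before the -EFAULT return so the caller's cleanup path never walks stale user-controlled pointers. The copy-out side of the pattern, condensed into a kernel-style sketch using the field names from the patch:

    #include <linux/fd.h>           /* struct floppy_raw_cmd */
    #include <linux/uaccess.h>      /* copy_to_user() */

    /* Copy out a sanitized stack copy, never the live kernel struct. */
    static int copyout_sanitized(void __user *param, struct floppy_raw_cmd *ptr)
    {
            struct floppy_raw_cmd cmd = *ptr;

            cmd.next = NULL;            /* kernel pointers must not leak */
            cmd.kernel_data = NULL;
            return copy_to_user(param, &cmd, sizeof(cmd)) ? -EFAULT : 0;
    }
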
+diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
+index 952dbfe22126..4d26c25aa9c5 100644
+--- a/drivers/block/mtip32xx/mtip32xx.c
++++ b/drivers/block/mtip32xx/mtip32xx.c
+@@ -621,6 +621,12 @@ static void mtip_timeout_function(unsigned long int data)
+ */
+ writel(1 << bit, port->completed[group]);
+
++ /* Unmap the DMA scatter list entries */
++ dma_unmap_sg(&port->dd->pdev->dev,
++ command->sg,
++ command->scatter_ents,
++ command->direction);
++
+ /* Call the async completion callback. */
+ if (likely(command->async_callback))
+ command->async_callback(command->async_data,
+@@ -628,12 +634,6 @@ static void mtip_timeout_function(unsigned long int data)
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+- /* Unmap the DMA scatter list entries */
+- dma_unmap_sg(&port->dd->pdev->dev,
+- command->sg,
+- command->scatter_ents,
+- command->direction);
+-
+ /*
+ * Clear the allocated bit and active tag for the
+ * command.
+@@ -711,6 +711,12 @@ static void mtip_async_complete(struct mtip_port *port,
+ "Command tag %d failed due to TFE\n", tag);
+ }
+
++ /* Unmap the DMA scatter list entries */
++ dma_unmap_sg(&dd->pdev->dev,
++ command->sg,
++ command->scatter_ents,
++ command->direction);
++
+ /* Upper layer callback */
+ if (likely(command->async_callback))
+ command->async_callback(command->async_data, cb_status);
+@@ -718,12 +724,6 @@ static void mtip_async_complete(struct mtip_port *port,
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+- /* Unmap the DMA scatter list entries */
+- dma_unmap_sg(&dd->pdev->dev,
+- command->sg,
+- command->scatter_ents,
+- command->direction);
+-
+ /* Clear the allocated and active bits for the command */
+ atomic_set(&port->commands[tag].active, 0);
+ release_slot(port, tag);
+@@ -4040,6 +4040,7 @@ skip_create_disk:
+ blk_queue_max_hw_sectors(dd->queue, 0xffff);
+ blk_queue_max_segment_size(dd->queue, 0x400000);
+ blk_queue_io_min(dd->queue, 4096);
++ blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
+
+ /*
+ * write back cache is not supported in the device. FUA depends on
+diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
+index 7be41e676a64..aa21299ec7d2 100644
+--- a/drivers/clk/clk-s2mps11.c
++++ b/drivers/clk/clk-s2mps11.c
+@@ -130,7 +130,7 @@ static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev)
+ int i;
+
+ if (!iodev->dev->of_node)
+- return NULL;
++ return ERR_PTR(-EINVAL);
+
+ clk_np = of_find_node_by_name(iodev->dev->of_node, "clocks");
+ if (!clk_np) {
+diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
+index 10b577fcf48d..0ad40e4a7ae0 100644
+--- a/drivers/dma/edma.c
++++ b/drivers/dma/edma.c
+@@ -170,11 +170,13 @@ static void edma_execute(struct edma_chan *echan)
+ if (edesc->processed == edesc->pset_nr)
+ edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);
+
+- edma_resume(echan->ch_num);
+-
+ if (edesc->processed <= MAX_NR_SG) {
+ dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+ edma_start(echan->ch_num);
++ } else {
++ dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
++ echan->ch_num, edesc->processed);
++ edma_resume(echan->ch_num);
+ }
+
+ /*
+diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
+index f8e6af20dfbf..d599fc42ae8b 100644
+--- a/drivers/gpio/gpio-mxs.c
++++ b/drivers/gpio/gpio-mxs.c
+@@ -214,7 +214,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
+ ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
+ ct->regs.mask = PINCTRL_IRQEN(port);
+
+- irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
++ irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
++ IRQ_NOREQUEST, 0);
+ }
+
+ static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
+index 138364d91782..bfcfd0c202ad 100644
+--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
++++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
+@@ -11,6 +11,7 @@
+ #include <linux/module.h>
+ #include <linux/console.h>
+ #include <drm/drmP.h>
++#include <drm/drm_crtc_helper.h>
+
+ #include "cirrus_drv.h"
+
+@@ -75,6 +76,41 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
+ drm_put_dev(dev);
+ }
+
++static int cirrus_pm_suspend(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ struct cirrus_device *cdev = drm_dev->dev_private;
++
++ drm_kms_helper_poll_disable(drm_dev);
++
++ if (cdev->mode_info.gfbdev) {
++ console_lock();
++ fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1);
++ console_unlock();
++ }
++
++ return 0;
++}
++
++static int cirrus_pm_resume(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ struct cirrus_device *cdev = drm_dev->dev_private;
++
++ drm_helper_resume_force_mode(drm_dev);
++
++ if (cdev->mode_info.gfbdev) {
++ console_lock();
++ fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0);
++ console_unlock();
++ }
++
++ drm_kms_helper_poll_enable(drm_dev);
++ return 0;
++}
++
+ static const struct file_operations cirrus_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+@@ -104,11 +140,17 @@ static struct drm_driver driver = {
+ .dumb_destroy = drm_gem_dumb_destroy,
+ };
+
++static const struct dev_pm_ops cirrus_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(cirrus_pm_suspend,
++ cirrus_pm_resume)
++};
++
+ static struct pci_driver cirrus_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = cirrus_pci_probe,
+ .remove = cirrus_pci_remove,
++ .driver.pm = &cirrus_pm_ops,
+ };
+
+ static int __init cirrus_init(void)
+diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
+index 3592616d484b..c6ec012befcd 100644
+--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
++++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
+@@ -308,6 +308,9 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
+
+ WREG_HDR(hdr);
+ cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
++
++ /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
++ outb(0x20, 0x3c0);
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index 10d1de5bce6f..e8edbb751e9a 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -758,6 +758,14 @@ static const struct dmi_system_id intel_no_crt[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
++ {
++ .callback = intel_no_crt_dmi_callback,
++ .ident = "DELL XPS 8700",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
++ },
++ },
+ { }
+ };
+
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 13c23a4789de..8dd98d4fc124 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -55,6 +55,9 @@ static __u32 vmbus_get_next_version(__u32 current_version)
+ case (VERSION_WIN8):
+ return VERSION_WIN7;
+
++ case (VERSION_WIN8_1):
++ return VERSION_WIN8;
++
+ case (VERSION_WS2008):
+ default:
+ return VERSION_INVAL;
+@@ -79,6 +82,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
+ msg->monitor_page2 = virt_to_phys(
+ (void *)((unsigned long)vmbus_connection.monitor_pages +
+ PAGE_SIZE));
++ if (version == VERSION_WIN8_1)
++ msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
+
+ /*
+ * Add to list before we send the request since we may
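
The new VERSION_WIN8_1 case slots the 8.1 protocol into the step-down chain. A hypothetical caller fragment showing how such a helper is typically driven (the actual loop lives in vmbus_connect(), which is not part of this hunk; this is a simplified sketch):

    /* Try the newest protocol first and fall back one version at a
     * time until the host accepts or VERSION_INVAL ends the search. */
    version = VERSION_CURRENT;
    do {
            ret = vmbus_negotiate_version(msginfo, version);
            if (ret == 0)
                    break;          /* host accepted this version */
            version = vmbus_get_next_version(version);
    } while (version != VERSION_INVAL);
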
+diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
+index 2db7dcd826b9..376de1cc85db 100644
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -119,7 +119,8 @@ static ssize_t iio_scan_el_show(struct device *dev,
+ int ret;
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+
+- ret = test_bit(to_iio_dev_attr(attr)->address,
++ /* Ensure ret is 0 or 1. */
++ ret = !!test_bit(to_iio_dev_attr(attr)->address,
+ indio_dev->buffer->scan_mask);
+
+ return sprintf(buf, "%d\n", ret);
+@@ -789,7 +790,8 @@ int iio_scan_mask_query(struct iio_dev *indio_dev,
+ if (!buffer->scan_mask)
+ return 0;
+
+- return test_bit(bit, buffer->scan_mask);
++ /* Ensure return value is 0 or 1. */
++ return !!test_bit(bit, buffer->scan_mask);
+ };
+ EXPORT_SYMBOL_GPL(iio_scan_mask_query);
+
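
Both iio hunks rely on the same contract: test_bit() only promises zero versus non-zero, not 0 versus 1, so a result that is stored, summed, or printed as a boolean has to be squashed with !!. A stand-alone illustration of why the raw value is not enough:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Sketch of a test_bit()-style helper: it returns the masked word,
     * so a set bit yields *some* non-zero value, not necessarily 1. */
    static unsigned long my_test_bit(unsigned int nr, const unsigned long *addr)
    {
            return addr[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
    }

    int main(void)
    {
            unsigned long mask = 1UL << 8;

            printf("raw:        %lu\n", my_test_bit(8, &mask));   /* 256 */
            printf("normalized: %d\n", !!my_test_bit(8, &mask));  /* 1   */
            return 0;
    }
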
+diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
+index 212150c25ea0..8cc837537768 100644
+--- a/drivers/infiniband/hw/ehca/ehca_cq.c
++++ b/drivers/infiniband/hw/ehca/ehca_cq.c
+@@ -283,6 +283,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
+ (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
+ if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+ ehca_err(device, "Copy to udata failed.");
++ cq = ERR_PTR(-EFAULT);
+ goto create_cq_exit4;
+ }
+ }
+diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
+index 714293b78518..e2f9a51f4a38 100644
+--- a/drivers/infiniband/hw/ipath/ipath_diag.c
++++ b/drivers/infiniband/hw/ipath/ipath_diag.c
+@@ -326,7 +326,7 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ size_t count, loff_t *off)
+ {
+ u32 __iomem *piobuf;
+- u32 plen, clen, pbufn;
++ u32 plen, pbufn, maxlen_reserve;
+ struct ipath_diag_pkt odp;
+ struct ipath_diag_xpkt dp;
+ u32 *tmpbuf = NULL;
+@@ -335,51 +335,29 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ u64 val;
+ u32 l_state, lt_state; /* LinkState, LinkTrainingState */
+
+- if (count < sizeof(odp)) {
+- ret = -EINVAL;
+- goto bail;
+- }
+
+ if (count == sizeof(dp)) {
+ if (copy_from_user(&dp, data, sizeof(dp))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+- } else if (copy_from_user(&odp, data, sizeof(odp))) {
+- ret = -EFAULT;
++ } else if (count == sizeof(odp)) {
++ if (copy_from_user(&odp, data, sizeof(odp))) {
++ ret = -EFAULT;
++ goto bail;
++ }
++ } else {
++ ret = -EINVAL;
+ goto bail;
+ }
+
+- /*
+- * Due to padding/alignment issues (lessened with new struct)
+- * the old and new structs are the same length. We need to
+- * disambiguate them, which we can do because odp.len has never
+- * been less than the total of LRH+BTH+DETH so far, while
+- * dp.unit (same offset) unit is unlikely to get that high.
+- * Similarly, dp.data, the pointer to user at the same offset
+- * as odp.unit, is almost certainly at least one (512byte)page
+- * "above" NULL. The if-block below can be omitted if compatibility
+- * between a new driver and older diagnostic code is unimportant.
+- * compatibility the other direction (new diags, old driver) is
+- * handled in the diagnostic code, with a warning.
+- */
+- if (dp.unit >= 20 && dp.data < 512) {
+- /* very probable version mismatch. Fix it up */
+- memcpy(&odp, &dp, sizeof(odp));
+- /* We got a legacy dp, copy elements to dp */
+- dp.unit = odp.unit;
+- dp.data = odp.data;
+- dp.len = odp.len;
+- dp.pbc_wd = 0; /* Indicate we need to compute PBC wd */
+- }
+-
+ /* send count must be an exact number of dwords */
+ if (dp.len & 3) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+- clen = dp.len >> 2;
++ plen = dp.len >> 2;
+
+ dd = ipath_lookup(dp.unit);
+ if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
+@@ -422,16 +400,22 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ goto bail;
+ }
+
+- /* need total length before first word written */
+- /* +1 word is for the qword padding */
+- plen = sizeof(u32) + dp.len;
+-
+- if ((plen + 4) > dd->ipath_ibmaxlen) {
++ /*
++ * need total length before first word written, plus 2 Dwords. One Dword
++ * is for padding so we get the full user data when not aligned on
++ * a word boundary. The other Dword is to make sure we have room for the
++ * ICRC which gets tacked on later.
++ */
++ maxlen_reserve = 2 * sizeof(u32);
++ if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
+ ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
+- plen - 4, dd->ipath_ibmaxlen);
++ dp.len, dd->ipath_ibmaxlen);
+ ret = -EINVAL;
+- goto bail; /* before writing pbc */
++ goto bail;
+ }
++
++ plen = sizeof(u32) + dp.len;
++
+ tmpbuf = vmalloc(plen);
+ if (!tmpbuf) {
+ dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
+@@ -473,11 +457,11 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ */
+ if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
+ ipath_flush_wc();
+- __iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
++ __iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
+ ipath_flush_wc();
+- __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
++ __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
+ } else
+- __iowrite32_copy(piobuf + 2, tmpbuf, clen);
++ __iowrite32_copy(piobuf + 2, tmpbuf, plen);
+
+ ipath_flush_wc();
+
+diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
+index 5b71d43bd89c..42dde06fdb91 100644
+--- a/drivers/infiniband/hw/mthca/mthca_provider.c
++++ b/drivers/infiniband/hw/mthca/mthca_provider.c
+@@ -695,6 +695,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
+
+ if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
+ mthca_free_cq(to_mdev(ibdev), cq);
++ err = -EFAULT;
+ goto err_free;
+ }
+
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index 5b53ca5a2284..09999cdec3b1 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -1186,7 +1186,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n");
+- return NULL;
++ return ERR_PTR(-EFAULT);
+ }
+ if (req.user_wqe_buffers) {
+ virt_wqs = 1;
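
The ehca, mthca, and nes fixes above, like the clk-s2mps11 change earlier in this patch, restore the same convention: a function whose callers test the result with IS_ERR() must return ERR_PTR(-errno) on failure, because a bare NULL slips past IS_ERR() and is then dereferenced or silently treated as success. A condensed sketch of the idiom (hypothetical helper; the struct and names are illustrative):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct my_obj { int id; };      /* illustrative */

    static struct my_obj *my_obj_create(int fail)
    {
            struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (!obj)
                    return ERR_PTR(-ENOMEM);
            if (fail) {                     /* e.g. ib_copy_to_udata() failed */
                    kfree(obj);
                    return ERR_PTR(-EFAULT);        /* never plain NULL */
            }
            return obj;
    }

    /* Caller side:
     *      obj = my_obj_create(0);
     *      if (IS_ERR(obj))
     *              return PTR_ERR(obj);
     */
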
+diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
+index 275f247f9fca..2023cd61b897 100644
+--- a/drivers/infiniband/hw/qib/qib_file_ops.c
++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
+@@ -1578,7 +1578,7 @@ static int do_qib_user_sdma_queue_create(struct file *fp)
+ struct qib_ctxtdata *rcd = fd->rcd;
+ struct qib_devdata *dd = rcd->dd;
+
+- if (dd->flags & QIB_HAS_SEND_DMA)
++ if (dd->flags & QIB_HAS_SEND_DMA) {
+
+ fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
+ dd->unit,
+@@ -1586,6 +1586,7 @@ static int do_qib_user_sdma_queue_create(struct file *fp)
+ fd->subctxt);
+ if (!fd->pq)
+ return -ENOMEM;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
+index 24e802f4ea2f..76c3e177164d 100644
+--- a/drivers/infiniband/hw/qib/qib_init.c
++++ b/drivers/infiniband/hw/qib/qib_init.c
+@@ -1097,14 +1097,10 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
+ int ret;
+
+ dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
+- if (!dd) {
+- dd = ERR_PTR(-ENOMEM);
+- goto bail;
+- }
++ if (!dd)
++ return ERR_PTR(-ENOMEM);
+
+-#ifdef CONFIG_DEBUG_FS
+- qib_dbg_ibdev_init(&dd->verbs_dev);
+-#endif
++ INIT_LIST_HEAD(&dd->list);
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(&qib_devs_lock, flags);
+@@ -1121,11 +1117,6 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
+ if (ret < 0) {
+ qib_early_err(&pdev->dev,
+ "Could not allocate unit ID: error %d\n", -ret);
+-#ifdef CONFIG_DEBUG_FS
+- qib_dbg_ibdev_exit(&dd->verbs_dev);
+-#endif
+- ib_dealloc_device(&dd->verbs_dev.ibdev);
+- dd = ERR_PTR(ret);
+ goto bail;
+ }
+
+@@ -1139,9 +1130,15 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
+ qib_early_err(&pdev->dev,
+ "Could not alloc cpulist info, cpu affinity might be wrong\n");
+ }
+-
+-bail:
++#ifdef CONFIG_DEBUG_FS
++ qib_dbg_ibdev_init(&dd->verbs_dev);
++#endif
+ return dd;
++bail:
++ if (!list_empty(&dd->list))
++ list_del_init(&dd->list);
++ ib_dealloc_device(&dd->verbs_dev.ibdev);
++	return ERR_PTR(ret);
+ }
+
+ /*
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 09c71293ab4b..02e4d2efa208 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -426,11 +426,18 @@ isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
+ {
+ struct fast_reg_descriptor *fr_desc;
+ struct isert_device *device = isert_conn->conn_device;
+- int i, ret;
++ struct se_session *se_sess = isert_conn->conn->sess->se_sess;
++ struct se_node_acl *se_nacl = se_sess->se_node_acl;
++ int i, ret, tag_num;
++ /*
++ * Setup the number of FRMRs based upon the number of tags
++ * available to session in iscsi_target_locate_portal().
++ */
++ tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
++ tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
+
+- INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
+ isert_conn->conn_frwr_pool_size = 0;
+- for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
++ for (i = 0; i < tag_num; i++) {
+ fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
+ if (!fr_desc) {
+ pr_err("Failed to allocate fast_reg descriptor\n");
+@@ -502,6 +509,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ kref_get(&isert_conn->conn_kref);
+ mutex_init(&isert_conn->conn_mutex);
+ spin_lock_init(&isert_conn->conn_lock);
++ INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
+
+ cma_id->context = isert_conn;
+ isert_conn->conn_cm_id = cma_id;
+@@ -559,14 +567,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ isert_conn->conn_pd = device->dev_pd;
+ isert_conn->conn_mr = device->dev_mr;
+
+- if (device->use_frwr) {
+- ret = isert_conn_create_frwr_pool(isert_conn);
+- if (ret) {
+- pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
+- goto out_frwr;
+- }
+- }
+-
+ ret = isert_conn_setup_qp(isert_conn, cma_id);
+ if (ret)
+ goto out_conn_dev;
+@@ -580,9 +580,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ return 0;
+
+ out_conn_dev:
+- if (device->use_frwr)
+- isert_conn_free_frwr_pool(isert_conn);
+-out_frwr:
+ isert_device_try_release(device);
+ out_rsp_dma_map:
+ ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
+@@ -930,6 +927,15 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+ }
+ if (!login->login_failed) {
+ if (login->login_complete) {
++ if (isert_conn->conn_device->use_frwr) {
++ ret = isert_conn_create_frwr_pool(isert_conn);
++ if (ret) {
++ pr_err("Conn: %p failed to create"
++ " frwr_pool\n", isert_conn);
++ return ret;
++ }
++ }
++
+ ret = isert_alloc_rx_descriptors(isert_conn);
+ if (ret)
+ return ret;
+@@ -1414,7 +1420,7 @@ isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn
+ }
+
+ static void
+-isert_put_cmd(struct isert_cmd *isert_cmd)
++isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
+ {
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+ struct isert_conn *isert_conn = isert_cmd->conn;
+@@ -1430,8 +1436,21 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+- if (cmd->data_direction == DMA_TO_DEVICE)
++ if (cmd->data_direction == DMA_TO_DEVICE) {
+ iscsit_stop_dataout_timer(cmd);
++ /*
++ * Check for special case during comp_err where
++ * WRITE_PENDING has been handed off from core,
++ * but requires an extra target_put_sess_cmd()
++ * before transport_generic_free_cmd() below.
++ */
++ if (comp_err &&
++ cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
++ struct se_cmd *se_cmd = &cmd->se_cmd;
++
++ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++ }
++ }
+
+ device->unreg_rdma_mem(isert_cmd, isert_conn);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+@@ -1486,7 +1505,7 @@ isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
+
+ static void
+ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
+- struct ib_device *ib_dev)
++ struct ib_device *ib_dev, bool comp_err)
+ {
+ if (isert_cmd->pdu_buf_dma != 0) {
+ pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
+@@ -1496,7 +1515,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
+ }
+
+ isert_unmap_tx_desc(tx_desc, ib_dev);
+- isert_put_cmd(isert_cmd);
++ isert_put_cmd(isert_cmd, comp_err);
+ }
+
+ static void
+@@ -1540,14 +1559,14 @@ isert_do_control_comp(struct work_struct *work)
+ iscsit_tmr_post_handler(cmd, cmd->conn);
+
+ cmd->i_state = ISTATE_SENT_STATUS;
+- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
++ isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
+ break;
+ case ISTATE_SEND_REJECT:
+ pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
+ atomic_dec(&isert_conn->post_send_buf_count);
+
+ cmd->i_state = ISTATE_SENT_STATUS;
+- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
++ isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
+ break;
+ case ISTATE_SEND_LOGOUTRSP:
+ pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
+@@ -1561,7 +1580,7 @@ isert_do_control_comp(struct work_struct *work)
+ case ISTATE_SEND_TEXTRSP:
+ atomic_dec(&isert_conn->post_send_buf_count);
+ cmd->i_state = ISTATE_SENT_STATUS;
+- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
++ isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
+ break;
+ default:
+ pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
+@@ -1592,7 +1611,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+
+ cmd->i_state = ISTATE_SENT_STATUS;
+- isert_completion_put(tx_desc, isert_cmd, ib_dev);
++ isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
+ }
+
+ static void
+@@ -1646,7 +1665,7 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn
+ if (!isert_cmd)
+ isert_unmap_tx_desc(tx_desc, ib_dev);
+ else
+- isert_completion_put(tx_desc, isert_cmd, ib_dev);
++ isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
+ }
+
+ static void
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 6c923c7039a1..56f2cf790779 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1078,6 +1078,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+ {
++ struct ib_device *dev = ch->sport->sdev->device;
+ struct se_cmd *cmd;
+ struct scatterlist *sg, *sg_orig;
+ int sg_cnt;
+@@ -1124,7 +1125,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+
+ db = ioctx->rbufs;
+ tsize = cmd->data_length;
+- dma_len = sg_dma_len(&sg[0]);
++ dma_len = ib_sg_dma_len(dev, &sg[0]);
+ riu = ioctx->rdma_ius;
+
+ /*
+@@ -1155,7 +1156,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ ++j;
+ if (j < count) {
+ sg = sg_next(sg);
+- dma_len = sg_dma_len(sg);
++ dma_len = ib_sg_dma_len(
++ dev, sg);
+ }
+ }
+ } else {
+@@ -1192,8 +1194,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ tsize = cmd->data_length;
+ riu = ioctx->rdma_ius;
+ sg = sg_orig;
+- dma_len = sg_dma_len(&sg[0]);
+- dma_addr = sg_dma_address(&sg[0]);
++ dma_len = ib_sg_dma_len(dev, &sg[0]);
++ dma_addr = ib_sg_dma_address(dev, &sg[0]);
+
+ /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */
+ for (i = 0, j = 0;
+@@ -1216,8 +1218,10 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ ++j;
+ if (j < count) {
+ sg = sg_next(sg);
+- dma_len = sg_dma_len(sg);
+- dma_addr = sg_dma_address(sg);
++ dma_len = ib_sg_dma_len(
++ dev, sg);
++ dma_addr = ib_sg_dma_address(
++ dev, sg);
+ }
+ }
+ } else {
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index d8d49d10f9bb..3c511c4adaca 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1515,6 +1515,22 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+ .driver_data = (int []){1232, 5710, 1156, 4696},
+ },
+ {
++ /* Lenovo ThinkPad Edge E431 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
++ },
++ .driver_data = (int []){1024, 5022, 2508, 4832},
++ },
++ {
++ /* Lenovo ThinkPad T431s */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
++ },
++ .driver_data = (int []){1024, 5112, 2024, 4832},
++ },
++ {
+ /* Lenovo ThinkPad T440s */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+@@ -1523,6 +1539,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
++ /* Lenovo ThinkPad L440 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
++ },
++ .driver_data = (int []){1024, 5112, 2024, 4832},
++ },
++ {
+ /* Lenovo ThinkPad T540p */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+@@ -1530,6 +1554,32 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+ },
+ .driver_data = (int []){1024, 5056, 2058, 4832},
+ },
++ {
++ /* Lenovo ThinkPad L540 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
++ },
++ .driver_data = (int []){1024, 5112, 2024, 4832},
++ },
++ {
++ /* Lenovo Yoga S1 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
++ "ThinkPad S1 Yoga"),
++ },
++ .driver_data = (int []){1232, 5710, 1156, 4696},
++ },
++ {
++ /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION,
++ "ThinkPad X1 Carbon 2nd"),
++ },
++ .driver_data = (int []){1024, 5112, 2024, 4832},
++ },
+ #endif
+ { }
+ };
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 1af7255bbffb..1d38019bb022 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -114,6 +114,12 @@ struct dm_cache_metadata {
+ unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
+ size_t policy_hint_size;
+ struct dm_cache_statistics stats;
++
++ /*
++ * Reading the space map root can fail, so we read it into this
++ * buffer before the superblock is locked and updated.
++ */
++ __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
+ };
+
+ /*-------------------------------------------------------------------
+@@ -242,11 +248,31 @@ static void __setup_mapping_info(struct dm_cache_metadata *cmd)
+ }
+ }
+
++static int __save_sm_root(struct dm_cache_metadata *cmd)
++{
++ int r;
++ size_t metadata_len;
++
++ r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
++ if (r < 0)
++ return r;
++
++ return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
++ metadata_len);
++}
++
++static void __copy_sm_root(struct dm_cache_metadata *cmd,
++ struct cache_disk_superblock *disk_super)
++{
++ memcpy(&disk_super->metadata_space_map_root,
++ &cmd->metadata_space_map_root,
++ sizeof(cmd->metadata_space_map_root));
++}
++
+ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
+ {
+ int r;
+ struct dm_block *sblock;
+- size_t metadata_len;
+ struct cache_disk_superblock *disk_super;
+ sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
+
+@@ -254,12 +280,16 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
+ if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
+ bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
+
+- r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
++ r = dm_tm_pre_commit(cmd->tm);
+ if (r < 0)
+ return r;
+
+- r = dm_tm_pre_commit(cmd->tm);
+- if (r < 0)
++ /*
++ * dm_sm_copy_root() can fail. So we need to do it before we start
++ * updating the superblock.
++ */
++ r = __save_sm_root(cmd);
++ if (r)
+ return r;
+
+ r = superblock_lock_zero(cmd, &sblock);
+@@ -275,10 +305,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
+ memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
+ disk_super->policy_hint_size = 0;
+
+- r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
+- metadata_len);
+- if (r < 0)
+- goto bad_locked;
++ __copy_sm_root(cmd, disk_super);
+
+ disk_super->mapping_root = cpu_to_le64(cmd->root);
+ disk_super->hint_root = cpu_to_le64(cmd->hint_root);
+@@ -295,10 +322,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
+ disk_super->write_misses = cpu_to_le32(0);
+
+ return dm_tm_commit(cmd->tm, sblock);
+-
+-bad_locked:
+- dm_bm_unlock(sblock);
+- return r;
+ }
+
+ static int __format_metadata(struct dm_cache_metadata *cmd)
+@@ -511,8 +534,9 @@ static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
+ disk_super = dm_block_data(sblock);
+ update_flags(disk_super, mutator);
+ read_superblock_fields(cmd, disk_super);
++ dm_bm_unlock(sblock);
+
+- return dm_bm_flush_and_unlock(cmd->bm, sblock);
++ return dm_bm_flush(cmd->bm);
+ }
+
+ static int __begin_transaction(struct dm_cache_metadata *cmd)
+@@ -540,7 +564,6 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
+ flags_mutator mutator)
+ {
+ int r;
+- size_t metadata_len;
+ struct cache_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+@@ -558,8 +581,8 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
+ if (r < 0)
+ return r;
+
+- r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
+- if (r < 0)
++ r = __save_sm_root(cmd);
++ if (r)
+ return r;
+
+ r = superblock_lock(cmd, &sblock);
+@@ -586,13 +609,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
+ disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
+ disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
+ disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
+-
+- r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
+- metadata_len);
+- if (r < 0) {
+- dm_bm_unlock(sblock);
+- return r;
+- }
++ __copy_sm_root(cmd, disk_super);
+
+ return dm_tm_commit(cmd->tm, sblock);
+ }
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 6ab68e058a0a..d64bf7d6c8fe 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -162,7 +162,7 @@ struct cache {
+ */
+ dm_dblock_t discard_nr_blocks;
+ unsigned long *discard_bitset;
+- uint32_t discard_block_size; /* a power of 2 times sectors per block */
++ uint32_t discard_block_size;
+
+ /*
+ * Rather than reconstructing the table line for the status we just
+@@ -1908,35 +1908,6 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
+ return 0;
+ }
+
+-/*
+- * We want the discard block size to be a power of two, at least the size
+- * of the cache block size, and have no more than 2^14 discard blocks
+- * across the origin.
+- */
+-#define MAX_DISCARD_BLOCKS (1 << 14)
+-
+-static bool too_many_discard_blocks(sector_t discard_block_size,
+- sector_t origin_size)
+-{
+- (void) sector_div(origin_size, discard_block_size);
+-
+- return origin_size > MAX_DISCARD_BLOCKS;
+-}
+-
+-static sector_t calculate_discard_block_size(sector_t cache_block_size,
+- sector_t origin_size)
+-{
+- sector_t discard_block_size;
+-
+- discard_block_size = roundup_pow_of_two(cache_block_size);
+-
+- if (origin_size)
+- while (too_many_discard_blocks(discard_block_size, origin_size))
+- discard_block_size *= 2;
+-
+- return discard_block_size;
+-}
+-
+ #define DEFAULT_MIGRATION_THRESHOLD 2048
+
+ static int cache_create(struct cache_args *ca, struct cache **result)
+@@ -2041,9 +2012,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
+ }
+ clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
+
+- cache->discard_block_size =
+- calculate_discard_block_size(cache->sectors_per_block,
+- cache->origin_sectors);
++ cache->discard_block_size = cache->sectors_per_block;
+ cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
+ cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
+ if (!cache->discard_bitset) {
+@@ -2630,7 +2599,7 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
+ /*
+ * FIXME: these limits may be incompatible with the cache device
+ */
+- limits->max_discard_sectors = cache->discard_block_size * 1024;
++ limits->max_discard_sectors = cache->discard_block_size;
+ limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+ }
+
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 3bb4506582a9..07a6ea3a9820 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -192,6 +192,13 @@ struct dm_pool_metadata {
+ * operation possible in this state is the closing of the device.
+ */
+ bool fail_io:1;
++
++ /*
++	 * Reading the space map roots can fail, so we read them into these
++ * buffers before the superblock is locked and updated.
++ */
++ __u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
++ __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
+ };
+
+ struct dm_thin_device {
+@@ -431,26 +438,53 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
+ pmd->details_info.value_type.equal = NULL;
+ }
+
++static int save_sm_roots(struct dm_pool_metadata *pmd)
++{
++ int r;
++ size_t len;
++
++ r = dm_sm_root_size(pmd->metadata_sm, &len);
++ if (r < 0)
++ return r;
++
++ r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
++ if (r < 0)
++ return r;
++
++ r = dm_sm_root_size(pmd->data_sm, &len);
++ if (r < 0)
++ return r;
++
++ return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
++}
++
++static void copy_sm_roots(struct dm_pool_metadata *pmd,
++ struct thin_disk_superblock *disk)
++{
++ memcpy(&disk->metadata_space_map_root,
++ &pmd->metadata_space_map_root,
++ sizeof(pmd->metadata_space_map_root));
++
++ memcpy(&disk->data_space_map_root,
++ &pmd->data_space_map_root,
++ sizeof(pmd->data_space_map_root));
++}
++
+ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
+ {
+ int r;
+ struct dm_block *sblock;
+- size_t metadata_len, data_len;
+ struct thin_disk_superblock *disk_super;
+ sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
+
+ if (bdev_size > THIN_METADATA_MAX_SECTORS)
+ bdev_size = THIN_METADATA_MAX_SECTORS;
+
+- r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
+- if (r < 0)
+- return r;
+-
+- r = dm_sm_root_size(pmd->data_sm, &data_len);
++ r = dm_sm_commit(pmd->data_sm);
+ if (r < 0)
+ return r;
+
+- r = dm_sm_commit(pmd->data_sm);
++ r = save_sm_roots(pmd);
+ if (r < 0)
+ return r;
+
+@@ -471,15 +505,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
+ disk_super->trans_id = 0;
+ disk_super->held_root = 0;
+
+- r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root,
+- metadata_len);
+- if (r < 0)
+- goto bad_locked;
+-
+- r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root,
+- data_len);
+- if (r < 0)
+- goto bad_locked;
++ copy_sm_roots(pmd, disk_super);
+
+ disk_super->data_mapping_root = cpu_to_le64(pmd->root);
+ disk_super->device_details_root = cpu_to_le64(pmd->details_root);
+@@ -488,10 +514,6 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
+ disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
+
+ return dm_tm_commit(pmd->tm, sblock);
+-
+-bad_locked:
+- dm_bm_unlock(sblock);
+- return r;
+ }
+
+ static int __format_metadata(struct dm_pool_metadata *pmd)
+@@ -769,6 +791,10 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
+ if (r < 0)
+ return r;
+
++ r = save_sm_roots(pmd);
++ if (r < 0)
++ return r;
++
+ r = superblock_lock(pmd, &sblock);
+ if (r)
+ return r;
+@@ -780,21 +806,9 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
+ disk_super->trans_id = cpu_to_le64(pmd->trans_id);
+ disk_super->flags = cpu_to_le32(pmd->flags);
+
+- r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root,
+- metadata_len);
+- if (r < 0)
+- goto out_locked;
+-
+- r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root,
+- data_len);
+- if (r < 0)
+- goto out_locked;
++ copy_sm_roots(pmd, disk_super);
+
+ return dm_tm_commit(pmd->tm, sblock);
+-
+-out_locked:
+- dm_bm_unlock(sblock);
+- return r;
+ }
+
+ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index e9587101b04f..e4cc196634c1 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -1332,9 +1332,9 @@ static void process_deferred_bios(struct pool *pool)
+ */
+ if (ensure_next_mapping(pool)) {
+ spin_lock_irqsave(&pool->lock, flags);
++ bio_list_add(&pool->deferred_bios, bio);
+ bio_list_merge(&pool->deferred_bios, &bios);
+ spin_unlock_irqrestore(&pool->lock, flags);
+-
+ break;
+ }
+
+diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
+index 064a3c271baa..30597f389d39 100644
+--- a/drivers/md/persistent-data/dm-block-manager.c
++++ b/drivers/md/persistent-data/dm-block-manager.c
+@@ -595,25 +595,14 @@ int dm_bm_unlock(struct dm_block *b)
+ }
+ EXPORT_SYMBOL_GPL(dm_bm_unlock);
+
+-int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
+- struct dm_block *superblock)
++int dm_bm_flush(struct dm_block_manager *bm)
+ {
+- int r;
+-
+ if (bm->read_only)
+ return -EPERM;
+
+- r = dm_bufio_write_dirty_buffers(bm->bufio);
+- if (unlikely(r)) {
+- dm_bm_unlock(superblock);
+- return r;
+- }
+-
+- dm_bm_unlock(superblock);
+-
+ return dm_bufio_write_dirty_buffers(bm->bufio);
+ }
+-EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
++EXPORT_SYMBOL_GPL(dm_bm_flush);
+
+ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
+ {
+diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
+index 13cd58e1fe69..1b95dfc17786 100644
+--- a/drivers/md/persistent-data/dm-block-manager.h
++++ b/drivers/md/persistent-data/dm-block-manager.h
+@@ -105,8 +105,7 @@ int dm_bm_unlock(struct dm_block *b);
+ *
+ * This method always blocks.
+ */
+-int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
+- struct dm_block *superblock);
++int dm_bm_flush(struct dm_block_manager *bm);
+
+ /*
+ * Request data is prefetched into the cache.
+diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
+index 81da1a26042e..3bc30a0ae3d6 100644
+--- a/drivers/md/persistent-data/dm-transaction-manager.c
++++ b/drivers/md/persistent-data/dm-transaction-manager.c
+@@ -154,7 +154,7 @@ int dm_tm_pre_commit(struct dm_transaction_manager *tm)
+ if (r < 0)
+ return r;
+
+- return 0;
++ return dm_bm_flush(tm->bm);
+ }
+ EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
+
+@@ -164,8 +164,9 @@ int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
+ return -EWOULDBLOCK;
+
+ wipe_shadow_table(tm);
++ dm_bm_unlock(root);
+
+- return dm_bm_flush_and_unlock(tm->bm, root);
++ return dm_bm_flush(tm->bm);
+ }
+ EXPORT_SYMBOL_GPL(dm_tm_commit);
+
+diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h
+index b5b139076ca5..2772ed2a781a 100644
+--- a/drivers/md/persistent-data/dm-transaction-manager.h
++++ b/drivers/md/persistent-data/dm-transaction-manager.h
+@@ -38,18 +38,17 @@ struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transac
+ /*
+ * We use a 2-phase commit here.
+ *
+- * i) In the first phase the block manager is told to start flushing, and
+- * the changes to the space map are written to disk. You should interrogate
+- * your particular space map to get detail of its root node etc. to be
+- * included in your superblock.
++ * i) Make all changes for the transaction *except* for the superblock.
++ * Then call dm_tm_pre_commit() to flush them to disk.
+ *
+- * ii) @root will be committed last. You shouldn't use more than the
+- * first 512 bytes of @root if you wish the transaction to survive a power
+- * failure. You *must* have a write lock held on @root for both stage (i)
+- * and (ii). The commit will drop the write lock.
++ * ii) Lock your superblock. Update. Then call dm_tm_commit() which will
++ * unlock the superblock and flush it. No other blocks should be updated
++ * during this period. Care should be taken to never unlock a partially
++ * updated superblock; perform any operations that could fail *before* you
++ * take the superblock lock.
+ */
+ int dm_tm_pre_commit(struct dm_transaction_manager *tm);
+-int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root);
++int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *superblock);
+
+ /*
+ * These methods are the only way to get hold of a writeable block.
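
Restated as a caller sequence, the contract documented above looks roughly like the __commit_transaction() paths rewritten elsewhere in this patch (a sketch; error handling abbreviated):

    static int commit(struct dm_pool_metadata *pmd)
    {
            struct dm_block *sblock;
            int r;

            /* Phase i: flush everything except the superblock. */
            r = dm_tm_pre_commit(pmd->tm);
            if (r < 0)
                    return r;

            /* Anything that can fail, such as copying the space map
             * roots, happens before the superblock lock is taken. */
            r = save_sm_roots(pmd);
            if (r < 0)
                    return r;

            /* Phase ii: lock, update, commit; dm_tm_commit() unlocks
             * the superblock and issues the final flush. */
            r = superblock_lock(pmd, &sblock);
            if (r)
                    return r;
            /* ... update dm_block_data(sblock) fields here ... */
            return dm_tm_commit(pmd->tm, sblock);
    }
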
+diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
+index 02699c111019..c7a1c8eba475 100644
+--- a/drivers/media/dvb-frontends/m88rs2000.c
++++ b/drivers/media/dvb-frontends/m88rs2000.c
+@@ -712,6 +712,22 @@ static int m88rs2000_get_frontend(struct dvb_frontend *fe)
+ return 0;
+ }
+
++static int m88rs2000_get_tune_settings(struct dvb_frontend *fe,
++ struct dvb_frontend_tune_settings *tune)
++{
++ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
++
++ if (c->symbol_rate > 3000000)
++ tune->min_delay_ms = 2000;
++ else
++ tune->min_delay_ms = 3000;
++
++ tune->step_size = c->symbol_rate / 16000;
++ tune->max_drift = c->symbol_rate / 2000;
++
++ return 0;
++}
++
+ static int m88rs2000_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+ {
+ struct m88rs2000_state *state = fe->demodulator_priv;
+@@ -743,7 +759,7 @@ static struct dvb_frontend_ops m88rs2000_ops = {
+ .symbol_rate_tolerance = 500, /* ppm */
+ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
+- FE_CAN_QPSK |
++ FE_CAN_QPSK | FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_AUTO
+ },
+
+@@ -763,6 +779,7 @@ static struct dvb_frontend_ops m88rs2000_ops = {
+
+ .set_frontend = m88rs2000_set_frontend,
+ .get_frontend = m88rs2000_get_frontend,
++ .get_tune_settings = m88rs2000_get_tune_settings,
+ };
+
+ struct dvb_frontend *m88rs2000_attach(const struct m88rs2000_config *config,
+diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
+index d45e7f6ff332..e87a734637a9 100644
+--- a/drivers/media/pci/saa7134/saa7134-cards.c
++++ b/drivers/media/pci/saa7134/saa7134-cards.c
+@@ -8045,8 +8045,8 @@ int saa7134_board_init2(struct saa7134_dev *dev)
+ break;
+ } /* switch() */
+
+- /* initialize tuner */
+- if (TUNER_ABSENT != dev->tuner_type) {
++ /* initialize tuner (don't do this when resuming) */
++ if (!dev->insuspend && TUNER_ABSENT != dev->tuner_type) {
+ int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);
+
+ /* Note: radio tuner address is always filled in,
+diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
+index cd8831aebdeb..e2e4610d5550 100644
+--- a/drivers/media/platform/omap3isp/isppreview.c
++++ b/drivers/media/platform/omap3isp/isppreview.c
+@@ -1079,6 +1079,7 @@ static void preview_config_input_format(struct isp_prev_device *prev,
+ */
+ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
+ {
++ const struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK];
+ struct isp_device *isp = to_isp_device(prev);
+ unsigned int sph = prev->crop.left;
+ unsigned int eph = prev->crop.left + prev->crop.width - 1;
+@@ -1086,6 +1087,14 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
+ unsigned int elv = prev->crop.top + prev->crop.height - 1;
+ u32 features;
+
++ if (format->code != V4L2_MBUS_FMT_Y8_1X8 &&
++ format->code != V4L2_MBUS_FMT_Y10_1X10) {
++ sph -= 2;
++ eph += 2;
++ slv -= 2;
++ elv += 2;
++ }
++
+ features = (prev->params.params[0].features & active)
+ | (prev->params.params[1].features & ~active);
+
+diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
+index bb1e8dca80cd..069b7f0dfb36 100644
+--- a/drivers/media/usb/em28xx/em28xx-dvb.c
++++ b/drivers/media/usb/em28xx/em28xx-dvb.c
+@@ -673,7 +673,8 @@ static void pctv_520e_init(struct em28xx *dev)
+ static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
+ {
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+- struct em28xx *dev = fe->dvb->priv;
++ struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv;
++ struct em28xx *dev = i2c_bus->dev;
+ #ifdef CONFIG_GPIOLIB
+ struct em28xx_dvb *dvb = dev->dvb;
+ int ret;
+diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
+index f4453d52801b..ceb5404045b4 100644
+--- a/drivers/media/usb/gspca/sn9c20x.c
++++ b/drivers/media/usb/gspca/sn9c20x.c
+@@ -2359,6 +2359,7 @@ static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)},
+ {USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)},
+ {USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)},
++ {USB_DEVICE(0x0458, 0x7045), SN9C20X(MT9M112, 0x5d, LED_REVERSE)},
+ {USB_DEVICE(0x0458, 0x704a), SN9C20X(MT9M112, 0x5d, 0)},
+ {USB_DEVICE(0x0458, 0x704c), SN9C20X(MT9M112, 0x5d, 0)},
+ {USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)},
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index 3394c3432011..e1c5bf3ea112 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -1846,7 +1846,25 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
+
+ if (!enable) {
+ uvc_uninit_video(stream, 1);
+- usb_set_interface(stream->dev->udev, stream->intfnum, 0);
++ if (stream->intf->num_altsetting > 1) {
++ usb_set_interface(stream->dev->udev,
++ stream->intfnum, 0);
++ } else {
++ /* UVC doesn't specify how to inform a bulk-based device
++ * when the video stream is stopped. Windows sends a
++ * CLEAR_FEATURE(HALT) request to the video streaming
++ * bulk endpoint, mimic the same behaviour.
++ */
++ unsigned int epnum = stream->header.bEndpointAddress
++ & USB_ENDPOINT_NUMBER_MASK;
++ unsigned int dir = stream->header.bEndpointAddress
++ & USB_ENDPOINT_DIR_MASK;
++ unsigned int pipe;
++
++ pipe = usb_sndbulkpipe(stream->dev->udev, epnum) | dir;
++ usb_clear_halt(stream->dev->udev, pipe);
++ }
++
+ uvc_queue_enable(&stream->queue, 0);
+ uvc_video_clock_cleanup(stream);
+ return 0;
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index 8f7a6a454a4c..b63a5e584aa0 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -787,8 +787,8 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
+ #define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32)
+ #define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32)
+ #define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
+-#define VIDIOC_SUBDEV_G_EDID32 _IOWR('V', 63, struct v4l2_subdev_edid32)
+-#define VIDIOC_SUBDEV_S_EDID32 _IOWR('V', 64, struct v4l2_subdev_edid32)
++#define VIDIOC_SUBDEV_G_EDID32 _IOWR('V', 40, struct v4l2_subdev_edid32)
++#define VIDIOC_SUBDEV_S_EDID32 _IOWR('V', 41, struct v4l2_subdev_edid32)
+ #define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
+ #define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
+ #define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
+diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
+index ee52b9f4a944..c0895f88ce9c 100644
+--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
+@@ -26,6 +26,10 @@
+ #include <linux/v4l2-dv-timings.h>
+ #include <media/v4l2-dv-timings.h>
+
++MODULE_AUTHOR("Hans Verkuil");
++MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
++MODULE_LICENSE("GPL");
++
+ const struct v4l2_dv_timings v4l2_dv_timings_presets[] = {
+ V4L2_DV_BT_CEA_640X480P59_94,
+ V4L2_DV_BT_CEA_720X480I59_94,
+diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
+index a65447d65605..da1ef3290be5 100644
+--- a/drivers/mfd/88pm800.c
++++ b/drivers/mfd/88pm800.c
+@@ -571,7 +571,7 @@ static int pm800_probe(struct i2c_client *client,
+ ret = pm800_pages_init(chip);
+ if (ret) {
+ dev_err(&client->dev, "pm800_pages_init failed!\n");
+- goto err_page_init;
++ goto err_device_init;
+ }
+
+ ret = device_800_init(chip, pdata);
+@@ -587,7 +587,6 @@ static int pm800_probe(struct i2c_client *client,
+
+ err_device_init:
+ pm800_pages_exit(chip);
+-err_page_init:
+ err_subchip_alloc:
+ pm80x_deinit();
+ out_init:
+diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
+index 7ebe9ef1eba6..a141b460697d 100644
+--- a/drivers/mfd/88pm860x-core.c
++++ b/drivers/mfd/88pm860x-core.c
+@@ -1179,12 +1179,18 @@ static int pm860x_probe(struct i2c_client *client,
+ chip->companion_addr = pdata->companion_addr;
+ chip->companion = i2c_new_dummy(chip->client->adapter,
+ chip->companion_addr);
++ if (!chip->companion) {
++ dev_err(&client->dev,
++ "Failed to allocate I2C companion device\n");
++ return -ENODEV;
++ }
+ chip->regmap_companion = regmap_init_i2c(chip->companion,
+ &pm860x_regmap_config);
+ if (IS_ERR(chip->regmap_companion)) {
+ ret = PTR_ERR(chip->regmap_companion);
+ dev_err(&chip->companion->dev,
+ "Failed to allocate register map: %d\n", ret);
++ i2c_unregister_device(chip->companion);
+ return ret;
+ }
+ i2c_set_clientdata(chip->companion, chip);
+diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
+index d3e23278d299..38917a822335 100644
+--- a/drivers/mfd/kempld-core.c
++++ b/drivers/mfd/kempld-core.c
+@@ -322,9 +322,12 @@ static int kempld_detect_device(struct kempld_device_data *pld)
+ return -ENODEV;
+ }
+
+- /* Release hardware mutex if aquired */
+- if (!(index_reg & KEMPLD_MUTEX_KEY))
++ /* Release hardware mutex if acquired */
++ if (!(index_reg & KEMPLD_MUTEX_KEY)) {
+ iowrite8(KEMPLD_MUTEX_KEY, pld->io_index);
++ /* PXT and COMe-cPC2 boards may require a second release */
++ iowrite8(KEMPLD_MUTEX_KEY, pld->io_index);
++ }
+
+ mutex_unlock(&pld->lock);
+
+diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
+index 108453b75ccc..fc2e0b946f8d 100644
+--- a/drivers/mfd/max77686.c
++++ b/drivers/mfd/max77686.c
+@@ -120,6 +120,10 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
+ dev_info(max77686->dev, "device found\n");
+
+ max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
++ if (!max77686->rtc) {
++ dev_err(max77686->dev, "Failed to allocate I2C device for RTC\n");
++ return -ENODEV;
++ }
+ i2c_set_clientdata(max77686->rtc, max77686);
+
+ max77686_irq_init(max77686);
+diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
+index c04723efc707..8abfd3f0af5b 100644
+--- a/drivers/mfd/max77693.c
++++ b/drivers/mfd/max77693.c
+@@ -149,9 +149,18 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
+ dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
+
+ max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
++ if (!max77693->muic) {
++ dev_err(max77693->dev, "Failed to allocate I2C device for MUIC\n");
++ return -ENODEV;
++ }
+ i2c_set_clientdata(max77693->muic, max77693);
+
+ max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
++ if (!max77693->haptic) {
++ dev_err(max77693->dev, "Failed to allocate I2C device for Haptic\n");
++ ret = -ENODEV;
++ goto err_i2c_haptic;
++ }
+ i2c_set_clientdata(max77693->haptic, max77693);
+
+ /*
+@@ -187,8 +196,9 @@ err_mfd:
+ max77693_irq_exit(max77693);
+ err_irq:
+ err_regmap_muic:
+- i2c_unregister_device(max77693->muic);
+ i2c_unregister_device(max77693->haptic);
++err_i2c_haptic:
++ i2c_unregister_device(max77693->muic);
+ return ret;
+ }
+
+diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
+index de7fb80a6052..afd07718dcab 100644
+--- a/drivers/mfd/max8925-i2c.c
++++ b/drivers/mfd/max8925-i2c.c
+@@ -181,9 +181,18 @@ static int max8925_probe(struct i2c_client *client,
+ mutex_init(&chip->io_lock);
+
+ chip->rtc = i2c_new_dummy(chip->i2c->adapter, RTC_I2C_ADDR);
++ if (!chip->rtc) {
++ dev_err(chip->dev, "Failed to allocate I2C device for RTC\n");
++ return -ENODEV;
++ }
+ i2c_set_clientdata(chip->rtc, chip);
+
+ chip->adc = i2c_new_dummy(chip->i2c->adapter, ADC_I2C_ADDR);
++ if (!chip->adc) {
++ dev_err(chip->dev, "Failed to allocate I2C device for ADC\n");
++ i2c_unregister_device(chip->rtc);
++ return -ENODEV;
++ }
+ i2c_set_clientdata(chip->adc, chip);
+
+ device_init_wakeup(&client->dev, 1);
+diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
+index cee098c0dae3..20a20051108f 100644
+--- a/drivers/mfd/max8997.c
++++ b/drivers/mfd/max8997.c
+@@ -217,10 +217,26 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
+ mutex_init(&max8997->iolock);
+
+ max8997->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
++ if (!max8997->rtc) {
++ dev_err(max8997->dev, "Failed to allocate I2C device for RTC\n");
++ return -ENODEV;
++ }
+ i2c_set_clientdata(max8997->rtc, max8997);
++
+ max8997->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
++ if (!max8997->haptic) {
++ dev_err(max8997->dev, "Failed to allocate I2C device for Haptic\n");
++ ret = -ENODEV;
++ goto err_i2c_haptic;
++ }
+ i2c_set_clientdata(max8997->haptic, max8997);
++
+ max8997->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
++ if (!max8997->muic) {
++ dev_err(max8997->dev, "Failed to allocate I2C device for MUIC\n");
++ ret = -ENODEV;
++ goto err_i2c_muic;
++ }
+ i2c_set_clientdata(max8997->muic, max8997);
+
+ pm_runtime_set_active(max8997->dev);
+@@ -247,7 +263,9 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
+ err_mfd:
+ mfd_remove_devices(max8997->dev);
+ i2c_unregister_device(max8997->muic);
++err_i2c_muic:
+ i2c_unregister_device(max8997->haptic);
++err_i2c_haptic:
+ i2c_unregister_device(max8997->rtc);
+ return ret;
+ }
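
Each of the MFD hunks above applies the same two-part hardening: i2c_new_dummy() returns NULL on failure (not an ERR_PTR), so its result must be checked before i2c_set_clientdata() dereferences it, and any dummy client registered earlier must be unwound in reverse order, which is what the new and reordered error labels arrange. A minimal userspace analogue of that control flow, with acquire()/release() standing in for i2c_new_dummy()/i2c_unregister_device():

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *name) { printf("acquire %s\n", name); return malloc(1); }
static void release(void *h, const char *name) { printf("release %s\n", name); free(h); }

static int probe(void)
{
        void *rtc, *haptic, *muic;
        int ret = 0;

        rtc = acquire("rtc");
        if (!rtc)
                return -1;              /* nothing registered yet, plain return */

        haptic = acquire("haptic");
        if (!haptic) {
                ret = -1;
                goto err_haptic;        /* haptic itself needs no release */
        }

        muic = acquire("muic");
        if (!muic) {
                ret = -1;
                goto err_muic;
        }
        /* ... rest of probe ... */
        return 0;

err_muic:                               /* unwind in reverse order of acquisition */
        release(haptic, "haptic");
err_haptic:
        release(rtc, "rtc");
        return ret;
}

int main(void) { return probe() ? EXIT_FAILURE : EXIT_SUCCESS; }
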
+diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
+index fe6332dcabee..25e131a0840a 100644
+--- a/drivers/mfd/max8998.c
++++ b/drivers/mfd/max8998.c
+@@ -215,6 +215,10 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
+ mutex_init(&max8998->iolock);
+
+ max8998->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
++ if (!max8998->rtc) {
++ dev_err(&i2c->dev, "Failed to allocate I2C device for RTC\n");
++ return -ENODEV;
++ }
+ i2c_set_clientdata(max8998->rtc, max8998);
+
+ max8998_irq_init(max8998);
+diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
+index f530e4b73f19..d02546b0a8cc 100644
+--- a/drivers/mfd/sec-core.c
++++ b/drivers/mfd/sec-core.c
+@@ -274,6 +274,10 @@ static int sec_pmic_probe(struct i2c_client *i2c,
+ }
+
+ sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
++ if (!sec_pmic->rtc) {
++ dev_err(&i2c->dev, "Failed to allocate I2C for RTC\n");
++ return -ENODEV;
++ }
+ i2c_set_clientdata(sec_pmic->rtc, sec_pmic);
+
+ if (pdata && pdata->cfg_pmic_irq)
+diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
+index d79277204835..de87eafbeb05 100644
+--- a/drivers/mfd/tps65910.c
++++ b/drivers/mfd/tps65910.c
+@@ -254,8 +254,10 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
+ ret = regmap_add_irq_chip(tps65910->regmap, tps65910->chip_irq,
+ IRQF_ONESHOT, pdata->irq_base,
+ tps6591x_irqs_chip, &tps65910->irq_data);
+- if (ret < 0)
++ if (ret < 0) {
+ dev_warn(tps65910->dev, "Failed to add irq_chip %d\n", ret);
++ tps65910->chip_irq = 0;
++ }
+ return ret;
+ }
+
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index b66cec93ebb3..9abe5a4e3ef7 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -74,23 +74,69 @@ int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
+
+
+ /**
+- * mei_io_list_flush - removes list entry belonging to cl.
++ * mei_cl_cmp_id - tells if the clients are the same
+ *
+- * @list: An instance of our list structure
+- * @cl: host client
++ * @cl1: host client 1
++ * @cl2: host client 2
++ *
++ * returns true - if the clients have the same host and me ids
++ * false - otherwise
++ */
++static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
++ const struct mei_cl *cl2)
++{
++ return cl1 && cl2 &&
++ (cl1->host_client_id == cl2->host_client_id) &&
++ (cl1->me_client_id == cl2->me_client_id);
++}
++
++/**
++ * mei_io_list_flush - removes cbs belonging to cl.
++ *
++ * @list: an instance of our list structure
++ * @cl: host client, can be NULL for flushing the whole list
++ * @free: whether to free the cbs
+ */
+-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
++static void __mei_io_list_flush(struct mei_cl_cb *list,
++ struct mei_cl *cl, bool free)
+ {
+ struct mei_cl_cb *cb;
+ struct mei_cl_cb *next;
+
++ /* enable removing everything if no cl is specified */
+ list_for_each_entry_safe(cb, next, &list->list, list) {
+- if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
++ if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
+ list_del(&cb->list);
++ if (free)
++ mei_io_cb_free(cb);
++ }
+ }
+ }
+
+ /**
++ * mei_io_list_flush - removes list entry belonging to cl.
++ *
++ * @list: An instance of our list structure
++ * @cl: host client
++ */
++static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
++{
++ __mei_io_list_flush(list, cl, false);
++}
++
++
++/**
++ * mei_io_list_free - removes cbs belonging to cl and frees them
++ *
++ * @list: An instance of our list structure
++ * @cl: host client
++ */
++static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
++{
++ __mei_io_list_flush(list, cl, true);
++}
++
++/**
+ * mei_io_cb_free - free mei_cb_private related memory
+ *
+ * @cb: mei callback struct
+@@ -192,8 +238,8 @@ int mei_cl_flush_queues(struct mei_cl *cl)
+
+ dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
+ mei_io_list_flush(&cl->dev->read_list, cl);
+- mei_io_list_flush(&cl->dev->write_list, cl);
+- mei_io_list_flush(&cl->dev->write_waiting_list, cl);
++ mei_io_list_free(&cl->dev->write_list, cl);
++ mei_io_list_free(&cl->dev->write_waiting_list, cl);
+ mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
+ mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
+@@ -916,20 +962,8 @@ void mei_cl_all_wakeup(struct mei_device *dev)
+ */
+ void mei_cl_all_write_clear(struct mei_device *dev)
+ {
+- struct mei_cl_cb *cb, *next;
+- struct list_head *list;
+-
+- list = &dev->write_list.list;
+- list_for_each_entry_safe(cb, next, list, list) {
+- list_del(&cb->list);
+- mei_io_cb_free(cb);
+- }
+-
+- list = &dev->write_waiting_list.list;
+- list_for_each_entry_safe(cb, next, list, list) {
+- list_del(&cb->list);
+- mei_io_cb_free(cb);
+- }
++ mei_io_list_free(&dev->write_list, NULL);
++ mei_io_list_free(&dev->write_waiting_list, NULL);
+ }
+
+
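
The mei rework above folds the duplicated flush loops into a single worker, __mei_io_list_flush(), parameterized by an optional client (NULL now means "match every entry", which mei_cl_all_write_clear() relies on) and a free flag, with two inline wrappers preserving the old call-site names. A userspace sketch of that shape — struct cb and the helper names are illustrative, not from the driver:

#include <stdbool.h>
#include <stdlib.h>

struct cb {
        struct cb *next;
        int owner;                      /* stands in for cb->cl */
};

/* One worker, like __mei_io_list_flush(): unlink entries matching owner
 * (or every entry when owner < 0), optionally freeing them. */
static void list_flush(struct cb **head, int owner, bool do_free)
{
        struct cb **pp = head;

        while (*pp) {
                struct cb *cur = *pp;

                if (owner < 0 || cur->owner == owner) {
                        *pp = cur->next;        /* unlink */
                        if (do_free)
                                free(cur);
                } else {
                        pp = &cur->next;
                }
        }
}

/* Thin wrappers, like mei_io_list_flush()/mei_io_list_free(). */
static inline void list_drop(struct cb **head, int owner) { list_flush(head, owner, false); }
static inline void list_free_all(struct cb **head, int owner) { list_flush(head, owner, true); }

int main(void)
{
        struct cb *head = NULL;

        for (int i = 0; i < 4; i++) {
                struct cb *n = malloc(sizeof(*n));      /* demo: assumed to succeed */
                n->owner = i % 2;
                n->next = head;
                head = n;
        }
        list_free_all(&head, 1);        /* free client 1's entries */
        list_free_all(&head, -1);       /* then flush and free the rest */
        return head == NULL ? 0 : 1;
}
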
+diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
+index 892cc4207fa2..5d75ab523490 100644
+--- a/drivers/misc/mei/client.h
++++ b/drivers/misc/mei/client.h
+@@ -45,8 +45,6 @@ static inline void mei_io_list_init(struct mei_cl_cb *list)
+ {
+ INIT_LIST_HEAD(&list->list);
+ }
+-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
+-
+ /*
+ * MEI Host Client Functions
+ */
+@@ -61,22 +59,6 @@ int mei_cl_unlink(struct mei_cl *cl);
+ int mei_cl_flush_queues(struct mei_cl *cl);
+ struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
+
+-/**
+- * mei_cl_cmp_id - tells if file private data have same id
+- *
+- * @fe1: private data of 1. file object
+- * @fe2: private data of 2. file object
+- *
+- * returns true - if ids are the same and not NULL
+- */
+-static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
+- const struct mei_cl *cl2)
+-{
+- return cl1 && cl2 &&
+- (cl1->host_client_id == cl2->host_client_id) &&
+- (cl1->me_client_id == cl2->me_client_id);
+-}
+-
+
+ int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
+
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index 66f411a6e8ea..cabc04383685 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -115,6 +115,11 @@
+ #define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */
+
+ #define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
++
++/* Host Firmware Status Registers in PCI Config Space */
++#define PCI_CFG_HFS_1 0x40
++#define PCI_CFG_HFS_2 0x48
++
+ /*
+ * MEI HW Section
+ */
+diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
+index 1b922e971d3e..41b4b4818b04 100644
+--- a/drivers/misc/mei/interrupt.c
++++ b/drivers/misc/mei/interrupt.c
+@@ -420,8 +420,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
+
+ cl->status = 0;
+ list_del(&cb->list);
+- if (MEI_WRITING == cl->writing_state &&
+- cb->fop_type == MEI_FOP_WRITE &&
++ if (cb->fop_type == MEI_FOP_WRITE &&
+ cl != &dev->iamthif_cl) {
+ dev_dbg(&dev->pdev->dev, "MEI WRITE COMPLETE\n");
+ cl->writing_state = MEI_WRITE_COMPLETE;
+diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
+index cabeddd66c1f..9558bef37eea 100644
+--- a/drivers/misc/mei/main.c
++++ b/drivers/misc/mei/main.c
+@@ -648,8 +648,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
+ goto out;
+ }
+
+- if (MEI_WRITE_COMPLETE == cl->writing_state)
+- mask |= (POLLIN | POLLRDNORM);
++ mask |= (POLLIN | POLLRDNORM);
+
+ out:
+ mutex_unlock(&dev->device_lock);
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index e637318b79ba..20fb058f6ae8 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -100,15 +100,31 @@ static bool mei_me_quirk_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+ {
+ u32 reg;
+- if (ent->device == MEI_DEV_ID_PBG_1) {
+- pci_read_config_dword(pdev, 0x48, &reg);
+- /* make sure that bit 9 is up and bit 10 is down */
+- if ((reg & 0x600) == 0x200) {
+- dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
+- return false;
+- }
++ /* Cougar Point || Patsburg */
++ if (ent->device == MEI_DEV_ID_CPT_1 ||
++ ent->device == MEI_DEV_ID_PBG_1) {
++ pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
++ /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
++ if ((reg & 0x600) == 0x200)
++ goto no_mei;
+ }
++
++ /* Lynx Point */
++ if (ent->device == MEI_DEV_ID_LPT_H ||
++ ent->device == MEI_DEV_ID_LPT_W ||
++ ent->device == MEI_DEV_ID_LPT_HR) {
++ /* Read ME FW Status register to check for SPS firmware */
++ pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
++ /* if bits [19:16] = 15, running SPS Firmware */
++ if ((reg & 0xf0000) == 0xf0000)
++ goto no_mei;
++ }
++
+ return true;
++
++no_mei:
++ dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
++ return false;
+ }
+ /**
+ * mei_probe - Device Initialization Routine
+diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
+index 85472d3fd37f..d002eb9e01cd 100644
+--- a/drivers/mmc/host/sdhci-bcm-kona.c
++++ b/drivers/mmc/host/sdhci-bcm-kona.c
+@@ -314,7 +314,7 @@ err_pltfm_free:
+ return ret;
+ }
+
+-static int __exit sdhci_bcm_kona_remove(struct platform_device *pdev)
++static int sdhci_bcm_kona_remove(struct platform_device *pdev)
+ {
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ int dead;
+diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
+index 9be079224761..1de054a0775d 100644
+--- a/drivers/mtd/nand/atmel_nand.c
++++ b/drivers/mtd/nand/atmel_nand.c
+@@ -1249,6 +1249,7 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
+ goto err;
+ }
+
++ nand_chip->options |= NAND_NO_SUBPAGE_WRITE;
+ nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
+ nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;
+
+diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
+index 52115151e4a7..2e1d16bf9818 100644
+--- a/drivers/mtd/nand/nuc900_nand.c
++++ b/drivers/mtd/nand/nuc900_nand.c
+@@ -225,7 +225,7 @@ static void nuc900_nand_enable(struct nuc900_nand *nand)
+ val = __raw_readl(nand->reg + REG_FMICSR);
+
+ if (!(val & NAND_EN))
+- __raw_writel(val | NAND_EN, REG_FMICSR);
++ __raw_writel(val | NAND_EN, nand->reg + REG_FMICSR);
+
+ val = __raw_readl(nand->reg + REG_SMCSR);
+
+diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
+index 4b8e89583f2a..cf49c22673b9 100644
+--- a/drivers/mtd/sm_ftl.c
++++ b/drivers/mtd/sm_ftl.c
+@@ -59,15 +59,12 @@ static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+ struct attribute_group *attr_group;
+ struct attribute **attributes;
+ struct sm_sysfs_attribute *vendor_attribute;
++ char *vendor;
+
+- int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
+- SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
+-
+- char *vendor = kmalloc(vendor_len, GFP_KERNEL);
++ vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
++ SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
+ if (!vendor)
+ goto error1;
+- memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
+- vendor[vendor_len] = 0;
+
+ /* Initialize sysfs attributes */
+ vendor_attribute =
+@@ -78,7 +75,7 @@ static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+ sysfs_attr_init(&vendor_attribute->dev_attr.attr);
+
+ vendor_attribute->data = vendor;
+- vendor_attribute->len = vendor_len;
++ vendor_attribute->len = strlen(vendor);
+ vendor_attribute->dev_attr.attr.name = "vendor";
+ vendor_attribute->dev_attr.attr.mode = S_IRUGO;
+ vendor_attribute->dev_attr.show = sm_attr_show;
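
The sm_ftl hunk fixes a real off-by-one: the old code did kmalloc(vendor_len) and then wrote vendor[vendor_len] = 0, one byte past the allocation. kstrndup() sizes the buffer for the terminator and copies at most the given length, and strlen(vendor) then replaces the cached length. The userspace equivalent is POSIX strndup(3):

#define _POSIX_C_SOURCE 200809L         /* for strnlen(3) and strndup(3) */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char cis[] = "VENDORNAME\0padding";

        /* Old shape: strnlen + malloc + memcpy leaves no room for '\0'. */
        size_t len = strnlen(cis, sizeof(cis));
        char *bad = malloc(len);                /* one byte too small */
        memcpy(bad, cis, len);
        /* bad[len] = '\0';  -- would corrupt the heap, as the old code did */
        free(bad);

        /* Fixed shape: strndup() (kstrndup() in the kernel) allocates
         * len + 1 bytes and terminates the copy. */
        char *good = strndup(cis, sizeof(cis));
        printf("vendor=\"%s\" len=%zu\n", good, strlen(good));
        free(good);
        return 0;
}
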
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 71d9cad02704..5cdd2b2f18c5 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -88,9 +88,8 @@
+ #define MVNETA_TX_IN_PRGRS BIT(1)
+ #define MVNETA_TX_FIFO_EMPTY BIT(8)
+ #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
+-#define MVNETA_SERDES_CFG 0x24A0
++#define MVNETA_SGMII_SERDES_CFG 0x24A0
+ #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
+-#define MVNETA_RGMII_SERDES_PROTO 0x0667
+ #define MVNETA_TYPE_PRIO 0x24bc
+ #define MVNETA_FORCE_UNI BIT(21)
+ #define MVNETA_TXQ_CMD_1 0x24e4
+@@ -666,6 +665,35 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+ }
+
++
++
++/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
++static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
++{
++ u32 val;
++
++ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
++
++ if (enable)
++ val |= MVNETA_GMAC2_PORT_RGMII;
++ else
++ val &= ~MVNETA_GMAC2_PORT_RGMII;
++
++ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
++}
++
++/* Config SGMII port */
++static void mvneta_port_sgmii_config(struct mvneta_port *pp)
++{
++ u32 val;
++
++ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
++ val |= MVNETA_GMAC2_PCS_ENABLE;
++ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
++
++ mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
++}
++
+ /* Start the Ethernet port RX and TX activity */
+ static void mvneta_port_up(struct mvneta_port *pp)
+ {
+@@ -2695,15 +2723,12 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
+
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII)
+- mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+- else
+- mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
++ mvneta_port_sgmii_config(pp);
+
+- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+-
+- val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
++ mvneta_gmac_rgmii_set(pp, 1);
+
+ /* Cancel Port Reset */
++ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ val &= ~MVNETA_GMAC2_PORT_RESET;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index ba39178a94ab..00e3f49fcf9b 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -1718,7 +1718,7 @@ int ath_cabq_update(struct ath_softc *sc)
+ else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
+ sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
+
+- qi.tqi_readyTime = (cur_conf->beacon_interval *
++ qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
+ sc->config.cabqReadytime) / 100;
+ ath_txq_update(sc, qnum, &qi);
+
+diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
+index 7c970d3ae358..80ecca3e1465 100644
+--- a/drivers/net/wireless/b43/phy_n.c
++++ b/drivers/net/wireless/b43/phy_n.c
+@@ -5175,22 +5175,22 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
+ int ch = new_channel->hw_value;
+
+ u16 old_band_5ghz;
+- u32 tmp32;
++ u16 tmp16;
+
+ old_band_5ghz =
+ b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
+ if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) {
+- tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
+- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
++ tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
++ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
+ b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
+- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
++ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
+ b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
+ } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) {
+ b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
+- tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
+- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
++ tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
++ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
+ b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF);
+- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
++ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
+ }
+
+ b43_chantab_phy_upload(dev, e);
+diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
+index 7aad766865cf..ca9c4f1f9b0a 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/main.c
++++ b/drivers/net/wireless/iwlwifi/dvm/main.c
+@@ -252,13 +252,17 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
+ struct iwl_priv *priv =
+ container_of(work, struct iwl_priv, bt_runtime_config);
+
++ mutex_lock(&priv->mutex);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+- return;
++ goto out;
+
+ /* dont send host command if rf-kill is on */
+ if (!iwl_is_ready_rf(priv))
+- return;
++ goto out;
++
+ iwlagn_send_advance_bt_config(priv);
++out:
++ mutex_unlock(&priv->mutex);
+ }
+
+ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
+diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+index e06971be7df7..f923d8c9a296 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+@@ -1025,9 +1025,20 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
+ bool rtstatus = true;
+ int err = 0;
+ u8 tmp_u1b, u1byte;
++ unsigned long flags;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Rtl8188EE hw init\n");
+ rtlpriv->rtlhal.being_init_adapter = true;
++ /* As this function can take a very long time (up to 350 ms)
++ * and can be called with irqs disabled, re-enable the irqs
++ * to let the other devices continue being serviced.
++ *
++ * It is safe to do so since our own interrupts will only be enabled
++ * in a subsequent step.
++ */
++ local_save_flags(flags);
++ local_irq_enable();
++
+ rtlpriv->intf_ops->disable_aspm(hw);
+
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CLKR+1);
+@@ -1043,7 +1054,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+ err = 1;
+- return err;
++ goto exit;
+ }
+
+ err = rtl88e_download_fw(hw, false);
+@@ -1051,8 +1062,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
+ err = 1;
+- rtlhal->fw_ready = false;
+- return err;
++ goto exit;
+ } else {
+ rtlhal->fw_ready = true;
+ }
+@@ -1135,10 +1145,12 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
+ }
+ rtl_write_byte(rtlpriv, REG_NAV_CTRL+2, ((30000+127)/128));
+ rtl88e_dm_init(hw);
++exit:
++ local_irq_restore(flags);
+ rtlpriv->rtlhal.being_init_adapter = false;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "end of Rtl8188EE hw init %x\n",
+ err);
+- return 0;
++ return err;
+ }
+
+ static enum version_8188e _rtl88ee_read_chip_version(struct ieee80211_hw *hw)
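
The same IRQ bracket is inserted into four rtlwifi hw_init paths in this patch: save the caller's IRQ state, enable interrupts for the long-running init, and funnel every exit through one label so the state is always restored. Reduced to a kernel-style sketch (do_slow_init() is a hypothetical stand-in; this is the shape of the fix, not a buildable driver):

#include <linux/irqflags.h>

static int do_slow_init(void *hw) { return 0; }         /* placeholder */

static int hw_init_slow(void *hw)
{
        unsigned long flags;
        int err = 0;

        local_save_flags(flags);        /* remember the caller's IRQ state */
        local_irq_enable();             /* init can take ~350 ms; let other
                                         * devices keep being serviced. Safe:
                                         * this device's own interrupts are
                                         * only enabled in a later step. */

        if (do_slow_init(hw)) {
                err = 1;
                goto exit;              /* no early return may skip restore */
        }
        /* ... firmware download, PHY configuration ... */

exit:
        local_irq_restore(flags);       /* put back exactly what we found */
        return err;
}
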
+diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+index 68685a898257..749d41723371 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+@@ -293,7 +293,7 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ u8 *psaddr;
+ __le16 fc;
+ u16 type, ufc;
+- bool match_bssid, packet_toself, packet_beacon, addr;
++ bool match_bssid, packet_toself, packet_beacon = false, addr;
+
+ tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
+
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+index 189ba124a8c6..324aa581938e 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+@@ -985,6 +985,17 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ int err = 0;
+ static bool iqk_initialized;
++ unsigned long flags;
++
++ /* As this function can take a very long time (up to 350 ms)
++ * and can be called with irqs disabled, re-enable the irqs
++ * to let the other devices continue being serviced.
++ *
++ * It is safe to do so since our own interrupts will only be enabled
++ * in a subsequent step.
++ */
++ local_save_flags(flags);
++ local_irq_enable();
+
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
+ err = _rtl92cu_init_mac(hw);
+@@ -997,7 +1008,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
+ err = 1;
+- return err;
++ goto exit;
+ }
+ rtlhal->last_hmeboxnum = 0; /* h2c */
+ _rtl92cu_phy_param_tab_init(hw);
+@@ -1034,6 +1045,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
+ _InitPABias(hw);
+ _update_mac_setting(hw);
+ rtl92c_dm_init(hw);
++exit:
++ local_irq_restore(flags);
+ return err;
+ }
+
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+index 4f461786a7eb..c471400fe8f0 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+@@ -955,7 +955,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 tmp_byte = 0;
+-
++ unsigned long flags;
+ bool rtstatus = true;
+ u8 tmp_u1b;
+ int err = false;
+@@ -967,6 +967,16 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+
+ rtlpci->being_init_adapter = true;
+
++ /* As this function can take a very long time (up to 350 ms)
++ * and can be called with irqs disabled, re-enable the irqs
++ * to let the other devices continue being serviced.
++ *
++ * It is safe to do so since our own interrupts will only be enabled
++ * in a subsequent step.
++ */
++ local_save_flags(flags);
++ local_irq_enable();
++
+ rtlpriv->intf_ops->disable_aspm(hw);
+
+ /* 1. MAC Initialize */
+@@ -984,7 +994,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now... "
+ "Please copy FW into /lib/firmware/rtlwifi\n");
+- return 1;
++ err = 1;
++ goto exit;
+ }
+
+ /* After FW download, we have to reset MAC register */
+@@ -997,7 +1008,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+ /* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */
+ if (!rtl92s_phy_mac_config(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n");
+- return rtstatus;
++ err = rtstatus;
++ goto exit;
+ }
+
+ /* because last function modify RCR, so we update
+@@ -1016,7 +1028,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+ /* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */
+ if (!rtl92s_phy_bb_config(hw)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n");
+- return rtstatus;
++ err = rtstatus;
++ goto exit;
+ }
+
+ /* 5. Initiailze RF RAIO_A.txt RF RAIO_B.txt */
+@@ -1033,7 +1046,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+
+ if (!rtl92s_phy_rf_config(hw)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n");
+- return rtstatus;
++ err = rtstatus;
++ goto exit;
+ }
+
+ /* After read predefined TXT, we must set BB/MAC/RF
+@@ -1122,8 +1136,9 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_ON);
+ rtl92s_dm_init(hw);
++exit:
++ local_irq_restore(flags);
+ rtlpci->being_init_adapter = false;
+-
+ return err;
+ }
+
+diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+index c333dfd116b8..99f6bc5fa986 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+@@ -880,14 +880,25 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw)
+ bool rtstatus = true;
+ int err;
+ u8 tmp_u1b;
++ unsigned long flags;
+
+ rtlpriv->rtlhal.being_init_adapter = true;
++ /* As this function can take a very long time (up to 350 ms)
++ * and can be called with irqs disabled, re-enable the irqs
++ * to let the other devices continue being serviced.
++ *
++ * It is safe to do so since our own interrupts will only be enabled
++ * in a subsequent step.
++ */
++ local_save_flags(flags);
++ local_irq_enable();
++
+ rtlpriv->intf_ops->disable_aspm(hw);
+ rtstatus = _rtl8712e_init_mac(hw);
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+ err = 1;
+- return err;
++ goto exit;
+ }
+
+ err = rtl8723ae_download_fw(hw);
+@@ -895,8 +906,7 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw)
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
+ err = 1;
+- rtlhal->fw_ready = false;
+- return err;
++ goto exit;
+ } else {
+ rtlhal->fw_ready = true;
+ }
+@@ -971,6 +981,8 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw)
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
+ }
+ rtl8723ae_dm_init(hw);
++exit:
++ local_irq_restore(flags);
+ rtlpriv->rtlhal.being_init_adapter = false;
+ return err;
+ }
+diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
+index 8efd11dafd44..b4214cba58b7 100644
+--- a/drivers/pci/host/pci-mvebu.c
++++ b/drivers/pci/host/pci-mvebu.c
+@@ -749,7 +749,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+
+ for (i = 0; i < nranges; i++) {
+ u32 flags = of_read_number(range, 1);
+- u32 slot = of_read_number(range, 2);
++ u32 slot = of_read_number(range + 1, 1);
+ u64 cpuaddr = of_read_number(range + na, pna);
+ unsigned long rtype;
+
+diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
+index 81d8681c3195..7917bb2fa834 100644
+--- a/drivers/regulator/arizona-ldo1.c
++++ b/drivers/regulator/arizona-ldo1.c
+@@ -153,11 +153,9 @@ static const struct regulator_desc arizona_ldo1 = {
+
+ .vsel_reg = ARIZONA_LDO1_CONTROL_1,
+ .vsel_mask = ARIZONA_LDO1_VSEL_MASK,
+- .bypass_reg = ARIZONA_LDO1_CONTROL_1,
+- .bypass_mask = ARIZONA_LDO1_BYPASS,
+ .min_uV = 900000,
+- .uV_step = 50000,
+- .n_voltages = 7,
++ .uV_step = 25000,
++ .n_voltages = 13,
+ .enable_time = 500,
+
+ .owner = THIS_MODULE,
+@@ -203,6 +201,7 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
+ */
+ switch (arizona->type) {
+ case WM5102:
++ case WM8997:
+ desc = &arizona_ldo1_hc;
+ ldo1->init_data = arizona_ldo1_dvfs;
+ break;
+diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
+index eb5d22795c47..bb86494e2b7b 100644
+--- a/drivers/s390/char/con3215.c
++++ b/drivers/s390/char/con3215.c
+@@ -922,7 +922,7 @@ static int __init con3215_init(void)
+ raw3215_freelist = req;
+ }
+
+- cdev = ccw_device_probe_console();
++ cdev = ccw_device_probe_console(&raw3215_ccw_driver);
+ if (IS_ERR(cdev))
+ return -ENODEV;
+
+diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
+index 699fd3e363df..bb6b0df50b33 100644
+--- a/drivers/s390/char/con3270.c
++++ b/drivers/s390/char/con3270.c
+@@ -576,7 +576,6 @@ static struct console con3270 = {
+ static int __init
+ con3270_init(void)
+ {
+- struct ccw_device *cdev;
+ struct raw3270 *rp;
+ void *cbuf;
+ int i;
+@@ -591,10 +590,7 @@ con3270_init(void)
+ cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
+ }
+
+- cdev = ccw_device_probe_console();
+- if (IS_ERR(cdev))
+- return -ENODEV;
+- rp = raw3270_setup_console(cdev);
++ rp = raw3270_setup_console();
+ if (IS_ERR(rp))
+ return PTR_ERR(rp);
+
+diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
+index 24a08e8f19e1..651d1f5da7c4 100644
+--- a/drivers/s390/char/raw3270.c
++++ b/drivers/s390/char/raw3270.c
+@@ -776,16 +776,24 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
+ }
+
+ #ifdef CONFIG_TN3270_CONSOLE
++/* Tentative definition - see below for actual definition. */
++static struct ccw_driver raw3270_ccw_driver;
++
+ /*
+ * Setup 3270 device configured as console.
+ */
+-struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
++struct raw3270 __init *raw3270_setup_console(void)
+ {
++ struct ccw_device *cdev;
+ unsigned long flags;
+ struct raw3270 *rp;
+ char *ascebc;
+ int rc;
+
++ cdev = ccw_device_probe_console(&raw3270_ccw_driver);
++ if (IS_ERR(cdev))
++ return ERR_CAST(cdev);
++
+ rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+ ascebc = kzalloc(256, GFP_KERNEL);
+ rc = raw3270_setup_device(cdev, rp, ascebc);
+diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
+index 7b73ff8c1bd7..359276a88396 100644
+--- a/drivers/s390/char/raw3270.h
++++ b/drivers/s390/char/raw3270.h
+@@ -190,7 +190,7 @@ raw3270_put_view(struct raw3270_view *view)
+ wake_up(&raw3270_wait_queue);
+ }
+
+-struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
++struct raw3270 *raw3270_setup_console(void);
+ void raw3270_wait_cons_dev(struct raw3270 *);
+
+ /* Notifier for device addition/removal */
+diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
+index 13299f902676..ec0951a788fc 100644
+--- a/drivers/s390/cio/chsc.c
++++ b/drivers/s390/cio/chsc.c
+@@ -560,18 +560,27 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
+
+ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
+ {
+- do {
++ static int ntsm_unsupported;
++
++ while (true) {
+ memset(sei, 0, sizeof(*sei));
+ sei->request.length = 0x0010;
+ sei->request.code = 0x000e;
+- sei->ntsm = ntsm;
++ if (!ntsm_unsupported)
++ sei->ntsm = ntsm;
+
+ if (chsc(sei))
+ break;
+
+ if (sei->response.code != 0x0001) {
+- CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
+- sei->response.code);
++ CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
++ sei->response.code, sei->ntsm);
++
++ if (sei->response.code == 3 && sei->ntsm) {
++ /* Fallback for old firmware. */
++ ntsm_unsupported = 1;
++ continue;
++ }
+ break;
+ }
+
+@@ -587,7 +596,10 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
+ CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
+ break;
+ }
+- } while (sei->u.nt0_area.flags & 0x80);
++
++ if (!(sei->u.nt0_area.flags & 0x80))
++ break;
++ }
+ }
+
+ /*
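
The chsc rework turns the do/while into an explicit loop so a failing store-event-information call can be retried once without the ntsm field: response code 3 from old firmware means the field itself is unsupported, and a static flag remembers that verdict for all future calls. A userspace model of that fallback (function names are illustrative, not from the driver):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for chsc(): this fake firmware rejects any nonzero ntsm
 * with response code 3, like the old machines the patch caters for. */
static int store_event_info(unsigned long long ntsm)
{
        return ntsm ? 3 : 1;            /* 3 = unsupported, 1 = success */
}

static void process_event_information(unsigned long long ntsm)
{
        static bool ntsm_unsupported;   /* sticky across calls, as in the patch */

        while (true) {
                int code = store_event_info(ntsm_unsupported ? 0 : ntsm);

                if (code == 3 && ntsm && !ntsm_unsupported) {
                        ntsm_unsupported = true;   /* remember; retry w/o ntsm */
                        continue;
                }
                if (code != 1) {
                        fprintf(stderr, "sei failed (rc=%d)\n", code);
                        return;
                }
                printf("event handled%s\n",
                       ntsm_unsupported ? " (ntsm suppressed)" : "");
                return;                 /* demo: no 0x80 chaining */
        }
}

int main(void)
{
        process_event_information(4);   /* first call retries, then learns */
        process_event_information(4);   /* later calls skip ntsm up front */
        return 0;
}
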
+diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
+index e4a7ab2bb629..3a33be681fbe 100644
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -1610,7 +1610,7 @@ out_unlock:
+ return rc;
+ }
+
+-struct ccw_device *ccw_device_probe_console(void)
++struct ccw_device *ccw_device_probe_console(struct ccw_driver *drv)
+ {
+ struct io_subchannel_private *io_priv;
+ struct ccw_device *cdev;
+@@ -1632,6 +1632,7 @@ struct ccw_device *ccw_device_probe_console(void)
+ kfree(io_priv);
+ return cdev;
+ }
++ cdev->drv = drv;
+ set_io_private(sch, io_priv);
+ ret = ccw_device_console_enable(cdev, sch);
+ if (ret) {
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index 278c9fa62067..1822cb9ec623 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -2501,16 +2501,15 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
+ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
+ {
+ uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
+- dma_addr_t dma_coherent_handle;
++
+ /*
+ ********************************************************************
+ ** here we need to tell iop 331 our freeccb.HighPart
+ ** if freeccb.HighPart is not zero
+ ********************************************************************
+ */
+- dma_coherent_handle = acb->dma_coherent_handle;
+- cdb_phyaddr = (uint32_t)(dma_coherent_handle);
+- cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
++ cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle);
++ cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle);
+ acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
+ /*
+ ***********************************************************************
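
The arcmsr hunk replaces the hand-rolled (cdb_phyaddr >> 16) >> 16 split with the kernel's lower_32_bits()/upper_32_bits() helpers; the helpers carry the same double shift internally, which keeps the expression defined even when the handle type is only 32 bits wide, and the call sites become self-describing. A standalone illustration with the helper definitions inlined for the demo:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same definitions as the kernel helpers, copied in for userspace. */
#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

int main(void)
{
        uint64_t dma_handle = 0x123456789abcdef0ULL;

        printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n",
               lower_32_bits(dma_handle),
               upper_32_bits(dma_handle));   /* lo=0x9abcdef0 hi=0x12345678 */
        return 0;
}
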
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index 7f0af4fcc001..6fd7d40b2c4d 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -8293,7 +8293,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+
+ mpt2sas_base_free_resources(ioc);
+ pci_save_state(pdev);
+- pci_disable_device(pdev);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 9f01bbbf3a26..36d62fd53511 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -2616,7 +2616,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ ha->flags.enable_64bit_addressing ? "enable" :
+ "disable");
+ ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
+- if (!ret) {
++ if (ret) {
+ ql_log_pci(ql_log_fatal, pdev, 0x0031,
+ "Failed to allocate memory for adapter, aborting.\n");
+
+@@ -3541,10 +3541,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ else {
+ qla2x00_set_reserved_loop_ids(ha);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
+- "loop_id_map=%p. \n", ha->loop_id_map);
++ "loop_id_map=%p.\n", ha->loop_id_map);
+ }
+
+- return 1;
++ return 0;
+
+ fail_async_pd:
+ dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index b26f1a5cc0ec..95a5d73e675c 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -751,8 +751,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
+
+ vscsi->affinity_hint_set = true;
+ } else {
+- for (i = 0; i < vscsi->num_queues; i++)
++ for (i = 0; i < vscsi->num_queues; i++) {
++ if (!vscsi->req_vqs[i].vq)
++ continue;
++
+ virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
++ }
+
+ vscsi->affinity_hint_set = false;
+ }
+diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
+index 7d84418a01d8..cc6a997d4cda 100644
+--- a/drivers/spi/spi-efm32.c
++++ b/drivers/spi/spi-efm32.c
+@@ -491,6 +491,9 @@ static int efm32_spi_remove(struct platform_device *pdev)
+
+ static const struct of_device_id efm32_spi_dt_ids[] = {
+ {
++ .compatible = "energymicro,efm32-spi",
++ }, {
++ /* doesn't follow the "vendor,device" scheme, don't use */
+ .compatible = "efm32,spi",
+ }, {
+ /* sentinel */
+diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
+index 701ad1a69394..4fbe303e8186 100644
+--- a/drivers/staging/comedi/drivers/usbdux.c
++++ b/drivers/staging/comedi/drivers/usbdux.c
+@@ -494,7 +494,7 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
+ /* pointer to the DA */
+ *datap++ = val & 0xff;
+ *datap++ = (val >> 8) & 0xff;
+- *datap++ = chan;
++ *datap++ = chan << 6;
+ devpriv->ao_readback[chan] = val;
+
+ s->async->events |= COMEDI_CB_BLOCK;
+@@ -1040,11 +1040,8 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+ /* set current channel of the running acquisition to zero */
+ s->async->cur_chan = 0;
+
+- for (i = 0; i < cmd->chanlist_len; ++i) {
+- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
+-
+- devpriv->ao_chanlist[i] = chan << 6;
+- }
++ for (i = 0; i < cmd->chanlist_len; ++i)
++ devpriv->ao_chanlist[i] = CR_CHAN(cmd->chanlist[i]);
+
+ /* we count in steps of 1ms (125us) */
+ /* 125us mode not used yet */
+diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
+index 201165787362..33243ed40a1e 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
++++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
+@@ -555,7 +555,7 @@ _func_exit_;
+ /* set the security information in the recv_frame */
+ static union recv_frame *portctrl(struct adapter *adapter, union recv_frame *precv_frame)
+ {
+- u8 *psta_addr = NULL, *ptr;
++ u8 *psta_addr, *ptr;
+ uint auth_alg;
+ struct recv_frame_hdr *pfhdr;
+ struct sta_info *psta;
+@@ -569,7 +569,6 @@ static union recv_frame *portctrl(struct adapter *adapter, union recv_frame *pre
+ _func_enter_;
+
+ pstapriv = &adapter->stapriv;
+- psta = rtw_get_stainfo(pstapriv, psta_addr);
+
+ auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
+
+@@ -577,6 +576,7 @@ _func_enter_;
+ pfhdr = &precv_frame->u.hdr;
+ pattrib = &pfhdr->attrib;
+ psta_addr = pattrib->ta;
++ psta = rtw_get_stainfo(pstapriv, psta_addr);
+
+ prtnframe = NULL;
+
+diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
+index 23ec684b60e1..274c359279ef 100644
+--- a/drivers/staging/rtl8712/rtl871x_recv.c
++++ b/drivers/staging/rtl8712/rtl871x_recv.c
+@@ -254,7 +254,7 @@ union recv_frame *r8712_portctrl(struct _adapter *adapter,
+ struct sta_info *psta;
+ struct sta_priv *pstapriv;
+ union recv_frame *prtnframe;
+- u16 ether_type = 0;
++ u16 ether_type;
+
+ pstapriv = &adapter->stapriv;
+ ptr = get_recvframe_data(precv_frame);
+@@ -263,15 +263,14 @@ union recv_frame *r8712_portctrl(struct _adapter *adapter,
+ psta = r8712_get_stainfo(pstapriv, psta_addr);
+ auth_alg = adapter->securitypriv.AuthAlgrthm;
+ if (auth_alg == 2) {
++ /* get ether_type */
++ ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
++ memcpy(&ether_type, ptr, 2);
++ ether_type = ntohs((unsigned short)ether_type);
++
+ if ((psta != NULL) && (psta->ieee8021x_blocked)) {
+ /* blocked
+ * only accept EAPOL frame */
+- prtnframe = precv_frame;
+- /*get ether_type */
+- ptr = ptr + pfhdr->attrib.hdrlen +
+- pfhdr->attrib.iv_len + LLC_HEADER_SIZE;
+- memcpy(&ether_type, ptr, 2);
+- ether_type = ntohs((unsigned short)ether_type);
+ if (ether_type == 0x888e)
+ prtnframe = precv_frame;
+ else {
+diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
+index 73fc3cc19e33..18d4856c092f 100644
+--- a/drivers/staging/serqt_usb2/serqt_usb2.c
++++ b/drivers/staging/serqt_usb2/serqt_usb2.c
+@@ -725,7 +725,7 @@ static int qt_startup(struct usb_serial *serial)
+ goto startup_error;
+ }
+
+- switch (serial->dev->descriptor.idProduct) {
++ switch (le16_to_cpu(serial->dev->descriptor.idProduct)) {
+ case QUATECH_DSU100:
+ case QUATECH_QSU100:
+ case QUATECH_ESU100A:
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index b5e574659785..b47c2be1c427 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -2476,6 +2476,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+ {
+ struct iscsi_cmd *cmd;
+ struct iscsi_conn *conn_p;
++ bool found = false;
+
+ /*
+ * Only send a Asynchronous Message on connections whos network
+@@ -2484,11 +2485,12 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+ list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
+ if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
+ iscsit_inc_conn_usage_count(conn_p);
++ found = true;
+ break;
+ }
+ }
+
+- if (!conn_p)
++ if (!found)
+ return;
+
+ cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index d9b92b2c524d..e84149895af2 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -409,13 +409,14 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
+ goto out;
+ }
+
+- write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
++ write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+ GFP_KERNEL);
+ if (!write_sg) {
+ pr_err("Unable to allocate compare_and_write sg\n");
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
++ sg_init_table(write_sg, cmd->t_data_nents);
+ /*
+ * Setup verify and write data payloads from total NumberLBAs.
+ */
+diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
+index 4859505ae2ed..639fdb395fb7 100644
+--- a/drivers/target/tcm_fc/tfc_sess.c
++++ b/drivers/target/tcm_fc/tfc_sess.c
+@@ -68,6 +68,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+
+ if (tport) {
+ tport->tpg = tpg;
++ tpg->tport = tport;
+ return tport;
+ }
+
+diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
+index 9eba119bcdd3..e8e06d82acf0 100644
+--- a/drivers/tty/hvc/hvc_console.c
++++ b/drivers/tty/hvc/hvc_console.c
+@@ -31,6 +31,7 @@
+ #include <linux/list.h>
+ #include <linux/module.h>
+ #include <linux/major.h>
++#include <linux/atomic.h>
+ #include <linux/sysrq.h>
+ #include <linux/tty.h>
+ #include <linux/tty_flip.h>
+@@ -70,6 +71,9 @@ static struct task_struct *hvc_task;
+ /* Picks up late kicks after list walk but before schedule() */
+ static int hvc_kicked;
+
++/* hvc_init is triggered from hvc_alloc, i.e. only when actually used */
++static atomic_t hvc_needs_init __read_mostly = ATOMIC_INIT(-1);
++
+ static int hvc_init(void);
+
+ #ifdef CONFIG_MAGIC_SYSRQ
+@@ -186,7 +190,7 @@ static struct tty_driver *hvc_console_device(struct console *c, int *index)
+ return hvc_driver;
+ }
+
+-static int __init hvc_console_setup(struct console *co, char *options)
++static int hvc_console_setup(struct console *co, char *options)
+ {
+ if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
+ return -ENODEV;
+@@ -851,7 +855,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
+ int i;
+
+ /* We wait until a driver actually comes along */
+- if (!hvc_driver) {
++ if (atomic_inc_not_zero(&hvc_needs_init)) {
+ int err = hvc_init();
+ if (err)
+ return ERR_PTR(err);
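
hvc_alloc() used to key one-time setup off hvc_driver, a pointer only assigned inside hvc_init() itself, so two early callers could race into hvc_init(). The new gate is an atomic counter initialized to -1: atomic_inc_not_zero() moves -1 to 0 and returns true for exactly one caller; everyone afterwards sees 0 and skips. A userspace model of the same single-winner gate using a C11 compare-and-swap (the nearest portable equivalent; note that, as in the patch, losers do not wait for the winner to finish):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int needs_init = -1;      /* mirrors ATOMIC_INIT(-1) */

static void do_init(void)
{
        printf("init runs exactly once\n");
}

static void init_once(void)
{
        int expected = -1;

        /* First caller flips -1 -> 0 and runs the init; later callers see 0
         * and skip, like atomic_inc_not_zero() on the -1-initialized counter. */
        if (atomic_compare_exchange_strong(&needs_init, &expected, 0))
                do_init();
}

int main(void)
{
        init_once();
        init_once();    /* no second init */
        return 0;
}
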
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index d4a89db511b5..bd73dc25b41d 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -2350,8 +2350,12 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
+ if (tty->ops->flush_chars)
+ tty->ops->flush_chars(tty);
+ } else {
++ struct n_tty_data *ldata = tty->disc_data;
++
+ while (nr > 0) {
++ mutex_lock(&ldata->output_lock);
+ c = tty->ops->write(tty, b, nr);
++ mutex_unlock(&ldata->output_lock);
+ if (c < 0) {
+ retval = c;
+ goto break_out;
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index 4f6e01cf67f7..aa6db8f4ee18 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -555,7 +555,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
+ */
+ if ((p->port.type == PORT_XR17V35X) ||
+ (p->port.type == PORT_XR17D15X)) {
+- serial_out(p, UART_EXAR_SLEEP, 0xff);
++ serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0);
+ return;
+ }
+
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index 2b52d807934e..4847fc57f3e2 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -248,7 +248,11 @@ int tty_buffer_request_room(struct tty_port *port, size_t size)
+ if ((n = tty_buffer_alloc(port, size)) != NULL) {
+ buf->tail = n;
+ b->commit = b->used;
+- smp_mb();
++ /* paired w/ barrier in flush_to_ldisc(); ensures the
++ * latest commit value can be read before the head is
++ * advanced to the next buffer
++ */
++ smp_wmb();
+ b->next = n;
+ } else
+ size = left;
+@@ -449,17 +453,24 @@ static void flush_to_ldisc(struct work_struct *work)
+
+ while (1) {
+ struct tty_buffer *head = buf->head;
++ struct tty_buffer *next;
+ int count;
+
+ /* Ldisc or user is trying to gain exclusive access */
+ if (atomic_read(&buf->priority))
+ break;
+
++ next = head->next;
++ /* paired w/ barrier in __tty_buffer_request_room();
++ * ensures commit value read is not stale if the head
++ * is advancing to the next buffer
++ */
++ smp_rmb();
+ count = head->commit - head->read;
+ if (!count) {
+- if (head->next == NULL)
++ if (next == NULL)
+ break;
+- buf->head = head->next;
++ buf->head = next;
+ tty_buffer_free(port, head);
+ continue;
+ }
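
The tty_buffer hunks turn a lone smp_mb() into an explicitly paired smp_wmb()/smp_rmb(): the writer must make the final commit value visible before linking the next buffer, and the reader must sample next before (re)reading commit, so a reader that advances never acts on a stale commit. The same publish/observe pairing in portable C11, using release/acquire fences around relaxed accesses (names here are illustrative; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;             /* plain data, like b->commit */
static atomic_int published;    /* plays the role of b->next */

static void *producer(void *arg)
{
        (void)arg;
        payload = 42;                              /* write the data... */
        atomic_thread_fence(memory_order_release); /* ...smp_wmb() analogue */
        atomic_store_explicit(&published, 1, memory_order_relaxed);
        return NULL;
}

static void *consumer(void *arg)
{
        (void)arg;
        while (!atomic_load_explicit(&published, memory_order_relaxed))
                ;                                  /* spin until the link shows up */
        atomic_thread_fence(memory_order_acquire); /* smp_rmb() analogue */
        printf("payload=%d\n", payload);           /* guaranteed to print 42 */
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
}
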
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 689433cdef25..2ddc586457c8 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -959,8 +959,7 @@ EXPORT_SYMBOL_GPL(usb_deregister);
+ * it doesn't support pre_reset/post_reset/reset_resume or
+ * because it doesn't support suspend/resume.
+ *
+- * The caller must hold @intf's device's lock, but not its pm_mutex
+- * and not @intf->dev.sem.
++ * The caller must hold @intf's device's lock, but not @intf's lock.
+ */
+ void usb_forced_unbind_intf(struct usb_interface *intf)
+ {
+@@ -973,16 +972,37 @@ void usb_forced_unbind_intf(struct usb_interface *intf)
+ intf->needs_binding = 1;
+ }
+
++/*
++ * Unbind drivers for @udev's marked interfaces. These interfaces have
++ * the needs_binding flag set, for example by usb_resume_interface().
++ *
++ * The caller must hold @udev's device lock.
++ */
++static void unbind_marked_interfaces(struct usb_device *udev)
++{
++ struct usb_host_config *config;
++ int i;
++ struct usb_interface *intf;
++
++ config = udev->actconfig;
++ if (config) {
++ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
++ intf = config->interface[i];
++ if (intf->dev.driver && intf->needs_binding)
++ usb_forced_unbind_intf(intf);
++ }
++ }
++}
++
+ /* Delayed forced unbinding of a USB interface driver and scan
+ * for rebinding.
+ *
+- * The caller must hold @intf's device's lock, but not its pm_mutex
+- * and not @intf->dev.sem.
++ * The caller must hold @intf's device's lock, but not @intf's lock.
+ *
+ * Note: Rebinds will be skipped if a system sleep transition is in
+ * progress and the PM "complete" callback hasn't occurred yet.
+ */
+-void usb_rebind_intf(struct usb_interface *intf)
++static void usb_rebind_intf(struct usb_interface *intf)
+ {
+ int rc;
+
+@@ -999,68 +1019,66 @@ void usb_rebind_intf(struct usb_interface *intf)
+ }
+ }
+
+-#ifdef CONFIG_PM
+-
+-/* Unbind drivers for @udev's interfaces that don't support suspend/resume
+- * There is no check for reset_resume here because it can be determined
+- * only during resume whether reset_resume is needed.
++/*
++ * Rebind drivers to @udev's marked interfaces. These interfaces have
++ * the needs_binding flag set.
+ *
+ * The caller must hold @udev's device lock.
+ */
+-static void unbind_no_pm_drivers_interfaces(struct usb_device *udev)
++static void rebind_marked_interfaces(struct usb_device *udev)
+ {
+ struct usb_host_config *config;
+ int i;
+ struct usb_interface *intf;
+- struct usb_driver *drv;
+
+ config = udev->actconfig;
+ if (config) {
+ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+ intf = config->interface[i];
+-
+- if (intf->dev.driver) {
+- drv = to_usb_driver(intf->dev.driver);
+- if (!drv->suspend || !drv->resume)
+- usb_forced_unbind_intf(intf);
+- }
++ if (intf->needs_binding)
++ usb_rebind_intf(intf);
+ }
+ }
+ }
+
+-/* Unbind drivers for @udev's interfaces that failed to support reset-resume.
+- * These interfaces have the needs_binding flag set by usb_resume_interface().
++/*
++ * Unbind all of @udev's marked interfaces and then rebind all of them.
++ * This ordering is necessary because some drivers claim several interfaces
++ * when they are first probed.
+ *
+ * The caller must hold @udev's device lock.
+ */
+-static void unbind_no_reset_resume_drivers_interfaces(struct usb_device *udev)
++void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev)
+ {
+- struct usb_host_config *config;
+- int i;
+- struct usb_interface *intf;
+-
+- config = udev->actconfig;
+- if (config) {
+- for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+- intf = config->interface[i];
+- if (intf->dev.driver && intf->needs_binding)
+- usb_forced_unbind_intf(intf);
+- }
+- }
++ unbind_marked_interfaces(udev);
++ rebind_marked_interfaces(udev);
+ }
+
+-static void do_rebind_interfaces(struct usb_device *udev)
++#ifdef CONFIG_PM
++
++/* Unbind drivers for @udev's interfaces that don't support suspend/resume
++ * There is no check for reset_resume here because it can be determined
++ * only during resume whether reset_resume is needed.
++ *
++ * The caller must hold @udev's device lock.
++ */
++static void unbind_no_pm_drivers_interfaces(struct usb_device *udev)
+ {
+ struct usb_host_config *config;
+ int i;
+ struct usb_interface *intf;
++ struct usb_driver *drv;
+
+ config = udev->actconfig;
+ if (config) {
+ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+ intf = config->interface[i];
+- if (intf->needs_binding)
+- usb_rebind_intf(intf);
++
++ if (intf->dev.driver) {
++ drv = to_usb_driver(intf->dev.driver);
++ if (!drv->suspend || !drv->resume)
++ usb_forced_unbind_intf(intf);
++ }
+ }
+ }
+ }
+@@ -1389,7 +1407,7 @@ int usb_resume_complete(struct device *dev)
+ * whose needs_binding flag is set
+ */
+ if (udev->state != USB_STATE_NOTATTACHED)
+- do_rebind_interfaces(udev);
++ rebind_marked_interfaces(udev);
+ return 0;
+ }
+
+@@ -1411,7 +1429,7 @@ int usb_resume(struct device *dev, pm_message_t msg)
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+- unbind_no_reset_resume_drivers_interfaces(udev);
++ unbind_marked_interfaces(udev);
+ }
+
+ /* Avoid PM error messages for devices disconnected while suspended
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index b9d3c43e3859..3f7ef6129874 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -75,7 +75,7 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
+ PCI_SLOT(companion->devfn) != slot)
+ continue;
+ companion_hcd = pci_get_drvdata(companion);
+- if (!companion_hcd)
++ if (!companion_hcd || !companion_hcd->self.root_hub)
+ continue;
+ fn(pdev, hcd, companion, companion_hcd);
+ }
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 3670086fb7c8..5064fc8ba14f 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -5308,10 +5308,11 @@ int usb_reset_device(struct usb_device *udev)
+ else if (cintf->condition ==
+ USB_INTERFACE_BOUND)
+ rebind = 1;
++ if (rebind)
++ cintf->needs_binding = 1;
+ }
+- if (ret == 0 && rebind)
+- usb_rebind_intf(cintf);
+ }
++ usb_unbind_and_rebind_marked_interfaces(udev);
+ }
+
+ usb_autosuspend_device(udev);
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index 823857767a16..0923add72b59 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -55,7 +55,7 @@ extern int usb_match_one_id_intf(struct usb_device *dev,
+ extern int usb_match_device(struct usb_device *dev,
+ const struct usb_device_id *id);
+ extern void usb_forced_unbind_intf(struct usb_interface *intf);
+-extern void usb_rebind_intf(struct usb_interface *intf);
++extern void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev);
+
+ extern int usb_hub_claim_port(struct usb_device *hdev, unsigned port,
+ struct dev_state *owner);
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index f8af8d44af85..546e67a2e4cd 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -815,15 +815,15 @@ struct dwc3_event_depevt {
+ * 12 - VndrDevTstRcved
+ * @reserved15_12: Reserved, not used
+ * @event_info: Information about this event
+- * @reserved31_24: Reserved, not used
++ * @reserved31_25: Reserved, not used
+ */
+ struct dwc3_event_devt {
+ u32 one_bit:1;
+ u32 device_event:7;
+ u32 type:4;
+ u32 reserved15_12:4;
+- u32 event_info:8;
+- u32 reserved31_24:8;
++ u32 event_info:9;
++ u32 reserved31_25:7;
+ } __packed;
+
+ /**
+@@ -856,6 +856,19 @@ union dwc3_event {
+ struct dwc3_event_gevt gevt;
+ };
+
++/**
++ * struct dwc3_gadget_ep_cmd_params - representation of endpoint command
++ * parameters
++ * @param2: third parameter
++ * @param1: second parameter
++ * @param0: first parameter
++ */
++struct dwc3_gadget_ep_cmd_params {
++ u32 param2;
++ u32 param1;
++ u32 param0;
++};
++
+ /*
+ * DWC3 Features to be used as Driver Data
+ */
+@@ -881,11 +894,31 @@ static inline void dwc3_host_exit(struct dwc3 *dwc)
+ #if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+ int dwc3_gadget_init(struct dwc3 *dwc);
+ void dwc3_gadget_exit(struct dwc3 *dwc);
++int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
++int dwc3_gadget_get_link_state(struct dwc3 *dwc);
++int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
++int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
++ unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
++int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param);
+ #else
+ static inline int dwc3_gadget_init(struct dwc3 *dwc)
+ { return 0; }
+ static inline void dwc3_gadget_exit(struct dwc3 *dwc)
+ { }
++static inline int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
++{ return 0; }
++static inline int dwc3_gadget_get_link_state(struct dwc3 *dwc)
++{ return 0; }
++static inline int dwc3_gadget_set_link_state(struct dwc3 *dwc,
++ enum dwc3_link_state state)
++{ return 0; }
++
++static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
++ unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
++{ return 0; }
++static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
++ int cmd, u32 param)
++{ return 0; }
+ #endif
+
+ /* power management interface */
+diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
+index febe1aa7b714..a0ee75b68a80 100644
+--- a/drivers/usb/dwc3/gadget.h
++++ b/drivers/usb/dwc3/gadget.h
+@@ -56,12 +56,6 @@ struct dwc3;
+ /* DEPXFERCFG parameter 0 */
+ #define DWC3_DEPXFERCFG_NUM_XFER_RES(n) ((n) & 0xffff)
+
+-struct dwc3_gadget_ep_cmd_params {
+- u32 param2;
+- u32 param1;
+- u32 param0;
+-};
+-
+ /* -------------------------------------------------------------------------- */
+
+ #define to_dwc3_request(r) (container_of(r, struct dwc3_request, request))
+@@ -85,9 +79,6 @@ static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
+ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ int status);
+
+-int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
+-int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
+-
+ void dwc3_ep0_interrupt(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event);
+ void dwc3_ep0_out_start(struct dwc3 *dwc);
+@@ -95,9 +86,6 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
+ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags);
+ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);
+-int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+- unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
+-int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param);
+
+ /**
+ * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
+diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
+index 2cb52e0438df..1e53092e93c5 100644
+--- a/drivers/usb/gadget/atmel_usba_udc.c
++++ b/drivers/usb/gadget/atmel_usba_udc.c
+@@ -1827,12 +1827,12 @@ static int atmel_usba_stop(struct usb_gadget *gadget,
+ toggle_bias(0);
+ usba_writel(udc, CTRL, USBA_DISABLE_MASK);
+
+- udc->driver = NULL;
+-
+ clk_disable_unprepare(udc->hclk);
+ clk_disable_unprepare(udc->pclk);
+
+- DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);
++ DBG(DBG_GADGET, "unregistered driver `%s'\n", udc->driver->driver.name);
++
++ udc->driver = NULL;
+
+ return 0;
+ }
+diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
+index 0ff33396eef3..d9e15e85258d 100644
+--- a/drivers/usb/gadget/tcm_usb_gadget.c
++++ b/drivers/usb/gadget/tcm_usb_gadget.c
+@@ -1613,7 +1613,7 @@ static struct se_wwn *usbg_make_tport(
+ return ERR_PTR(-ENOMEM);
+ }
+ tport->tport_wwpn = wwpn;
+- snprintf(tport->tport_name, sizeof(tport->tport_name), wnn_name);
++ snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wnn_name);
+ return &tport->tport_wwn;
+ }
+
+diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
+index 0deb9d6cde26..d31814c7238f 100644
+--- a/drivers/usb/gadget/zero.c
++++ b/drivers/usb/gadget/zero.c
+@@ -280,7 +280,7 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
+ ss_opts->isoc_interval = gzero_options.isoc_interval;
+ ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
+ ss_opts->isoc_mult = gzero_options.isoc_mult;
+- ss_opts->isoc_maxburst = gzero_options.isoc_maxpacket;
++ ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
+ ss_opts->bulk_buflen = gzero_options.bulk_buflen;
+
+ func_ss = usb_get_function(func_inst_ss);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 1af67a214d33..67394da1c645 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -134,6 +134,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
+ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
++
++ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+@@ -190,6 +192,10 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ struct usb_hcd *hcd;
+
+ driver = (struct hc_driver *)id->driver_data;
++
++ /* Prevent runtime suspending between USB-2 and USB-3 initialization */
++ pm_runtime_get_noresume(&dev->dev);
++
+ /* Register the USB 2.0 roothub.
+ * FIXME: USB core must know to register the USB 2.0 roothub first.
+ * This is sort of silly, because we could just set the HCD driver flags
+@@ -199,7 +205,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ retval = usb_hcd_pci_probe(dev, id);
+
+ if (retval)
+- return retval;
++ goto put_runtime_pm;
+
+ /* USB 2.0 roothub is stored in the PCI device now. */
+ hcd = dev_get_drvdata(&dev->dev);
+@@ -228,12 +234,17 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ if (xhci->quirks & XHCI_LPM_SUPPORT)
+ hcd_to_bus(xhci->shared_hcd)->root_hub->lpm_capable = 1;
+
++ /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
++ pm_runtime_put_noidle(&dev->dev);
++
+ return 0;
+
+ put_usb3_hcd:
+ usb_put_hcd(xhci->shared_hcd);
+ dealloc_usb2_hcd:
+ usb_hcd_pci_remove(dev);
++put_runtime_pm:
++ pm_runtime_put_noidle(&dev->dev);
+ return retval;
+ }
+
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6bfbd80ec2b9..cbecb5ff7d90 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -556,6 +556,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ struct xhci_generic_trb *trb;
+ struct xhci_ep_ctx *ep_ctx;
+ dma_addr_t addr;
++ u64 hw_dequeue;
+
+ ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
+ ep_index, stream_id);
+@@ -565,56 +566,57 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ stream_id);
+ return;
+ }
+- state->new_cycle_state = 0;
+- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+- "Finding segment containing stopped TRB.");
+- state->new_deq_seg = find_trb_seg(cur_td->start_seg,
+- dev->eps[ep_index].stopped_trb,
+- &state->new_cycle_state);
+- if (!state->new_deq_seg) {
+- WARN_ON(1);
+- return;
+- }
+
+ /* Dig out the cycle state saved by the xHC during the stop ep cmd */
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Finding endpoint context");
+ ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+- state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
++ hw_dequeue = le64_to_cpu(ep_ctx->deq);
++
++ /* Find virtual address and segment of hardware dequeue pointer */
++ state->new_deq_seg = ep_ring->deq_seg;
++ state->new_deq_ptr = ep_ring->dequeue;
++ while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
++ != (dma_addr_t)(hw_dequeue & ~0xf)) {
++ next_trb(xhci, ep_ring, &state->new_deq_seg,
++ &state->new_deq_ptr);
++ if (state->new_deq_ptr == ep_ring->dequeue) {
++ WARN_ON(1);
++ return;
++ }
++ }
++ /*
++ * Find cycle state for last_trb, starting at old cycle state of
++ * hw_dequeue. If there is only one segment ring, find_trb_seg() will
++ * return immediately and cannot toggle the cycle state if this search
++ * wraps around, so add one more toggle manually in that case.
++ */
++ state->new_cycle_state = hw_dequeue & 0x1;
++ if (ep_ring->first_seg == ep_ring->first_seg->next &&
++ cur_td->last_trb < state->new_deq_ptr)
++ state->new_cycle_state ^= 0x1;
+
+ state->new_deq_ptr = cur_td->last_trb;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Finding segment containing last TRB in TD.");
+ state->new_deq_seg = find_trb_seg(state->new_deq_seg,
+- state->new_deq_ptr,
+- &state->new_cycle_state);
++ state->new_deq_ptr, &state->new_cycle_state);
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
+
++ /* Increment to find next TRB after last_trb. Cycle if appropriate. */
+ trb = &state->new_deq_ptr->generic;
+ if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
+ (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
+ state->new_cycle_state ^= 0x1;
+ next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
+
+- /*
+- * If there is only one segment in a ring, find_trb_seg()'s while loop
+- * will not run, and it will return before it has a chance to see if it
+- * needs to toggle the cycle bit. It can't tell if the stalled transfer
+- * ended just before the link TRB on a one-segment ring, or if the TD
+- * wrapped around the top of the ring, because it doesn't have the TD in
+- * question. Look for the one-segment case where stalled TRB's address
+- * is greater than the new dequeue pointer address.
+- */
+- if (ep_ring->first_seg == ep_ring->first_seg->next &&
+- state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
+- state->new_cycle_state ^= 0x1;
++ /* Don't update the ring cycle state for the producer (us). */
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Cycle state = 0x%x", state->new_cycle_state);
+
+- /* Don't update the ring cycle state for the producer (us). */
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "New dequeue segment = %p (virtual)",
+ state->new_deq_seg);
+@@ -802,7 +804,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+ if (list_empty(&ep->cancelled_td_list)) {
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ep->stopped_td = NULL;
+- ep->stopped_trb = NULL;
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ return;
+ }
+@@ -870,11 +871,9 @@ remove_finished_td:
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ }
+
+- /* Clear stopped_td and stopped_trb if endpoint is not halted */
+- if (!(ep->ep_state & EP_HALTED)) {
++ /* Clear stopped_td if endpoint is not halted */
++ if (!(ep->ep_state & EP_HALTED))
+ ep->stopped_td = NULL;
+- ep->stopped_trb = NULL;
+- }
+
+ /*
+ * Drop the lock and complete the URBs in the cancelled TD list.
+@@ -1870,14 +1869,12 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep->ep_state |= EP_HALTED;
+ ep->stopped_td = td;
+- ep->stopped_trb = event_trb;
+ ep->stopped_stream = stream_id;
+
+ xhci_queue_reset_ep(xhci, slot_id, ep_index);
+ xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+
+ ep->stopped_td = NULL;
+- ep->stopped_trb = NULL;
+ ep->stopped_stream = 0;
+
+ xhci_ring_cmd_db(xhci);
+@@ -1959,7 +1956,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ * the ring dequeue pointer or take this TD off any lists yet.
+ */
+ ep->stopped_td = td;
+- ep->stopped_trb = event_trb;
+ return 0;
+ } else {
+ if (trb_comp_code == COMP_STALL) {
+@@ -1971,7 +1967,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ * USB class driver clear the stall later.
+ */
+ ep->stopped_td = td;
+- ep->stopped_trb = event_trb;
+ ep->stopped_stream = ep_ring->stream_id;
+ } else if (xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code)) {
+@@ -2588,7 +2583,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ * successful event after a short transfer.
+ * Ignore it.
+ */
+- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
++ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+ ep_ring->last_td_was_short) {
+ ep_ring->last_td_was_short = false;
+ ret = 0;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index f178f762b543..01aa4c9fa558 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -400,16 +400,16 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
+
+ #else
+
+-static int xhci_try_enable_msi(struct usb_hcd *hcd)
++static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
+ {
+ return 0;
+ }
+
+-static void xhci_cleanup_msix(struct xhci_hcd *xhci)
++static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
+ {
+ }
+
+-static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
++static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+ {
+ }
+
+@@ -2932,7 +2932,6 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
+ xhci_ring_cmd_db(xhci);
+ }
+ virt_ep->stopped_td = NULL;
+- virt_ep->stopped_trb = NULL;
+ virt_ep->stopped_stream = 0;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index ed3a425de8ce..75f775c993ee 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -864,8 +864,6 @@ struct xhci_virt_ep {
+ #define EP_GETTING_NO_STREAMS (1 << 5)
+ /* ---- Related to URB cancellation ---- */
+ struct list_head cancelled_td_list;
+- /* The TRB that was last reported in a stopped endpoint ring */
+- union xhci_trb *stopped_trb;
+ struct xhci_td *stopped_td;
+ unsigned int stopped_stream;
+ /* Watchdog timer for stop endpoint command to cancel URBs */
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index f7dca0b92bfb..2cca870d9762 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -439,7 +439,6 @@ void musb_hnp_stop(struct musb *musb)
+ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ u8 devctl)
+ {
+- struct usb_otg *otg = musb->xceiv->otg;
+ irqreturn_t handled = IRQ_NONE;
+
+ dev_dbg(musb->controller, "<== DevCtl=%02x, int_usb=0x%x\n", devctl,
+@@ -654,7 +653,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_suspend(musb);
+- musb->is_active = otg->gadget->b_hnp_enable;
++ musb->is_active = musb->g.b_hnp_enable;
+ if (musb->is_active) {
+ musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
+ dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
+@@ -670,7 +669,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ break;
+ case OTG_STATE_A_HOST:
+ musb->xceiv->state = OTG_STATE_A_SUSPEND;
+- musb->is_active = otg->host->b_hnp_enable;
++ musb->is_active = musb->hcd->self.b_hnp_enable;
+ break;
+ case OTG_STATE_B_HOST:
+ /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
+diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
+index 217339dd7a90..17ea3f271bd8 100644
+--- a/drivers/usb/phy/phy-ulpi.c
++++ b/drivers/usb/phy/phy-ulpi.c
+@@ -47,6 +47,8 @@ struct ulpi_info {
+ static struct ulpi_info ulpi_ids[] = {
+ ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
+ ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"),
++ ULPI_INFO(ULPI_ID(0x0424, 0x0007), "SMSC USB3320"),
++ ULPI_INFO(ULPI_ID(0x0451, 0x1507), "TI TUSB1210"),
+ };
+
+ static int ulpi_set_otg_flags(struct usb_phy *phy)
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index beb8edce4ef2..6e09306b2a5e 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -83,6 +83,9 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
+ { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
++ { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
++ { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
++ { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
+ { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
+ { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
+ { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index c38b8c00c06f..42bc082896ac 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -121,8 +121,11 @@
+ #define SUPERIAL_VENDOR_ID 0x5372
+ #define SUPERIAL_PRODUCT_ID 0x2303
+
+-/* Hewlett-Packard LD220-HP POS Pole Display */
++/* Hewlett-Packard POS Pole Displays */
+ #define HP_VENDOR_ID 0x03f0
++#define HP_LD960_PRODUCT_ID 0x0b39
++#define HP_LCM220_PRODUCT_ID 0x3139
++#define HP_LCM960_PRODUCT_ID 0x3239
+ #define HP_LD220_PRODUCT_ID 0x3524
+
+ /* Cressi Edy (diving computer) PC interface */
+diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
+index 088511a58a26..0aca4e6b2871 100644
+--- a/drivers/video/atmel_lcdfb.c
++++ b/drivers/video/atmel_lcdfb.c
+@@ -1081,6 +1081,12 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
+ goto free_cmap;
+ }
+
++ ret = atmel_lcdfb_set_par(info);
++ if (ret < 0) {
++ dev_err(dev, "set par failed: %d\n", ret);
++ goto unregister_irqs;
++ }
++
+ dev_set_drvdata(dev, info);
+
+ /*
+diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
+index e45833ce975b..182bd680141f 100644
+--- a/drivers/video/aty/mach64_accel.c
++++ b/drivers/video/aty/mach64_accel.c
+@@ -4,6 +4,7 @@
+ */
+
+ #include <linux/delay.h>
++#include <asm/unaligned.h>
+ #include <linux/fb.h>
+ #include <video/mach64.h>
+ #include "atyfb.h"
+@@ -419,7 +420,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
+ u32 *pbitmap, dwords = (src_bytes + 3) / 4;
+ for (pbitmap = (u32*)(image->data); dwords; dwords--, pbitmap++) {
+ wait_for_fifo(1, par);
+- aty_st_le32(HOST_DATA0, le32_to_cpup(pbitmap), par);
++ aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par);
+ }
+ }
+
+diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
+index 95ec042ddbf8..0fe02e22d9a4 100644
+--- a/drivers/video/aty/mach64_cursor.c
++++ b/drivers/video/aty/mach64_cursor.c
+@@ -5,6 +5,7 @@
+ #include <linux/fb.h>
+ #include <linux/init.h>
+ #include <linux/string.h>
++#include "../fb_draw.h"
+
+ #include <asm/io.h>
+
+@@ -157,24 +158,33 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
+
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j++) {
++ u16 l = 0xaaaa;
+ b = *src++;
+ m = *msk++;
+ switch (cursor->rop) {
+ case ROP_XOR:
+ // Upper 4 bits of mask data
+- fb_writeb(cursor_bits_lookup[(b ^ m) >> 4], dst++);
++ l = cursor_bits_lookup[(b ^ m) >> 4] |
+ // Lower 4 bits of mask
+- fb_writeb(cursor_bits_lookup[(b ^ m) & 0x0f],
+- dst++);
++ (cursor_bits_lookup[(b ^ m) & 0x0f] << 8);
+ break;
+ case ROP_COPY:
+ // Upper 4 bits of mask data
+- fb_writeb(cursor_bits_lookup[(b & m) >> 4], dst++);
++ l = cursor_bits_lookup[(b & m) >> 4] |
+ // Lower 4 bits of mask
+- fb_writeb(cursor_bits_lookup[(b & m) & 0x0f],
+- dst++);
++ (cursor_bits_lookup[(b & m) & 0x0f] << 8);
+ break;
+ }
++ /*
++ * If cursor size is not a multiple of 8 characters
++ * we must pad it with transparent pattern (0xaaaa).
++ */
++ if ((j + 1) * 8 > cursor->image.width) {
++ l = comp(l, 0xaaaa,
++ (1 << ((cursor->image.width & 7) * 2)) - 1);
++ }
++ fb_writeb(l & 0xff, dst++);
++ fb_writeb(l >> 8, dst++);
+ }
+ dst += offset;
+ }
+diff --git a/drivers/video/cfbcopyarea.c b/drivers/video/cfbcopyarea.c
+index bb5a96b1645d..bcb57235fcc7 100644
+--- a/drivers/video/cfbcopyarea.c
++++ b/drivers/video/cfbcopyarea.c
+@@ -43,13 +43,22 @@
+ */
+
+ static void
+-bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+- const unsigned long __iomem *src, int src_idx, int bits,
++bitcpy(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
++ const unsigned long __iomem *src, unsigned src_idx, int bits,
+ unsigned n, u32 bswapmask)
+ {
+ unsigned long first, last;
+ int const shift = dst_idx-src_idx;
+- int left, right;
++
++#if 0
++ /*
++ * If you suspect bug in this function, compare it with this simple
++ * memmove implementation.
++ */
++ fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
++ (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
++ return;
++#endif
+
+ first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
+ last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
+@@ -98,9 +107,8 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ unsigned long d0, d1;
+ int m;
+
+- right = shift & (bits - 1);
+- left = -shift & (bits - 1);
+- bswapmask &= shift;
++ int const left = shift & (bits - 1);
++ int const right = -shift & (bits - 1);
+
+ if (dst_idx+n <= bits) {
+ // Single destination word
+@@ -110,15 +118,15 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ if (shift > 0) {
+ // Single source word
+- d0 >>= right;
++ d0 <<= left;
+ } else if (src_idx+n <= bits) {
+ // Single source word
+- d0 <<= left;
++ d0 >>= right;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src + 1);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0<<left | d1>>right;
++ d0 = d0 >> right | d1 << left;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
+@@ -135,60 +143,59 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ if (shift > 0) {
+ // Single source word
+ d1 = d0;
+- d0 >>= right;
+- dst++;
++ d0 <<= left;
+ n -= bits - dst_idx;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src++);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+
+- d0 = d0<<left | d1>>right;
+- dst++;
++ d0 = d0 >> right | d1 << left;
+ n -= bits - dst_idx;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
+ d0 = d1;
++ dst++;
+
+ // Main chunk
+ m = n % bits;
+ n /= bits;
+ while ((n >= 4) && !bswapmask) {
+ d1 = FB_READL(src++);
+- FB_WRITEL(d0 << left | d1 >> right, dst++);
++ FB_WRITEL(d0 >> right | d1 << left, dst++);
+ d0 = d1;
+ d1 = FB_READL(src++);
+- FB_WRITEL(d0 << left | d1 >> right, dst++);
++ FB_WRITEL(d0 >> right | d1 << left, dst++);
+ d0 = d1;
+ d1 = FB_READL(src++);
+- FB_WRITEL(d0 << left | d1 >> right, dst++);
++ FB_WRITEL(d0 >> right | d1 << left, dst++);
+ d0 = d1;
+ d1 = FB_READL(src++);
+- FB_WRITEL(d0 << left | d1 >> right, dst++);
++ FB_WRITEL(d0 >> right | d1 << left, dst++);
+ d0 = d1;
+ n -= 4;
+ }
+ while (n--) {
+ d1 = FB_READL(src++);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0 << left | d1 >> right;
++ d0 = d0 >> right | d1 << left;
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(d0, dst++);
+ d0 = d1;
+ }
+
+ // Trailing bits
+- if (last) {
+- if (m <= right) {
++ if (m) {
++ if (m <= bits - right) {
+ // Single source word
+- d0 <<= left;
++ d0 >>= right;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src);
+ d1 = fb_rev_pixels_in_long(d1,
+ bswapmask);
+- d0 = d0<<left | d1>>right;
++ d0 = d0 >> right | d1 << left;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
+@@ -202,43 +209,46 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ */
+
+ static void
+-bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+- const unsigned long __iomem *src, int src_idx, int bits,
++bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
++ const unsigned long __iomem *src, unsigned src_idx, int bits,
+ unsigned n, u32 bswapmask)
+ {
+ unsigned long first, last;
+ int shift;
+
+- dst += (n-1)/bits;
+- src += (n-1)/bits;
+- if ((n-1) % bits) {
+- dst_idx += (n-1) % bits;
+- dst += dst_idx >> (ffs(bits) - 1);
+- dst_idx &= bits - 1;
+- src_idx += (n-1) % bits;
+- src += src_idx >> (ffs(bits) - 1);
+- src_idx &= bits - 1;
+- }
++#if 0
++ /*
++ * If you suspect bug in this function, compare it with this simple
++ * memmove implementation.
++ */
++ fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
++ (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
++ return;
++#endif
++
++ dst += (dst_idx + n - 1) / bits;
++ src += (src_idx + n - 1) / bits;
++ dst_idx = (dst_idx + n - 1) % bits;
++ src_idx = (src_idx + n - 1) % bits;
+
+ shift = dst_idx-src_idx;
+
+- first = fb_shifted_pixels_mask_long(p, bits - 1 - dst_idx, bswapmask);
+- last = ~fb_shifted_pixels_mask_long(p, bits - 1 - ((dst_idx-n) % bits),
+- bswapmask);
++ first = ~fb_shifted_pixels_mask_long(p, (dst_idx + 1) % bits, bswapmask);
++ last = fb_shifted_pixels_mask_long(p, (bits + dst_idx + 1 - n) % bits, bswapmask);
+
+ if (!shift) {
+ // Same alignment for source and dest
+
+ if ((unsigned long)dst_idx+1 >= n) {
+ // Single word
+- if (last)
+- first &= last;
+- FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
++ if (first)
++ last &= first;
++ FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
+ } else {
+ // Multiple destination words
+
+ // Leading bits
+- if (first != ~0UL) {
++ if (first) {
+ FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
+ dst--;
+ src--;
+@@ -262,7 +272,7 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ FB_WRITEL(FB_READL(src--), dst--);
+
+ // Trailing bits
+- if (last)
++ if (last != -1UL)
+ FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
+ }
+ } else {
+@@ -270,29 +280,28 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ unsigned long d0, d1;
+ int m;
+
+- int const left = -shift & (bits-1);
+- int const right = shift & (bits-1);
+- bswapmask &= shift;
++ int const left = shift & (bits-1);
++ int const right = -shift & (bits-1);
+
+ if ((unsigned long)dst_idx+1 >= n) {
+ // Single destination word
+- if (last)
+- first &= last;
++ if (first)
++ last &= first;
+ d0 = FB_READL(src);
+ if (shift < 0) {
+ // Single source word
+- d0 <<= left;
++ d0 >>= right;
+ } else if (1+(unsigned long)src_idx >= n) {
+ // Single source word
+- d0 >>= right;
++ d0 <<= left;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src - 1);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0>>right | d1<<left;
++ d0 = d0 << left | d1 >> right;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+- FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
++ FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
+ } else {
+ // Multiple destination words
+ /** We must always remember the last value read, because in case
+@@ -307,12 +316,12 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ if (shift < 0) {
+ // Single source word
+ d1 = d0;
+- d0 <<= left;
++ d0 >>= right;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src--);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0>>right | d1<<left;
++ d0 = d0 << left | d1 >> right;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
+@@ -325,39 +334,39 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ n /= bits;
+ while ((n >= 4) && !bswapmask) {
+ d1 = FB_READL(src--);
+- FB_WRITEL(d0 >> right | d1 << left, dst--);
++ FB_WRITEL(d0 << left | d1 >> right, dst--);
+ d0 = d1;
+ d1 = FB_READL(src--);
+- FB_WRITEL(d0 >> right | d1 << left, dst--);
++ FB_WRITEL(d0 << left | d1 >> right, dst--);
+ d0 = d1;
+ d1 = FB_READL(src--);
+- FB_WRITEL(d0 >> right | d1 << left, dst--);
++ FB_WRITEL(d0 << left | d1 >> right, dst--);
+ d0 = d1;
+ d1 = FB_READL(src--);
+- FB_WRITEL(d0 >> right | d1 << left, dst--);
++ FB_WRITEL(d0 << left | d1 >> right, dst--);
+ d0 = d1;
+ n -= 4;
+ }
+ while (n--) {
+ d1 = FB_READL(src--);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0 >> right | d1 << left;
++ d0 = d0 << left | d1 >> right;
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(d0, dst--);
+ d0 = d1;
+ }
+
+ // Trailing bits
+- if (last) {
+- if (m <= left) {
++ if (m) {
++ if (m <= bits - left) {
+ // Single source word
+- d0 >>= right;
++ d0 <<= left;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src);
+ d1 = fb_rev_pixels_in_long(d1,
+ bswapmask);
+- d0 = d0>>right | d1<<left;
++ d0 = d0 << left | d1 >> right;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
+@@ -371,9 +380,9 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+ u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
+ u32 height = area->height, width = area->width;
+ unsigned long const bits_per_line = p->fix.line_length*8u;
+- unsigned long __iomem *dst = NULL, *src = NULL;
++ unsigned long __iomem *base = NULL;
+ int bits = BITS_PER_LONG, bytes = bits >> 3;
+- int dst_idx = 0, src_idx = 0, rev_copy = 0;
++ unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
+ u32 bswapmask = fb_compute_bswapmask(p);
+
+ if (p->state != FBINFO_STATE_RUNNING)
+@@ -389,7 +398,7 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+
+ // split the base of the framebuffer into a long-aligned address and the
+ // index of the first bit
+- dst = src = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
++ base = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
+ dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1));
+ // add offset of source and target area
+ dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel;
+@@ -402,20 +411,14 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+ while (height--) {
+ dst_idx -= bits_per_line;
+ src_idx -= bits_per_line;
+- dst += dst_idx >> (ffs(bits) - 1);
+- dst_idx &= (bytes - 1);
+- src += src_idx >> (ffs(bits) - 1);
+- src_idx &= (bytes - 1);
+- bitcpy_rev(p, dst, dst_idx, src, src_idx, bits,
++ bitcpy_rev(p, base + (dst_idx / bits), dst_idx % bits,
++ base + (src_idx / bits), src_idx % bits, bits,
+ width*p->var.bits_per_pixel, bswapmask);
+ }
+ } else {
+ while (height--) {
+- dst += dst_idx >> (ffs(bits) - 1);
+- dst_idx &= (bytes - 1);
+- src += src_idx >> (ffs(bits) - 1);
+- src_idx &= (bytes - 1);
+- bitcpy(p, dst, dst_idx, src, src_idx, bits,
++ bitcpy(p, base + (dst_idx / bits), dst_idx % bits,
++ base + (src_idx / bits), src_idx % bits, bits,
+ width*p->var.bits_per_pixel, bswapmask);
+ dst_idx += bits_per_line;
+ src_idx += bits_per_line;
+diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
+index 8335a6fe303e..0d5cb85d071a 100644
+--- a/drivers/video/matrox/matroxfb_accel.c
++++ b/drivers/video/matrox/matroxfb_accel.c
+@@ -192,10 +192,18 @@ void matrox_cfbX_init(struct matrox_fb_info *minfo)
+ minfo->accel.m_dwg_rect = M_DWG_TRAP | M_DWG_SOLID | M_DWG_ARZERO | M_DWG_SGNZERO | M_DWG_SHIFTZERO;
+ if (isMilleniumII(minfo)) minfo->accel.m_dwg_rect |= M_DWG_TRANSC;
+ minfo->accel.m_opmode = mopmode;
++ minfo->accel.m_access = maccess;
++ minfo->accel.m_pitch = mpitch;
+ }
+
+ EXPORT_SYMBOL(matrox_cfbX_init);
+
++static void matrox_accel_restore_maccess(struct matrox_fb_info *minfo)
++{
++ mga_outl(M_MACCESS, minfo->accel.m_access);
++ mga_outl(M_PITCH, minfo->accel.m_pitch);
++}
++
+ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ int sx, int dy, int dx, int height, int width)
+ {
+@@ -207,7 +215,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ CRITBEGIN
+
+ if ((dy < sy) || ((dy == sy) && (dx <= sx))) {
+- mga_fifo(2);
++ mga_fifo(4);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_SGNZERO |
+ M_DWG_BFCOL | M_DWG_REPLACE);
+ mga_outl(M_AR5, vxres);
+@@ -215,7 +224,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ start = sy*vxres+sx+curr_ydstorg(minfo);
+ end = start+width;
+ } else {
+- mga_fifo(3);
++ mga_fifo(5);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_BFCOL | M_DWG_REPLACE);
+ mga_outl(M_SGN, 5);
+ mga_outl(M_AR5, -vxres);
+@@ -224,7 +234,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ start = end+width;
+ dy += height-1;
+ }
+- mga_fifo(4);
++ mga_fifo(6);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_AR0, end);
+ mga_outl(M_AR3, start);
+ mga_outl(M_FXBNDRY, ((dx+width)<<16) | dx);
+@@ -246,7 +257,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
+ CRITBEGIN
+
+ if ((dy < sy) || ((dy == sy) && (dx <= sx))) {
+- mga_fifo(2);
++ mga_fifo(4);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_SGNZERO |
+ M_DWG_BFCOL | M_DWG_REPLACE);
+ mga_outl(M_AR5, vxres);
+@@ -254,7 +266,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
+ start = sy*vxres+sx+curr_ydstorg(minfo);
+ end = start+width;
+ } else {
+- mga_fifo(3);
++ mga_fifo(5);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_BFCOL | M_DWG_REPLACE);
+ mga_outl(M_SGN, 5);
+ mga_outl(M_AR5, -vxres);
+@@ -263,7 +276,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
+ start = end+width;
+ dy += height-1;
+ }
+- mga_fifo(5);
++ mga_fifo(7);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_AR0, end);
+ mga_outl(M_AR3, start);
+ mga_outl(M_FXBNDRY, ((dx+width)<<16) | dx);
+@@ -298,7 +312,8 @@ static void matroxfb_accel_clear(struct matrox_fb_info *minfo, u_int32_t color,
+
+ CRITBEGIN
+
+- mga_fifo(5);
++ mga_fifo(7);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE);
+ mga_outl(M_FCOL, color);
+ mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx);
+@@ -341,7 +356,8 @@ static void matroxfb_cfb4_clear(struct matrox_fb_info *minfo, u_int32_t bgx,
+ width >>= 1;
+ sx >>= 1;
+ if (width) {
+- mga_fifo(5);
++ mga_fifo(7);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE2);
+ mga_outl(M_FCOL, bgx);
+ mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx);
+@@ -415,7 +431,8 @@ static void matroxfb_1bpp_imageblit(struct matrox_fb_info *minfo, u_int32_t fgx,
+
+ CRITBEGIN
+
+- mga_fifo(3);
++ mga_fifo(5);
++ matrox_accel_restore_maccess(minfo);
+ if (easy)
+ mga_outl(M_DWGCTL, M_DWG_ILOAD | M_DWG_SGNZERO | M_DWG_SHIFTZERO | M_DWG_BMONOWF | M_DWG_LINEAR | M_DWG_REPLACE);
+ else
+@@ -425,7 +442,8 @@ static void matroxfb_1bpp_imageblit(struct matrox_fb_info *minfo, u_int32_t fgx,
+ fxbndry = ((xx + width - 1) << 16) | xx;
+ mmio = minfo->mmio.vbase;
+
+- mga_fifo(6);
++ mga_fifo(8);
++ matrox_accel_restore_maccess(minfo);
+ mga_writel(mmio, M_FXBNDRY, fxbndry);
+ mga_writel(mmio, M_AR0, ar0);
+ mga_writel(mmio, M_AR3, 0);
+diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
+index 11ed57bb704e..556d96ce40bf 100644
+--- a/drivers/video/matrox/matroxfb_base.h
++++ b/drivers/video/matrox/matroxfb_base.h
+@@ -307,6 +307,8 @@ struct matrox_accel_data {
+ #endif
+ u_int32_t m_dwg_rect;
+ u_int32_t m_opmode;
++ u_int32_t m_access;
++ u_int32_t m_pitch;
+ };
+
+ struct v4l2_queryctrl;
+diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
+index c9c8e5a1fdee..ba77f753649c 100644
+--- a/drivers/video/tgafb.c
++++ b/drivers/video/tgafb.c
+@@ -1142,222 +1142,57 @@ copyarea_line_32bpp(struct fb_info *info, u32 dy, u32 sy,
+ __raw_writel(TGA_MODE_SBM_24BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG);
+ }
+
+-/* The general case of forward copy in 8bpp mode. */
++/* The (almost) general case of backward copy in 8bpp mode. */
+ static inline void
+-copyarea_foreward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+- u32 height, u32 width, u32 line_length)
++copyarea_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
++ u32 height, u32 width, u32 line_length,
++ const struct fb_copyarea *area)
+ {
+ struct tga_par *par = (struct tga_par *) info->par;
+- unsigned long i, copied, left;
+- unsigned long dpos, spos, dalign, salign, yincr;
+- u32 smask_first, dmask_first, dmask_last;
+- int pixel_shift, need_prime, need_second;
+- unsigned long n64, n32, xincr_first;
++ unsigned i, yincr;
++ int depos, sepos, backward, last_step, step;
++ u32 mask_last;
++ unsigned n32;
+ void __iomem *tga_regs;
+ void __iomem *tga_fb;
+
+- yincr = line_length;
+- if (dy > sy) {
+- dy += height - 1;
+- sy += height - 1;
+- yincr = -yincr;
+- }
+-
+- /* Compute the offsets and alignments in the frame buffer.
+- More than anything else, these control how we do copies. */
+- dpos = dy * line_length + dx;
+- spos = sy * line_length + sx;
+- dalign = dpos & 7;
+- salign = spos & 7;
+- dpos &= -8;
+- spos &= -8;
+-
+- /* Compute the value for the PIXELSHIFT register. This controls
+- both non-co-aligned source and destination and copy direction. */
+- if (dalign >= salign)
+- pixel_shift = dalign - salign;
+- else
+- pixel_shift = 8 - (salign - dalign);
+-
+- /* Figure out if we need an additional priming step for the
+- residue register. */
+- need_prime = (salign > dalign);
+- if (need_prime)
+- dpos -= 8;
+-
+- /* Begin by copying the leading unaligned destination. Copy enough
+- to make the next destination address 32-byte aligned. */
+- copied = 32 - (dalign + (dpos & 31));
+- if (copied == 32)
+- copied = 0;
+- xincr_first = (copied + 7) & -8;
+- smask_first = dmask_first = (1ul << copied) - 1;
+- smask_first <<= salign;
+- dmask_first <<= dalign + need_prime*8;
+- if (need_prime && copied > 24)
+- copied -= 8;
+- left = width - copied;
+-
+- /* Care for small copies. */
+- if (copied > width) {
+- u32 t;
+- t = (1ul << width) - 1;
+- t <<= dalign + need_prime*8;
+- dmask_first &= t;
+- left = 0;
+- }
+-
+- /* Attempt to use 64-byte copies. This is only possible if the
+- source and destination are co-aligned at 64 bytes. */
+- n64 = need_second = 0;
+- if ((dpos & 63) == (spos & 63)
+- && (height == 1 || line_length % 64 == 0)) {
+- /* We may need a 32-byte copy to ensure 64 byte alignment. */
+- need_second = (dpos + xincr_first) & 63;
+- if ((need_second & 32) != need_second)
+- printk(KERN_ERR "tgafb: need_second wrong\n");
+- if (left >= need_second + 64) {
+- left -= need_second;
+- n64 = left / 64;
+- left %= 64;
+- } else
+- need_second = 0;
+- }
+-
+- /* Copy trailing full 32-byte sections. This will be the main
+- loop if the 64 byte loop can't be used. */
+- n32 = left / 32;
+- left %= 32;
+-
+- /* Copy the trailing unaligned destination. */
+- dmask_last = (1ul << left) - 1;
+-
+- tga_regs = par->tga_regs_base;
+- tga_fb = par->tga_fb_base;
+-
+- /* Set up the MODE and PIXELSHIFT registers. */
+- __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_COPY, tga_regs+TGA_MODE_REG);
+- __raw_writel(pixel_shift, tga_regs+TGA_PIXELSHIFT_REG);
+- wmb();
+-
+- for (i = 0; i < height; ++i) {
+- unsigned long j;
+- void __iomem *sfb;
+- void __iomem *dfb;
+-
+- sfb = tga_fb + spos;
+- dfb = tga_fb + dpos;
+- if (dmask_first) {
+- __raw_writel(smask_first, sfb);
+- wmb();
+- __raw_writel(dmask_first, dfb);
+- wmb();
+- sfb += xincr_first;
+- dfb += xincr_first;
+- }
+-
+- if (need_second) {
+- __raw_writel(0xffffffff, sfb);
+- wmb();
+- __raw_writel(0xffffffff, dfb);
+- wmb();
+- sfb += 32;
+- dfb += 32;
+- }
+-
+- if (n64 && (((unsigned long)sfb | (unsigned long)dfb) & 63))
+- printk(KERN_ERR
+- "tgafb: misaligned copy64 (s:%p, d:%p)\n",
+- sfb, dfb);
+-
+- for (j = 0; j < n64; ++j) {
+- __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC);
+- wmb();
+- __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST);
+- wmb();
+- sfb += 64;
+- dfb += 64;
+- }
+-
+- for (j = 0; j < n32; ++j) {
+- __raw_writel(0xffffffff, sfb);
+- wmb();
+- __raw_writel(0xffffffff, dfb);
+- wmb();
+- sfb += 32;
+- dfb += 32;
+- }
+-
+- if (dmask_last) {
+- __raw_writel(0xffffffff, sfb);
+- wmb();
+- __raw_writel(dmask_last, dfb);
+- wmb();
+- }
+-
+- spos += yincr;
+- dpos += yincr;
++ /* Do acceleration only if we are aligned on 8 pixels */
++ if ((dx | sx | width) & 7) {
++ cfb_copyarea(info, area);
++ return;
+ }
+
+- /* Reset the MODE register to normal. */
+- __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG);
+-}
+-
+-/* The (almost) general case of backward copy in 8bpp mode. */
+-static inline void
+-copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+- u32 height, u32 width, u32 line_length,
+- const struct fb_copyarea *area)
+-{
+- struct tga_par *par = (struct tga_par *) info->par;
+- unsigned long i, left, yincr;
+- unsigned long depos, sepos, dealign, sealign;
+- u32 mask_first, mask_last;
+- unsigned long n32;
+- void __iomem *tga_regs;
+- void __iomem *tga_fb;
+-
+ yincr = line_length;
+ if (dy > sy) {
+ dy += height - 1;
+ sy += height - 1;
+ yincr = -yincr;
+ }
++ backward = dy == sy && dx > sx && dx < sx + width;
+
+ /* Compute the offsets and alignments in the frame buffer.
+ More than anything else, these control how we do copies. */
+- depos = dy * line_length + dx + width;
+- sepos = sy * line_length + sx + width;
+- dealign = depos & 7;
+- sealign = sepos & 7;
+-
+- /* ??? The documentation appears to be incorrect (or very
+- misleading) wrt how pixel shifting works in backward copy
+- mode, i.e. when PIXELSHIFT is negative. I give up for now.
+- Do handle the common case of co-aligned backward copies,
+- but frob everything else back on generic code. */
+- if (dealign != sealign) {
+- cfb_copyarea(info, area);
+- return;
+- }
+-
+- /* We begin the copy with the trailing pixels of the
+- unaligned destination. */
+- mask_first = (1ul << dealign) - 1;
+- left = width - dealign;
+-
+- /* Care for small copies. */
+- if (dealign > width) {
+- mask_first ^= (1ul << (dealign - width)) - 1;
+- left = 0;
+- }
++ depos = dy * line_length + dx;
++ sepos = sy * line_length + sx;
++ if (backward)
++ depos += width, sepos += width;
+
+ /* Next copy full words at a time. */
+- n32 = left / 32;
+- left %= 32;
++ n32 = width / 32;
++ last_step = width % 32;
+
+ /* Finally copy the unaligned head of the span. */
+- mask_last = -1 << (32 - left);
++ mask_last = (1ul << last_step) - 1;
++
++ if (!backward) {
++ step = 32;
++ last_step = 32;
++ } else {
++ step = -32;
++ last_step = -last_step;
++ sepos -= 32;
++ depos -= 32;
++ }
+
+ tga_regs = par->tga_regs_base;
+ tga_fb = par->tga_fb_base;
+@@ -1374,25 +1209,33 @@ copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+
+ sfb = tga_fb + sepos;
+ dfb = tga_fb + depos;
+- if (mask_first) {
+- __raw_writel(mask_first, sfb);
+- wmb();
+- __raw_writel(mask_first, dfb);
+- wmb();
+- }
+
+- for (j = 0; j < n32; ++j) {
+- sfb -= 32;
+- dfb -= 32;
++ for (j = 0; j < n32; j++) {
++ if (j < 2 && j + 1 < n32 && !backward &&
++ !(((unsigned long)sfb | (unsigned long)dfb) & 63)) {
++ do {
++ __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC);
++ wmb();
++ __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST);
++ wmb();
++ sfb += 64;
++ dfb += 64;
++ j += 2;
++ } while (j + 1 < n32);
++ j--;
++ continue;
++ }
+ __raw_writel(0xffffffff, sfb);
+ wmb();
+ __raw_writel(0xffffffff, dfb);
+ wmb();
++ sfb += step;
++ dfb += step;
+ }
+
+ if (mask_last) {
+- sfb -= 32;
+- dfb -= 32;
++ sfb += last_step - step;
++ dfb += last_step - step;
+ __raw_writel(mask_last, sfb);
+ wmb();
+ __raw_writel(mask_last, dfb);
+@@ -1453,14 +1296,9 @@ tgafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+ else if (bpp == 32)
+ cfb_copyarea(info, area);
+
+- /* Detect overlapping source and destination that requires
+- a backward copy. */
+- else if (dy == sy && dx > sx && dx < sx + width)
+- copyarea_backward_8bpp(info, dx, dy, sx, sy, height,
+- width, line_length, area);
+ else
+- copyarea_foreward_8bpp(info, dx, dy, sx, sy, height,
+- width, line_length);
++ copyarea_8bpp(info, dx, dy, sx, sy, height,
++ width, line_length, area);
+ }
+
+
+diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
+index 40788c925d1c..73705aff53cb 100644
+--- a/drivers/w1/w1_netlink.c
++++ b/drivers/w1/w1_netlink.c
+@@ -54,28 +54,29 @@ static void w1_send_slave(struct w1_master *dev, u64 rn)
+ struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
+ struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
+ int avail;
++ u64 *data;
+
+ /* update kernel slave list */
+ w1_slave_found(dev, rn);
+
+ avail = dev->priv_size - cmd->len;
+
+- if (avail > 8) {
+- u64 *data = (void *)(cmd + 1) + cmd->len;
++ if (avail < 8) {
++ msg->ack++;
++ cn_netlink_send(msg, 0, GFP_KERNEL);
+
+- *data = rn;
+- cmd->len += 8;
+- hdr->len += 8;
+- msg->len += 8;
+- return;
++ msg->len = sizeof(struct w1_netlink_msg) +
++ sizeof(struct w1_netlink_cmd);
++ hdr->len = sizeof(struct w1_netlink_cmd);
++ cmd->len = 0;
+ }
+
+- msg->ack++;
+- cn_netlink_send(msg, 0, GFP_KERNEL);
++ data = (void *)(cmd + 1) + cmd->len;
+
+- msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
+- hdr->len = sizeof(struct w1_netlink_cmd);
+- cmd->len = 0;
++ *data = rn;
++ cmd->len += 8;
++ hdr->len += 8;
++ msg->len += 8;
+ }
+
+ static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg,
+diff --git a/fs/aio.c b/fs/aio.c
+index 062a5f6a1448..12a3de0ee6da 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -52,7 +52,8 @@
+ struct aio_ring {
+ unsigned id; /* kernel internal index number */
+ unsigned nr; /* number of io_events */
+- unsigned head;
++ unsigned head; /* Written to by userland or under ring_lock
++ * mutex by aio_read_events_ring(). */
+ unsigned tail;
+
+ unsigned magic;
+@@ -243,6 +244,11 @@ static void aio_free_ring(struct kioctx *ctx)
+ {
+ int i;
+
++ /* Disconnect the kiotx from the ring file. This prevents future
++ * accesses to the kioctx from page migration.
++ */
++ put_aio_ring_file(ctx);
++
+ for (i = 0; i < ctx->nr_pages; i++) {
+ struct page *page;
+ pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
+@@ -254,8 +260,6 @@ static void aio_free_ring(struct kioctx *ctx)
+ put_page(page);
+ }
+
+- put_aio_ring_file(ctx);
+-
+ if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
+ kfree(ctx->ring_pages);
+ ctx->ring_pages = NULL;
+@@ -283,29 +287,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
+ {
+ struct kioctx *ctx;
+ unsigned long flags;
++ pgoff_t idx;
+ int rc;
+
+ rc = 0;
+
+- /* Make sure the old page hasn't already been changed */
++ /* mapping->private_lock here protects against the kioctx teardown. */
+ spin_lock(&mapping->private_lock);
+ ctx = mapping->private_data;
+- if (ctx) {
+- pgoff_t idx;
+- spin_lock_irqsave(&ctx->completion_lock, flags);
+- idx = old->index;
+- if (idx < (pgoff_t)ctx->nr_pages) {
+- if (ctx->ring_pages[idx] != old)
+- rc = -EAGAIN;
+- } else
+- rc = -EINVAL;
+- spin_unlock_irqrestore(&ctx->completion_lock, flags);
++ if (!ctx) {
++ rc = -EINVAL;
++ goto out;
++ }
++
++ /* The ring_lock mutex. The prevents aio_read_events() from writing
++ * to the ring's head, and prevents page migration from mucking in
++ * a partially initialized kiotx.
++ */
++ if (!mutex_trylock(&ctx->ring_lock)) {
++ rc = -EAGAIN;
++ goto out;
++ }
++
++ idx = old->index;
++ if (idx < (pgoff_t)ctx->nr_pages) {
++ /* Make sure the old page hasn't already been changed */
++ if (ctx->ring_pages[idx] != old)
++ rc = -EAGAIN;
+ } else
+ rc = -EINVAL;
+- spin_unlock(&mapping->private_lock);
+
+ if (rc != 0)
+- return rc;
++ goto out_unlock;
+
+ /* Writeback must be complete */
+ BUG_ON(PageWriteback(old));
+@@ -314,38 +327,26 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
+ rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
+ if (rc != MIGRATEPAGE_SUCCESS) {
+ put_page(new);
+- return rc;
++ goto out_unlock;
+ }
+
+- /* We can potentially race against kioctx teardown here. Use the
+- * address_space's private data lock to protect the mapping's
+- * private_data.
++ /* Take completion_lock to prevent other writes to the ring buffer
++ * while the old page is copied to the new. This prevents new
++ * events from being lost.
+ */
+- spin_lock(&mapping->private_lock);
+- ctx = mapping->private_data;
+- if (ctx) {
+- pgoff_t idx;
+- spin_lock_irqsave(&ctx->completion_lock, flags);
+- migrate_page_copy(new, old);
+- idx = old->index;
+- if (idx < (pgoff_t)ctx->nr_pages) {
+- /* And only do the move if things haven't changed */
+- if (ctx->ring_pages[idx] == old)
+- ctx->ring_pages[idx] = new;
+- else
+- rc = -EAGAIN;
+- } else
+- rc = -EINVAL;
+- spin_unlock_irqrestore(&ctx->completion_lock, flags);
+- } else
+- rc = -EBUSY;
+- spin_unlock(&mapping->private_lock);
++ spin_lock_irqsave(&ctx->completion_lock, flags);
++ migrate_page_copy(new, old);
++ BUG_ON(ctx->ring_pages[idx] != old);
++ ctx->ring_pages[idx] = new;
++ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+
+- if (rc == MIGRATEPAGE_SUCCESS)
+- put_page(old);
+- else
+- put_page(new);
++ /* The old page is no longer accessible. */
++ put_page(old);
+
++out_unlock:
++ mutex_unlock(&ctx->ring_lock);
++out:
++ spin_unlock(&mapping->private_lock);
+ return rc;
+ }
+ #endif
+@@ -380,7 +381,7 @@ static int aio_setup_ring(struct kioctx *ctx)
+ file = aio_private_file(ctx, nr_pages);
+ if (IS_ERR(file)) {
+ ctx->aio_ring_file = NULL;
+- return -EAGAIN;
++ return -ENOMEM;
+ }
+
+ ctx->aio_ring_file = file;
+@@ -415,7 +416,7 @@ static int aio_setup_ring(struct kioctx *ctx)
+
+ if (unlikely(i != nr_pages)) {
+ aio_free_ring(ctx);
+- return -EAGAIN;
++ return -ENOMEM;
+ }
+
+ ctx->mmap_size = nr_pages * PAGE_SIZE;
+@@ -429,7 +430,7 @@ static int aio_setup_ring(struct kioctx *ctx)
+ if (IS_ERR((void *)ctx->mmap_base)) {
+ ctx->mmap_size = 0;
+ aio_free_ring(ctx);
+- return -EAGAIN;
++ return -ENOMEM;
+ }
+
+ pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
+@@ -556,6 +557,10 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
+ rcu_read_unlock();
+ spin_unlock(&mm->ioctx_lock);
+
++ /* While kioctx setup is in progress,
++ * we are protected from page migration
++ * changes ring_pages by ->ring_lock.
++ */
+ ring = kmap_atomic(ctx->ring_pages[0]);
+ ring->id = ctx->id;
+ kunmap_atomic(ring);
+@@ -640,24 +645,28 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
+
+ ctx->max_reqs = nr_events;
+
+- if (percpu_ref_init(&ctx->users, free_ioctx_users))
+- goto err;
+-
+- if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
+- goto err;
+-
+ spin_lock_init(&ctx->ctx_lock);
+ spin_lock_init(&ctx->completion_lock);
+ mutex_init(&ctx->ring_lock);
++ /* Protect against page migration throughout kiotx setup by keeping
++ * the ring_lock mutex held until setup is complete. */
++ mutex_lock(&ctx->ring_lock);
+ init_waitqueue_head(&ctx->wait);
+
+ INIT_LIST_HEAD(&ctx->active_reqs);
+
++ if (percpu_ref_init(&ctx->users, free_ioctx_users))
++ goto err;
++
++ if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
++ goto err;
++
+ ctx->cpu = alloc_percpu(struct kioctx_cpu);
+ if (!ctx->cpu)
+ goto err;
+
+- if (aio_setup_ring(ctx) < 0)
++ err = aio_setup_ring(ctx);
++ if (err < 0)
+ goto err;
+
+ atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
+@@ -683,6 +692,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
+ if (err)
+ goto err_cleanup;
+
++ /* Release the ring_lock mutex now that all setup is complete. */
++ mutex_unlock(&ctx->ring_lock);
++
+ pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
+ ctx, ctx->user_id, mm, ctx->nr_events);
+ return ctx;
+@@ -692,6 +704,7 @@ err_cleanup:
+ err_ctx:
+ aio_free_ring(ctx);
+ err:
++ mutex_unlock(&ctx->ring_lock);
+ free_percpu(ctx->cpu);
+ free_percpu(ctx->reqs.pcpu_count);
+ free_percpu(ctx->users.pcpu_count);
+@@ -1024,6 +1037,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
+
+ mutex_lock(&ctx->ring_lock);
+
++ /* Access to ->ring_pages here is protected by ctx->ring_lock. */
+ ring = kmap_atomic(ctx->ring_pages[0]);
+ head = ring->head;
+ tail = ring->tail;
+diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
+index 88556dc0458e..d5abafd56a6d 100644
+--- a/fs/dlm/lockspace.c
++++ b/fs/dlm/lockspace.c
+@@ -706,9 +706,7 @@ static int lkb_idr_is_local(int id, void *p, void *data)
+ {
+ struct dlm_lkb *lkb = p;
+
+- if (!lkb->lkb_nodeid)
+- return 1;
+- return 0;
++ return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV;
+ }
+
+ static int lkb_idr_is_any(int id, void *p, void *data)
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index a9d2bf941066..1e25d6b57bc5 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2445,23 +2445,6 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
+ up_write(&EXT4_I(inode)->i_data_sem);
+ }
+
+-/*
+- * Update i_disksize after writeback has been started. Races with truncate
+- * are avoided by checking i_size under i_data_sem.
+- */
+-static inline void ext4_wb_update_i_disksize(struct inode *inode, loff_t newsize)
+-{
+- loff_t i_size;
+-
+- down_write(&EXT4_I(inode)->i_data_sem);
+- i_size = i_size_read(inode);
+- if (newsize > i_size)
+- newsize = i_size;
+- if (newsize > EXT4_I(inode)->i_disksize)
+- EXT4_I(inode)->i_disksize = newsize;
+- up_write(&EXT4_I(inode)->i_data_sem);
+-}
+-
+ struct ext4_group_info {
+ unsigned long bb_state;
+ struct rb_root bb_free_root;
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 3da21945ff1f..1b890101397b 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -82,7 +82,7 @@ ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
+ size_t count = iov_length(iov, nr_segs);
+ loff_t final_size = pos + count;
+
+- if (pos >= inode->i_size)
++ if (pos >= i_size_read(inode))
+ return 0;
+
+ if ((pos & blockmask) || (final_size & blockmask))
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index f173ef12c97a..ea9793d8a77f 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -515,6 +515,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
+ "logical block %lu\n", inode->i_ino, flags, map->m_len,
+ (unsigned long) map->m_lblk);
+
++ /* We can handle the block number less than EXT_MAX_BLOCKS */
++ if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
++ return -EIO;
++
+ /* Lookup extent status tree firstly */
+ if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
+ ext4_es_lru_add(inode);
+@@ -2229,13 +2233,23 @@ static int mpage_map_and_submit_extent(handle_t *handle,
+ return err;
+ } while (map->m_len);
+
+- /* Update on-disk size after IO is submitted */
++ /*
++ * Update on-disk size after IO is submitted. Races with
++ * truncate are avoided by checking i_size under i_data_sem.
++ */
+ disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
+ if (disksize > EXT4_I(inode)->i_disksize) {
+ int err2;
+-
+- ext4_wb_update_i_disksize(inode, disksize);
++ loff_t i_size;
++
++ down_write(&EXT4_I(inode)->i_data_sem);
++ i_size = i_size_read(inode);
++ if (disksize > i_size)
++ disksize = i_size;
++ if (disksize > EXT4_I(inode)->i_disksize)
++ EXT4_I(inode)->i_disksize = disksize;
+ err2 = ext4_mark_inode_dirty(handle, inode);
++ up_write(&EXT4_I(inode)->i_data_sem);
+ if (err2)
+ ext4_error(inode->i_sb,
+ "Failed to mark inode %lu dirty",
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index d7d0c7b46ed4..02e94ef1489b 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -307,13 +307,14 @@ static void ext4_end_bio(struct bio *bio, int error)
+ if (error) {
+ struct inode *inode = io_end->inode;
+
+- ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
++ ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
+ "(offset %llu size %ld starting block %llu)",
+- inode->i_ino,
++ error, inode->i_ino,
+ (unsigned long long) io_end->offset,
+ (long) io_end->size,
+ (unsigned long long)
+ bi_sector >> (inode->i_blkbits - 9));
++ mapping_set_error(inode->i_mapping, error);
+ }
+
+ if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 1423c4816a47..298e9c8da364 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -517,8 +517,8 @@ static void ext4_xattr_update_super_block(handle_t *handle,
+ }
+
+ /*
+- * Release the xattr block BH: If the reference count is > 1, decrement
+- * it; otherwise free the block.
++ * Release the xattr block BH: If the reference count is > 1, decrement it;
++ * otherwise free the block.
+ */
+ static void
+ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
+@@ -538,16 +538,31 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
+ if (ce)
+ mb_cache_entry_free(ce);
+ get_bh(bh);
++ unlock_buffer(bh);
+ ext4_free_blocks(handle, inode, bh, 0, 1,
+ EXT4_FREE_BLOCKS_METADATA |
+ EXT4_FREE_BLOCKS_FORGET);
+- unlock_buffer(bh);
+ } else {
+ le32_add_cpu(&BHDR(bh)->h_refcount, -1);
+ if (ce)
+ mb_cache_entry_release(ce);
++ /*
++ * Beware of this ugliness: Releasing of xattr block references
++ * from different inodes can race and so we have to protect
++ * from a race where someone else frees the block (and releases
++ * its journal_head) before we are done dirtying the buffer. In
++ * nojournal mode this race is harmless and we actually cannot
++ * call ext4_handle_dirty_xattr_block() with locked buffer as
++ * that function can call sync_dirty_buffer() so for that case
++ * we handle the dirtying after unlocking the buffer.
++ */
++ if (ext4_handle_valid(handle))
++ error = ext4_handle_dirty_xattr_block(handle, inode,
++ bh);
+ unlock_buffer(bh);
+- error = ext4_handle_dirty_xattr_block(handle, inode, bh);
++ if (!ext4_handle_valid(handle))
++ error = ext4_handle_dirty_xattr_block(handle, inode,
++ bh);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+ dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 10d6c41aecad..6bf06a07f3e0 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -235,6 +235,7 @@ out_err:
+ if (warned++ == 0)
+ printk(KERN_WARNING
+ "lockd_up: makesock failed, error=%d\n", err);
++ svc_shutdown_net(serv, net);
+ return err;
+ }
+
+diff --git a/fs/locks.c b/fs/locks.c
+index b27a3005d78d..ad95fbd20f8a 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1359,11 +1359,10 @@ int __break_lease(struct inode *inode, unsigned int mode)
+
+ restart:
+ break_time = flock->fl_break_time;
+- if (break_time != 0) {
++ if (break_time != 0)
+ break_time -= jiffies;
+- if (break_time == 0)
+- break_time++;
+- }
++ if (break_time == 0)
++ break_time++;
+ locks_insert_block(flock, new_fl);
+ spin_unlock(&inode->i_lock);
+ error = wait_event_interruptible_timeout(new_fl->fl_wait,
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index bcd42fbc04e7..40062e42c955 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1064,6 +1064,7 @@ static void nfs4_opendata_free(struct kref *kref)
+ dput(p->dentry);
+ nfs_sb_deactive(sb);
+ nfs_fattr_free_names(&p->f_attr);
++ kfree(p->f_attr.mdsthreshold);
+ kfree(p);
+ }
+
+@@ -2236,10 +2237,12 @@ static int _nfs4_do_open(struct inode *dir,
+ }
+ }
+
+- if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
+- opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
+- if (!opendata->f_attr.mdsthreshold)
+- goto err_free_label;
++ if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
++ if (!opendata->f_attr.mdsthreshold) {
++ opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
++ if (!opendata->f_attr.mdsthreshold)
++ goto err_free_label;
++ }
+ opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
+ }
+ if (dentry->d_inode != NULL)
+@@ -2267,11 +2270,10 @@ static int _nfs4_do_open(struct inode *dir,
+ if (opendata->file_created)
+ *opened |= FILE_CREATED;
+
+- if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
++ if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
+ *ctx_th = opendata->f_attr.mdsthreshold;
+- else
+- kfree(opendata->f_attr.mdsthreshold);
+- opendata->f_attr.mdsthreshold = NULL;
++ opendata->f_attr.mdsthreshold = NULL;
++ }
+
+ nfs4_label_free(olabel);
+
+@@ -2281,7 +2283,6 @@ static int _nfs4_do_open(struct inode *dir,
+ err_free_label:
+ nfs4_label_free(olabel);
+ err_opendata_put:
+- kfree(opendata->f_attr.mdsthreshold);
+ nfs4_opendata_put(opendata);
+ err_put_state_owner:
+ nfs4_put_state_owner(sp);
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 7f05cd140de3..3eaa6e30a2dc 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -637,9 +637,11 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
+
+ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
+ {
++ int maxtime = max_cb_time(clp->net);
+ struct rpc_timeout timeparms = {
+- .to_initval = max_cb_time(clp->net),
++ .to_initval = maxtime,
+ .to_retries = 0,
++ .to_maxval = maxtime,
+ };
+ struct rpc_create_args args = {
+ .net = clp->net,
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 419572f33b72..b9e784486729 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1349,6 +1349,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
+ /* If op is non-idempotent */
+ if (opdesc->op_flags & OP_MODIFIES_SOMETHING) {
+ plen = opdesc->op_rsize_bop(rqstp, op);
++ /*
++ * If there's still another operation, make sure
++ * we'll have space to at least encode an error:
++ */
++ if (resp->opcnt < args->opcnt)
++ plen += COMPOUND_ERR_SLACK_SPACE;
+ op->status = nfsd4_check_resp_size(resp, plen);
+ }
+
+@@ -1513,7 +1519,8 @@ static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *o
+
+ static inline u32 nfsd4_setclientid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+ {
+- return (op_encode_hdr_size + 2 + 1024) * sizeof(__be32);
++ return (op_encode_hdr_size + 2 + XDR_QUADLEN(NFS4_VERIFIER_SIZE)) *
++ sizeof(__be32);
+ }
+
+ static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 0874998a49cd..5cbdf38ffc66 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -5048,7 +5048,6 @@ nfs4_state_destroy_net(struct net *net)
+ int i;
+ struct nfs4_client *clp = NULL;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+- struct rb_node *node, *tmp;
+
+ for (i = 0; i < CLIENT_HASH_SIZE; i++) {
+ while (!list_empty(&nn->conf_id_hashtbl[i])) {
+@@ -5057,13 +5056,11 @@ nfs4_state_destroy_net(struct net *net)
+ }
+ }
+
+- node = rb_first(&nn->unconf_name_tree);
+- while (node != NULL) {
+- tmp = node;
+- node = rb_next(tmp);
+- clp = rb_entry(tmp, struct nfs4_client, cl_namenode);
+- rb_erase(tmp, &nn->unconf_name_tree);
+- destroy_client(clp);
++ for (i = 0; i < CLIENT_HASH_SIZE; i++) {
++ while (!list_empty(&nn->unconf_id_hashtbl[i])) {
++ clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
++ destroy_client(clp);
++ }
+ }
+
+ kfree(nn->sessionid_hashtbl);
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index ecc735e30bea..4ab5ff492ca1 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2502,6 +2502,8 @@ out_acl:
+ goto out;
+ }
+ if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
++ if ((buflen -= 16) < 0)
++ goto out_resource;
+ WRITE32(3);
+ WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD0);
+ WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD1);
+@@ -3510,6 +3512,9 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
+ struct nfsd4_test_stateid_id *stateid, *next;
+ __be32 *p;
+
++ if (nfserr)
++ return nfserr;
++
+ RESERVE_SPACE(4 + (4 * test_stateid->ts_num_ids));
+ *p++ = htonl(test_stateid->ts_num_ids);
+
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 7f555179bf81..f34d9de802ab 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -699,6 +699,11 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net)
+ if (err != 0 || fd < 0)
+ return -EINVAL;
+
++ if (svc_alien_sock(net, fd)) {
++ printk(KERN_ERR "%s: socket net is different to NFSd's one\n", __func__);
++ return -EINVAL;
++ }
++
+ err = nfsd_create_serv(net);
+ if (err != 0)
+ return err;
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 30f34ab02137..479eb681c27c 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -282,7 +282,7 @@ void nfsd_lockd_shutdown(void);
+ * reason.
+ */
+ #define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */
+-#define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */
++#define COMPOUND_ERR_SLACK_SPACE 16 /* OP_SETATTR */
+
+ #define NFSD_LAUNDROMAT_MINTIMEOUT 1 /* seconds */
+
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 72cb28e73ca0..e9a80e4553a3 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -407,6 +407,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ umode_t ftype = 0;
+ __be32 err;
+ int host_err;
++ bool get_write_count;
+ int size_change = 0;
+
+ if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
+@@ -414,10 +415,18 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ if (iap->ia_valid & ATTR_SIZE)
+ ftype = S_IFREG;
+
++ /* Callers that do fh_verify should do the fh_want_write: */
++ get_write_count = !fhp->fh_dentry;
++
+ /* Get inode */
+ err = fh_verify(rqstp, fhp, ftype, accmode);
+ if (err)
+ goto out;
++ if (get_write_count) {
++ host_err = fh_want_write(fhp);
++ if (host_err)
++ return nfserrno(host_err);
++ }
+
+ dentry = fhp->fh_dentry;
+ inode = dentry->d_inode;
+diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
+index 5d18ad10c27f..4f66e007dae1 100644
+--- a/fs/ocfs2/buffer_head_io.c
++++ b/fs/ocfs2/buffer_head_io.c
+@@ -90,7 +90,6 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
+ * information for this bh as it's not marked locally
+ * uptodate. */
+ ret = -EIO;
+- put_bh(bh);
+ mlog_errno(ret);
+ }
+
+@@ -420,7 +419,6 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
+
+ if (!buffer_uptodate(bh)) {
+ ret = -EIO;
+- put_bh(bh);
+ mlog_errno(ret);
+ }
+
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index 0b5adca1b178..7b4a3fa63fab 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -537,7 +537,10 @@ master_here:
+ /* success! see if any other nodes need recovery */
+ mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
+ dlm->name, dlm->reco.dead_node, dlm->node_num);
+- dlm_reset_recovery(dlm);
++ spin_lock(&dlm->spinlock);
++ __dlm_reset_recovery(dlm);
++ dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
++ spin_unlock(&dlm->spinlock);
+ }
+ dlm_end_recovery(dlm);
+
+@@ -695,6 +698,14 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
+ if (all_nodes_done) {
+ int ret;
+
++			/* Set this flag on the recovery master to keep a new
++			 * recovery for another dead node from starting before
++			 * this recovery is done; otherwise recovery may hang.
++			 */
++ spin_lock(&dlm->spinlock);
++ dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
++ spin_unlock(&dlm->spinlock);
++
+ /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
+ * just send a finalize message to everyone and
+ * clean up */
+@@ -1750,13 +1761,13 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
+ struct dlm_migratable_lockres *mres)
+ {
+ struct dlm_migratable_lock *ml;
+- struct list_head *queue;
++ struct list_head *queue, *iter;
+ struct list_head *tmpq = NULL;
+ struct dlm_lock *newlock = NULL;
+ struct dlm_lockstatus *lksb = NULL;
+ int ret = 0;
+ int i, j, bad;
+- struct dlm_lock *lock = NULL;
++ struct dlm_lock *lock;
+ u8 from = O2NM_MAX_NODES;
+ unsigned int added = 0;
+ __be64 c;
+@@ -1791,14 +1802,16 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
+ /* MIGRATION ONLY! */
+ BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
+
++ lock = NULL;
+ spin_lock(&res->spinlock);
+ for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
+ tmpq = dlm_list_idx_to_ptr(res, j);
+- list_for_each_entry(lock, tmpq, list) {
+- if (lock->ml.cookie != ml->cookie)
+- lock = NULL;
+- else
++ list_for_each(iter, tmpq) {
++ lock = list_entry(iter,
++ struct dlm_lock, list);
++ if (lock->ml.cookie == ml->cookie)
+ break;
++ lock = NULL;
+ }
+ if (lock)
+ break;
+@@ -2875,8 +2888,8 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
+ BUG();
+ }
+ dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
++ __dlm_reset_recovery(dlm);
+ spin_unlock(&dlm->spinlock);
+- dlm_reset_recovery(dlm);
+ dlm_kick_recovery_thread(dlm);
+ break;
+ default:
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index be3f8676a438..c19c2c57650b 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -430,7 +430,6 @@ leave:
+
+ brelse(new_fe_bh);
+ brelse(parent_fe_bh);
+- kfree(si.name);
+ kfree(si.value);
+
+ ocfs2_free_dir_lookup_result(&lookup);
+@@ -1818,7 +1817,6 @@ bail:
+
+ brelse(new_fe_bh);
+ brelse(parent_fe_bh);
+- kfree(si.name);
+ kfree(si.value);
+ ocfs2_free_dir_lookup_result(&lookup);
+ if (inode_ac)
+diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
+index 1fd2051109a3..af677353a3f5 100644
+--- a/fs/reiserfs/dir.c
++++ b/fs/reiserfs/dir.c
+@@ -125,6 +125,7 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
+ int d_reclen;
+ char *d_name;
+ ino_t d_ino;
++ loff_t cur_pos = deh_offset(deh);
+
+ if (!de_visible(deh))
+ /* it is hidden entry */
+@@ -196,8 +197,9 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
+ if (local_buf != small_buf) {
+ kfree(local_buf);
+ }
+- // next entry should be looked for with such offset
+- next_pos = deh_offset(deh) + 1;
++
++ /* deh_offset(deh) may be invalid now. */
++ next_pos = cur_pos + 1;
+
+ if (item_moved(&tmp_ih, &path_to_entry)) {
+ set_cpu_key_k_offset(&pos_key,
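The reiserfs_readdir_inode fix snapshots deh_offset(deh) into cur_pos before the directory-emit callback runs, because the callback can block and the buffer backing deh may be reused by then; next_pos is then computed from the snapshot. The shape of the fix, reduced to a self-contained example (types and callback are illustrative):

    #include <stdio.h>

    struct dirent_hdr { long offset; };

    /* The callback may block and invalidate the storage behind 'deh'. */
    static long emit_entry(struct dirent_hdr *deh, int (*emit)(long))
    {
        long cur_pos = deh->offset;   /* snapshot while 'deh' is known valid */
        emit(cur_pos);                /* 'deh' must not be trusted after this */
        return cur_pos + 1;           /* resume point comes from the snapshot */
    }

    static int print_off(long off) { return printf("entry at %ld\n", off); }

    int main(void)
    {
        struct dirent_hdr deh = { .offset = 41 };
        printf("next_pos = %ld\n", emit_entry(&deh, print_off));
        return 0;
    }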
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index d98503bde7e9..b6043a0c629f 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -473,15 +473,18 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
+ * 0 . 13 (Windows Server 2008)
+ * 1 . 1 (Windows 7)
+ * 2 . 4 (Windows 8)
++ * 3 . 0 (Windows 8 R2)
+ */
+
+ #define VERSION_WS2008 ((0 << 16) | (13))
+ #define VERSION_WIN7 ((1 << 16) | (1))
+ #define VERSION_WIN8 ((2 << 16) | (4))
++#define VERSION_WIN8_1 ((3 << 16) | (0))
++
+
+ #define VERSION_INVAL -1
+
+-#define VERSION_CURRENT VERSION_WIN8
++#define VERSION_CURRENT VERSION_WIN8_1
+
+ /* Make maximum size of pipe payload of 16K */
+ #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
+@@ -884,7 +887,7 @@ struct vmbus_channel_relid_released {
+ struct vmbus_channel_initiate_contact {
+ struct vmbus_channel_message_header header;
+ u32 vmbus_version_requested;
+- u32 padding2;
++ u32 target_vcpu; /* The VCPU the host should respond to */
+ u64 interrupt_page;
+ u64 monitor_page1;
+ u64 monitor_page2;
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index bec6dbe939a0..3fee55e73e5e 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -822,6 +822,7 @@ struct ata_port {
+ unsigned long qc_allocated;
+ unsigned int qc_active;
+ int nr_active_links; /* #links with active qcs */
++ unsigned int last_tag; /* track next tag hw expects */
+
+ struct ata_link link; /* host default link */
+ struct ata_link *slave_link; /* see ata_slave_link_init() */
+diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
+index 62fd1b756e99..947009ed5996 100644
+--- a/include/linux/sunrpc/svcsock.h
++++ b/include/linux/sunrpc/svcsock.h
+@@ -56,6 +56,7 @@ int svc_recv(struct svc_rqst *, long);
+ int svc_send(struct svc_rqst *);
+ void svc_drop(struct svc_rqst *);
+ void svc_sock_update_bufs(struct svc_serv *serv);
++bool svc_alien_sock(struct net *net, int fd);
+ int svc_addsock(struct svc_serv *serv, const int fd,
+ char *name_return, const size_t len);
+ void svc_init_xprt_sock(void);
+diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
+index 335e8a7cad39..c140620dad92 100644
+--- a/include/uapi/linux/libc-compat.h
++++ b/include/uapi/linux/libc-compat.h
+@@ -85,6 +85,12 @@
+
+ #endif /* _NETINET_IN_H */
+
++/* Definitions for xattr.h */
++#if defined(_SYS_XATTR_H)
++#define __UAPI_DEF_XATTR 0
++#else
++#define __UAPI_DEF_XATTR 1
++#endif
+
+ /* If we did not see any headers from any supported C libraries,
+ * or we are being included in the kernel, then define everything
+@@ -98,6 +104,9 @@
+ #define __UAPI_DEF_IPV6_MREQ 1
+ #define __UAPI_DEF_IPPROTO_V6 1
+
++/* Definitions for xattr.h */
++#define __UAPI_DEF_XATTR 1
++
+ #endif /* __GLIBC__ */
+
+ #endif /* _UAPI_LIBC_COMPAT_H */
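The libc-compat.h hunks let <linux/xattr.h> and glibc's <sys/xattr.h> coexist: if the libc header was included first (_SYS_XATTR_H is defined), __UAPI_DEF_XATTR is set to 0 so the kernel header should suppress its duplicate XATTR_CREATE/XATTR_REPLACE definitions. Note that the macro is always defined, to either 0 or 1, so only a value test can tell the two cases apart; the #ifdef used in the xattr.h hunk below is true either way. The guard mechanism in isolation:

    /* mini libc-compat.h */
    #if defined(_SYS_XATTR_H)     /* glibc's <sys/xattr.h> already included */
    # define MY_DEF_XATTR 0       /* libc owns the definitions */
    #else
    # define MY_DEF_XATTR 1       /* the kernel header provides them */
    #endif

    /* mini <linux/xattr.h>: #if, not #ifdef, so the 0 case actually works */
    #if MY_DEF_XATTR
    # define XATTR_CREATE  0x1
    # define XATTR_REPLACE 0x2
    #endif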
+diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
+index 437f1b0f8937..c5e2c7df1b0f 100644
+--- a/include/uapi/linux/videodev2.h
++++ b/include/uapi/linux/videodev2.h
+@@ -1059,14 +1059,14 @@ struct v4l2_bt_timings {
+
+ /* A few useful defines to calculate the total blanking and frame sizes */
+ #define V4L2_DV_BT_BLANKING_WIDTH(bt) \
+- (bt->hfrontporch + bt->hsync + bt->hbackporch)
++ ((bt)->hfrontporch + (bt)->hsync + (bt)->hbackporch)
+ #define V4L2_DV_BT_FRAME_WIDTH(bt) \
+- (bt->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
++ ((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
+ #define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
+- (bt->vfrontporch + bt->vsync + bt->vbackporch + \
+- bt->il_vfrontporch + bt->il_vsync + bt->il_vbackporch)
++ ((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
++ (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
+ #define V4L2_DV_BT_FRAME_HEIGHT(bt) \
+- (bt->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
++ ((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
+
+ /** struct v4l2_dv_timings - DV timings
+ * @type: the type of the timings
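Every change in the videodev2.h hunk is the same one: wrap the macro parameter bt in parentheses at each use. Without them the macros only expand correctly when the argument is a plain identifier; an expression argument mis-binds against the higher-precedence -> operator. A small demonstration:

    #include <stdio.h>

    struct bt_timings { int width, hfrontporch, hsync, hbackporch; };

    #define BLANKING_BAD(bt)  (bt->hfrontporch + bt->hsync + bt->hbackporch)
    #define BLANKING_OK(bt)   ((bt)->hfrontporch + (bt)->hsync + (bt)->hbackporch)

    int main(void)
    {
        struct bt_timings t = { 1920, 88, 44, 148 };
        /* BLANKING_BAD(&t) expands to (&t->hfrontporch + ...), which fails to
         * compile: '->' binds tighter than unary '&', so 't' itself would have
         * to be a pointer. The parenthesized form accepts any expression: */
        printf("%d\n", BLANKING_OK(&t));   /* 280 */
        return 0;
    }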
+diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
+index e4629b93bdd6..a63c14607f86 100644
+--- a/include/uapi/linux/xattr.h
++++ b/include/uapi/linux/xattr.h
+@@ -7,11 +7,18 @@
+ Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved.
+ Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ */
++
++#include <linux/libc-compat.h>
++
+ #ifndef _UAPI_LINUX_XATTR_H
+ #define _UAPI_LINUX_XATTR_H
+
++#ifdef __UAPI_DEF_XATTR
++#define __USE_KERNEL_XATTR_DEFS
++
+ #define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
+ #define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
++#endif
+
+ /* Namespaces */
+ #define XATTR_OS2_PREFIX "os2."
+diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
+index 93c5d5ecff4e..741a4269eb8f 100644
+--- a/lib/percpu_counter.c
++++ b/lib/percpu_counter.c
+@@ -166,7 +166,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
+ struct percpu_counter *fbc;
+
+ compute_batch_value();
+- if (action != CPU_DEAD)
++ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
+ return NOTIFY_OK;
+
+ cpu = (unsigned long)hcpu;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index f0a4ca4cc219..efbb9dc67f2f 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1552,6 +1552,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
+ while (min_count < persistent_huge_pages(h)) {
+ if (!free_pool_huge_page(h, nodes_allowed, 0))
+ break;
++ cond_resched_lock(&hugetlb_lock);
+ }
+ while (count < persistent_huge_pages(h)) {
+ if (!adjust_pool_surplus(h, nodes_allowed, 1))
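The set_max_huge_pages hunk adds a reschedule point to a loop that can free a very large number of pages while holding hugetlb_lock; cond_resched_lock() drops and re-takes the spinlock only when a reschedule is actually pending. A userspace analogue of the pattern, with pthreads standing in for the kernel primitives:

    #include <pthread.h>
    #include <sched.h>

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static int remaining = 100000;

    /* Analogue of cond_resched_lock(): briefly release the lock and yield so
     * waiters and other threads can make progress during a long drain. */
    static void resched_point(pthread_mutex_t *m)
    {
        pthread_mutex_unlock(m);
        sched_yield();
        pthread_mutex_lock(m);
    }

    static void drain_pool(void)
    {
        pthread_mutex_lock(&pool_lock);
        while (remaining > 0) {
            remaining--;                  /* one unit of work under the lock */
            resched_point(&pool_lock);    /* don't monopolize the lock or CPU */
        }
        pthread_mutex_unlock(&pool_lock);
    }

    int main(void) { drain_pool(); return 0; }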
+diff --git a/mm/internal.h b/mm/internal.h
+index 8b6cfd63b5a5..fdddbc83ac5f 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -369,5 +369,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+ #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
+ #define ALLOC_CPUSET 0x40 /* check for correct cpuset */
+ #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
++#define ALLOC_FAIR 0x100 /* fair zone allocation */
+
+ #endif /* __MM_INTERNAL_H */
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 192e6eebe4f2..1b12dfad0794 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -79,6 +79,7 @@ void clear_page_mlock(struct page *page)
+ */
+ void mlock_vma_page(struct page *page)
+ {
++ /* Serialize with page migration */
+ BUG_ON(!PageLocked(page));
+
+ if (!TestSetPageMlocked(page)) {
+@@ -153,6 +154,7 @@ unsigned int munlock_vma_page(struct page *page)
+ {
+ unsigned int nr_pages;
+
++ /* For try_to_munlock() and to serialize with page migration */
+ BUG_ON(!PageLocked(page));
+
+ if (TestClearPageMlocked(page)) {
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 6fca39097766..09459deb0b51 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1219,15 +1219,6 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ }
+ local_irq_restore(flags);
+ }
+-static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+-{
+- return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
+-}
+-#else
+-static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+-{
+- return false;
+-}
+ #endif
+
+ /*
+@@ -1564,12 +1555,7 @@ again:
+ get_pageblock_migratetype(page));
+ }
+
+- /*
+- * NOTE: GFP_THISNODE allocations do not partake in the kswapd
+- * aging protocol, so they can't be fair.
+- */
+- if (!gfp_thisnode_allocation(gfp_flags))
+- __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
++ __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+
+ __count_zone_vm_events(PGALLOC, zone, 1 << order);
+ zone_statistics(preferred_zone, zone, gfp_flags);
+@@ -1935,23 +1921,12 @@ zonelist_scan:
+ * zone size to ensure fair page aging. The zone a
+ * page was allocated in should have no effect on the
+ * time the page has in memory before being reclaimed.
+- *
+- * Try to stay in local zones in the fastpath. If
+- * that fails, the slowpath is entered, which will do
+- * another pass starting with the local zones, but
+- * ultimately fall back to remote zones that do not
+- * partake in the fairness round-robin cycle of this
+- * zonelist.
+- *
+- * NOTE: GFP_THISNODE allocations do not partake in
+- * the kswapd aging protocol, so they can't be fair.
+ */
+- if ((alloc_flags & ALLOC_WMARK_LOW) &&
+- !gfp_thisnode_allocation(gfp_mask)) {
+- if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
+- continue;
++ if (alloc_flags & ALLOC_FAIR) {
+ if (!zone_local(preferred_zone, zone))
+ continue;
++ if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
++ continue;
+ }
+ /*
+ * When allocating a page cache page for writing, we
+@@ -2399,32 +2374,40 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
+ return page;
+ }
+
+-static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
+- struct zonelist *zonelist,
+- enum zone_type high_zoneidx,
+- struct zone *preferred_zone)
++static void reset_alloc_batches(struct zonelist *zonelist,
++ enum zone_type high_zoneidx,
++ struct zone *preferred_zone)
+ {
+ struct zoneref *z;
+ struct zone *zone;
+
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+- if (!(gfp_mask & __GFP_NO_KSWAPD))
+- wakeup_kswapd(zone, order, zone_idx(preferred_zone));
+ /*
+ * Only reset the batches of zones that were actually
+- * considered in the fast path, we don't want to
+- * thrash fairness information for zones that are not
++	 * considered in the fairness pass; we don't want to
++ * trash fairness information for zones that are not
+ * actually part of this zonelist's round-robin cycle.
+ */
+ if (!zone_local(preferred_zone, zone))
+ continue;
+ mod_zone_page_state(zone, NR_ALLOC_BATCH,
+- high_wmark_pages(zone) -
+- low_wmark_pages(zone) -
+- zone_page_state(zone, NR_ALLOC_BATCH));
++ high_wmark_pages(zone) - low_wmark_pages(zone) -
++ atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
+ }
+ }
+
++static void wake_all_kswapds(unsigned int order,
++ struct zonelist *zonelist,
++ enum zone_type high_zoneidx,
++ struct zone *preferred_zone)
++{
++ struct zoneref *z;
++ struct zone *zone;
++
++ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
++ wakeup_kswapd(zone, order, zone_idx(preferred_zone));
++}
++
+ static inline int
+ gfp_to_alloc_flags(gfp_t gfp_mask)
+ {
+@@ -2513,12 +2496,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ * allowed per node queues are empty and that nodes are
+ * over allocated.
+ */
+- if (gfp_thisnode_allocation(gfp_mask))
++ if (IS_ENABLED(CONFIG_NUMA) &&
++ (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+ goto nopage;
+
+ restart:
+- prepare_slowpath(gfp_mask, order, zonelist,
+- high_zoneidx, preferred_zone);
++ if (!(gfp_mask & __GFP_NO_KSWAPD))
++ wake_all_kswapds(order, zonelist, high_zoneidx, preferred_zone);
+
+ /*
+ * OK, we're below the kswapd watermark and have kicked background
+@@ -2695,7 +2679,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ struct page *page = NULL;
+ int migratetype = allocflags_to_migratetype(gfp_mask);
+ unsigned int cpuset_mems_cookie;
+- int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
++ int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
+ struct mem_cgroup *memcg = NULL;
+
+ gfp_mask &= gfp_allowed_mask;
+@@ -2736,12 +2720,29 @@ retry_cpuset:
+ if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+ alloc_flags |= ALLOC_CMA;
+ #endif
++retry:
+ /* First allocation attempt */
+ page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
+ zonelist, high_zoneidx, alloc_flags,
+ preferred_zone, migratetype);
+ if (unlikely(!page)) {
+ /*
++ * The first pass makes sure allocations are spread
++ * fairly within the local node. However, the local
++ * node might have free pages left after the fairness
++ * batches are exhausted, and remote zones haven't
++ * even been considered yet. Try once more without
++ * fairness, and include remote zones now, before
++ * entering the slowpath and waking kswapd: prefer
++ * spilling to a remote zone over swapping locally.
++ */
++ if (alloc_flags & ALLOC_FAIR) {
++ reset_alloc_batches(zonelist, high_zoneidx,
++ preferred_zone);
++ alloc_flags &= ~ALLOC_FAIR;
++ goto retry;
++ }
++ /*
+ * Runtime PM, block IO and its error handling path
+ * can deadlock because I/O on the device might not
+ * complete.
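The page_alloc rework above splits the old prepare_slowpath() behaviour in two: the first allocation attempt carries ALLOC_FAIR and round-robins only local zones whose NR_ALLOC_BATCH budget is still positive; only if that fails are the batches reset and the attempt repeated without the fairness constraint, before kswapd is woken. A toy model of the two-pass control flow (zone count and budgets invented):

    #include <stdio.h>
    #include <stdbool.h>

    #define NZONES 3
    static int batch[NZONES] = { 0, 0, 0 };   /* fairness budgets all spent */

    static int try_alloc(bool fair)
    {
        for (int z = 0; z < NZONES; z++) {
            if (fair && batch[z] <= 0)
                continue;                 /* budget exhausted: skip this pass */
            if (fair)
                batch[z]--;
            return z;                     /* "allocated" from zone z */
        }
        return -1;
    }

    int main(void)
    {
        int z = try_alloc(true);          /* fair pass fails: all budgets 0 */
        if (z < 0) {
            for (int i = 0; i < NZONES; i++)
                batch[i] = 2;             /* reset_alloc_batches() analogue */
            z = try_alloc(false);         /* retry without the constraint */
        }
        printf("zone %d\n", z);           /* prints: zone 0 */
        return 0;
    }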
+diff --git a/mm/rmap.c b/mm/rmap.c
+index b9d2222a0ecb..6e3139835e00 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1392,9 +1392,19 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
+ BUG_ON(!page || PageAnon(page));
+
+ if (locked_vma) {
+- mlock_vma_page(page); /* no-op if already mlocked */
+- if (page == check_page)
++ if (page == check_page) {
++ /* we know we have check_page locked */
++ mlock_vma_page(page);
+ ret = SWAP_MLOCK;
++ } else if (trylock_page(page)) {
++ /*
++ * If we can lock the page, perform mlock.
++ * Otherwise leave the page alone, it will be
++ * eventually encountered again later.
++ */
++ mlock_vma_page(page);
++ unlock_page(page);
++ }
+ continue; /* don't unmap */
+ }
+
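The try_to_unmap_cluster() fix only calls mlock_vma_page() with the page lock held: the lock is known-held for check_page, and any other page is attempted with trylock_page(), failure simply skipping the page (it will be encountered again later) rather than sleeping inside the rmap walk. The trylock-or-skip shape in userspace terms:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Mark the page only if its lock can be taken without blocking;
     * otherwise leave it alone and let a later pass handle it. */
    static void maybe_mark(void)
    {
        if (pthread_mutex_trylock(&page_lock) == 0) {
            puts("marked");
            pthread_mutex_unlock(&page_lock);
        } else {
            puts("busy, skipped");
        }
    }

    int main(void) { maybe_mark(); return 0; }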
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 611abfcfb5eb..ab0fbb458c11 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1320,6 +1320,7 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
+ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
+ void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
+ __le16 fc, bool acked);
++void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata);
+ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
+
+ /* IBSS code */
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index e765f77bb97a..2c5f21c7857f 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -148,6 +148,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (!rcu_access_pointer(sdata->vif.chanctx_conf))
+ continue;
++ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
++ continue;
+ power = min(power, sdata->vif.bss_conf.txpower);
+ }
+ rcu_read_unlock();
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 8d7f4abe65ba..e41c477c6d9f 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2881,8 +2881,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
+ bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
+ channel);
+ if (bss) {
+- ieee80211_rx_bss_put(local, bss);
+ sdata->vif.bss_conf.beacon_rate = bss->beacon_rate;
++ ieee80211_rx_bss_put(local, bss);
+ }
+ }
+
+@@ -3684,6 +3684,32 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
+ }
+
+ #ifdef CONFIG_PM
++void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
++{
++ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
++ u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
++
++ sdata_lock(sdata);
++
++ if (ifmgd->auth_data) {
++ /*
++ * If we are trying to authenticate while suspending, cfg80211
++ * won't know and won't actually abort those attempts, thus we
++ * need to do that ourselves.
++ */
++ ieee80211_send_deauth_disassoc(sdata,
++ ifmgd->auth_data->bss->bssid,
++ IEEE80211_STYPE_DEAUTH,
++ WLAN_REASON_DEAUTH_LEAVING,
++ false, frame_buf);
++ ieee80211_destroy_auth_data(sdata, false);
++ cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
++ IEEE80211_DEAUTH_FRAME_LEN);
++ }
++
++ sdata_unlock(sdata);
++}
++
+ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
+ {
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index 0c2a29484c07..6fb38558a5e6 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -355,6 +355,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
+ struct ieee80211_roc_work *dep;
+
+ /* start this ROC */
++ ieee80211_offchannel_stop_vifs(local);
+
+ /* switch channel etc */
+ ieee80211_recalc_idle(local);
+diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
+index 340126204343..efb510e6f206 100644
+--- a/net/mac80211/pm.c
++++ b/net/mac80211/pm.c
+@@ -101,10 +101,18 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+
+ /* remove all interfaces that were created in the driver */
+ list_for_each_entry(sdata, &local->interfaces, list) {
+- if (!ieee80211_sdata_running(sdata) ||
+- sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+- sdata->vif.type == NL80211_IFTYPE_MONITOR)
++ if (!ieee80211_sdata_running(sdata))
+ continue;
++ switch (sdata->vif.type) {
++ case NL80211_IFTYPE_AP_VLAN:
++ case NL80211_IFTYPE_MONITOR:
++ continue;
++ case NL80211_IFTYPE_STATION:
++ ieee80211_mgd_quiesce(sdata);
++ break;
++ default:
++ break;
++ }
+
+ drv_remove_interface(local, sdata);
+ }
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index d6a47e76efff..267bc8e4b8b6 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -2806,7 +2806,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
+ cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ }
+
+- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
++ if (sdata->vif.type == NL80211_IFTYPE_AP)
+ sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
+ if (!ieee80211_tx_prepare(sdata, &tx, skb))
+ break;
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 9c9caaa5e0d3..6ac0f1c3fc28 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1395,6 +1395,22 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
+ return svsk;
+ }
+
++bool svc_alien_sock(struct net *net, int fd)
++{
++ int err;
++ struct socket *sock = sockfd_lookup(fd, &err);
++ bool ret = false;
++
++ if (!sock)
++ goto out;
++ if (sock_net(sock->sk) != net)
++ ret = true;
++ sockfd_put(sock);
++out:
++ return ret;
++}
++EXPORT_SYMBOL_GPL(svc_alien_sock);
++
+ /**
+ * svc_addsock - add a listener socket to an RPC service
+ * @serv: pointer to RPC service to which to add a new listener
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6a32c857f704..d859dd5b99a8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -944,6 +944,7 @@ static int alc_codec_rename_from_preset(struct hda_codec *codec)
+
+ static const struct snd_pci_quirk beep_white_list[] = {
+ SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1),
++ SND_PCI_QUIRK(0x1043, 0x115d, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x8376, "EeePC", 1),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
+@@ -3049,8 +3050,9 @@ static void alc269_fixup_mic_mute_hook(void *private_data, int enabled)
+
+ if (spec->mute_led_polarity)
+ enabled = !enabled;
+- pinval = AC_PINCTL_IN_EN |
+- (enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80);
++ pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid);
++ pinval &= ~AC_PINCTL_VREFEN;
++ pinval |= enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80;
+ if (spec->mute_led_nid)
+ snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval);
+ }
+@@ -3480,6 +3482,15 @@ static void alc_fixup_no_shutup(struct hda_codec *codec,
+ }
+ }
+
++static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ struct alc_spec *spec = codec->spec;
++ spec->gen.auto_mute_via_amp = 1;
++ }
++}
++
+ static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+@@ -4299,6 +4310,7 @@ static int patch_alc269(struct hda_codec *codec)
+ spec->codec_variant = ALC269_TYPE_ALC284;
+ break;
+ case 0x10ec0286:
++ case 0x10ec0288:
+ spec->codec_variant = ALC269_TYPE_ALC286;
+ break;
+ case 0x10ec0255:
+@@ -4646,6 +4658,7 @@ enum {
+ ALC662_FIXUP_BASS_CHMAP,
+ ALC662_FIXUP_BASS_1A,
+ ALC662_FIXUP_BASS_1A_CHMAP,
++ ALC668_FIXUP_AUTO_MUTE,
+ };
+
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -4806,6 +4819,12 @@ static const struct hda_fixup alc662_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic_0x12,
+ },
++ [ALC668_FIXUP_AUTO_MUTE] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc_fixup_auto_mute_via_amp,
++ .chained = true,
++ .chain_id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE
++ },
+ [ALC668_FIXUP_DELL_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -4851,11 +4870,13 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
+ SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+- SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+- SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_AUTO_MUTE),
++ SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_AUTO_MUTE),
+ SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+- SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_AUTO_MUTE),
++ SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+ SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
+ SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
+@@ -5095,6 +5116,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
+ { .id = 0x10ec0284, .name = "ALC284", .patch = patch_alc269 },
+ { .id = 0x10ec0286, .name = "ALC286", .patch = patch_alc269 },
++ { .id = 0x10ec0288, .name = "ALC288", .patch = patch_alc269 },
+ { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
+ { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
+ { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
+diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
+index 28ec872e54c0..b6e278fe8f67 100644
+--- a/sound/pci/ice1712/ice1712.c
++++ b/sound/pci/ice1712/ice1712.c
+@@ -685,9 +685,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_pointer(struct snd_pcm_substream *
+ if (!(snd_ice1712_read(ice, ICE1712_IREG_PBK_CTRL) & 1))
+ return 0;
+ ptr = runtime->buffer_size - inw(ice->ddma_port + 4);
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static snd_pcm_uframes_t snd_ice1712_playback_ds_pointer(struct snd_pcm_substream *substream)
+@@ -704,9 +705,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_ds_pointer(struct snd_pcm_substrea
+ addr = ICE1712_DSC_ADDR0;
+ ptr = snd_ice1712_ds_read(ice, substream->number * 2, addr) -
+ ice->playback_con_virt_addr[substream->number];
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == substream->runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static snd_pcm_uframes_t snd_ice1712_capture_pointer(struct snd_pcm_substream *substream)
+@@ -717,9 +719,10 @@ static snd_pcm_uframes_t snd_ice1712_capture_pointer(struct snd_pcm_substream *s
+ if (!(snd_ice1712_read(ice, ICE1712_IREG_CAP_CTRL) & 1))
+ return 0;
+ ptr = inl(ICEREG(ice, CONCAP_ADDR)) - ice->capture_con_virt_addr;
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == substream->runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static const struct snd_pcm_hardware snd_ice1712_playback = {
+@@ -1113,9 +1116,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_pro_pointer(struct snd_pcm_substre
+ if (!(inl(ICEMT(ice, PLAYBACK_CONTROL)) & ICE1712_PLAYBACK_START))
+ return 0;
+ ptr = ice->playback_pro_size - (inw(ICEMT(ice, PLAYBACK_SIZE)) << 2);
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == substream->runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static snd_pcm_uframes_t snd_ice1712_capture_pro_pointer(struct snd_pcm_substream *substream)
+@@ -1126,9 +1130,10 @@ static snd_pcm_uframes_t snd_ice1712_capture_pro_pointer(struct snd_pcm_substrea
+ if (!(inl(ICEMT(ice, PLAYBACK_CONTROL)) & ICE1712_CAPTURE_START_SHADOW))
+ return 0;
+ ptr = ice->capture_pro_size - (inw(ICEMT(ice, CAPTURE_SIZE)) << 2);
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == substream->runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static const struct snd_pcm_hardware snd_ice1712_playback_pro = {
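All four ice1712 pointer fixes make the same change: convert the hardware byte offset to frames before comparing against runtime->buffer_size, which is measured in frames. With the old ordering the wrap-to-zero comparison mixed units and could never match whenever a frame is more than one byte. Reduced to the essentials:

    #include <stdio.h>

    #define BYTES_PER_FRAME 4u   /* e.g. 16-bit stereo; illustrative */

    int main(void)
    {
        unsigned buffer_frames = 1024;
        unsigned hw_bytes = 4096;        /* hardware pointer at end of buffer */

        /* Wrong order: (4096 == 1024) is false, so the wrap never fires.
         * Right order: convert first, then compare frames to frames. */
        unsigned ptr = hw_bytes / BYTES_PER_FRAME;   /* bytes_to_frames() */
        if (ptr == buffer_frames)
            ptr = 0;
        printf("%u\n", ptr);             /* 0 */
        return 0;
    }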
+diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
+index 1e0fa3b5f79a..e1dfebbea650 100644
+--- a/sound/soc/codecs/cs42l51.c
++++ b/sound/soc/codecs/cs42l51.c
+@@ -124,9 +124,8 @@ static int cs42l51_set_chan_mix(struct snd_kcontrol *kcontrol,
+
+ static const DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -5150, 50, 0);
+ static const DECLARE_TLV_DB_SCALE(tone_tlv, -1050, 150, 0);
+-/* This is a lie. after -102 db, it stays at -102 */
+-/* maybe a range would be better */
+-static const DECLARE_TLV_DB_SCALE(aout_tlv, -11550, 50, 0);
++
++static const DECLARE_TLV_DB_SCALE(aout_tlv, -10200, 50, 0);
+
+ static const DECLARE_TLV_DB_SCALE(boost_tlv, 1600, 1600, 0);
+ static const char *chan_mix[] = {
+@@ -141,7 +140,7 @@ static const struct soc_enum cs42l51_chan_mix =
+ static const struct snd_kcontrol_new cs42l51_snd_controls[] = {
+ SOC_DOUBLE_R_SX_TLV("PCM Playback Volume",
+ CS42L51_PCMA_VOL, CS42L51_PCMB_VOL,
+- 6, 0x19, 0x7F, adc_pcm_tlv),
++ 0, 0x19, 0x7F, adc_pcm_tlv),
+ SOC_DOUBLE_R("PCM Playback Switch",
+ CS42L51_PCMA_VOL, CS42L51_PCMB_VOL, 7, 1, 1),
+ SOC_DOUBLE_R_SX_TLV("Analog Playback Volume",
+@@ -149,7 +148,7 @@ static const struct snd_kcontrol_new cs42l51_snd_controls[] = {
+ 0, 0x34, 0xE4, aout_tlv),
+ SOC_DOUBLE_R_SX_TLV("ADC Mixer Volume",
+ CS42L51_ADCA_VOL, CS42L51_ADCB_VOL,
+- 6, 0x19, 0x7F, adc_pcm_tlv),
++ 0, 0x19, 0x7F, adc_pcm_tlv),
+ SOC_DOUBLE_R("ADC Mixer Switch",
+ CS42L51_ADCA_VOL, CS42L51_ADCB_VOL, 7, 1, 1),
+ SOC_SINGLE("Playback Deemphasis Switch", CS42L51_DAC_CTL, 3, 1, 0),
+diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
+index be2ba1b6fe4a..ab3ac7b1fce2 100644
+--- a/sound/soc/codecs/cs42l52.c
++++ b/sound/soc/codecs/cs42l52.c
+@@ -352,7 +352,7 @@ static const char * const right_swap_text[] = {
+ static const unsigned int swap_values[] = { 0, 1, 3 };
+
+ static const struct soc_enum adca_swap_enum =
+- SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 2, 1,
++ SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 2, 3,
+ ARRAY_SIZE(left_swap_text),
+ left_swap_text,
+ swap_values);
+@@ -361,7 +361,7 @@ static const struct snd_kcontrol_new adca_mixer =
+ SOC_DAPM_ENUM("Route", adca_swap_enum);
+
+ static const struct soc_enum pcma_swap_enum =
+- SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 6, 1,
++ SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 6, 3,
+ ARRAY_SIZE(left_swap_text),
+ left_swap_text,
+ swap_values);
+@@ -370,7 +370,7 @@ static const struct snd_kcontrol_new pcma_mixer =
+ SOC_DAPM_ENUM("Route", pcma_swap_enum);
+
+ static const struct soc_enum adcb_swap_enum =
+- SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 0, 1,
++ SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 0, 3,
+ ARRAY_SIZE(right_swap_text),
+ right_swap_text,
+ swap_values);
+@@ -379,7 +379,7 @@ static const struct snd_kcontrol_new adcb_mixer =
+ SOC_DAPM_ENUM("Route", adcb_swap_enum);
+
+ static const struct soc_enum pcmb_swap_enum =
+- SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 4, 1,
++ SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 4, 3,
+ ARRAY_SIZE(right_swap_text),
+ right_swap_text,
+ swap_values);
+diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
+index 3b20c86cdb01..eade6e2d883d 100644
+--- a/sound/soc/codecs/cs42l73.c
++++ b/sound/soc/codecs/cs42l73.c
+@@ -325,7 +325,7 @@ static const char * const cs42l73_mono_mix_texts[] = {
+ static const unsigned int cs42l73_mono_mix_values[] = { 0, 1, 2 };
+
+ static const struct soc_enum spk_asp_enum =
+- SOC_VALUE_ENUM_SINGLE(CS42L73_MMIXCTL, 6, 1,
++ SOC_VALUE_ENUM_SINGLE(CS42L73_MMIXCTL, 6, 3,
+ ARRAY_SIZE(cs42l73_mono_mix_texts),
+ cs42l73_mono_mix_texts,
+ cs42l73_mono_mix_values);
+@@ -343,7 +343,7 @@ static const struct snd_kcontrol_new spk_xsp_mixer =
+ SOC_DAPM_ENUM("Route", spk_xsp_enum);
+
+ static const struct soc_enum esl_asp_enum =
+- SOC_VALUE_ENUM_SINGLE(CS42L73_MMIXCTL, 2, 5,
++ SOC_VALUE_ENUM_SINGLE(CS42L73_MMIXCTL, 2, 3,
+ ARRAY_SIZE(cs42l73_mono_mix_texts),
+ cs42l73_mono_mix_texts,
+ cs42l73_mono_mix_values);
+@@ -352,7 +352,7 @@ static const struct snd_kcontrol_new esl_asp_mixer =
+ SOC_DAPM_ENUM("Route", esl_asp_enum);
+
+ static const struct soc_enum esl_xsp_enum =
+- SOC_VALUE_ENUM_SINGLE(CS42L73_MMIXCTL, 0, 7,
++ SOC_VALUE_ENUM_SINGLE(CS42L73_MMIXCTL, 0, 3,
+ ARRAY_SIZE(cs42l73_mono_mix_texts),
+ cs42l73_mono_mix_texts,
+ cs42l73_mono_mix_values);
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index b2949aed1ac2..4136cc25154e 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -251,7 +251,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
+ static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
+ {
+ struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
+- kfree(data->widget);
+ kfree(data->wlist);
+ kfree(data);
+ }
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index 685fc72fc751..b001dbff0f38 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -751,6 +751,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
+ case 0:
+ if (!target_cpus)
+ return;
++ break;
+
+ case 1:
+ target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
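The vgic change is a one-line missing break: without it the "no target CPUs" case of vgic_dispatch_sgi() falls straight through into case 1 and replaces target_cpus with the broadcast mask. The bug class, distilled:

    #include <stdio.h>

    static unsigned dispatch(unsigned mode, unsigned targets)
    {
        switch (mode) {
        case 0:
            if (!targets)
                return 0;
            break;           /* without this, mode 0 falls into case 1 */
        case 1:
            targets = 0xff;  /* broadcast */
            break;
        }
        return targets;
    }

    int main(void)
    {
        /* 0x4 with the break; 0xff if case 0 falls through. */
        printf("%#x\n", dispatch(0, 0x4));
        return 0;
    }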
diff --git a/1020_linux-3.12.21.patch b/1020_linux-3.12.21.patch
new file mode 100644
index 00000000..e47419b2
--- /dev/null
+++ b/1020_linux-3.12.21.patch
@@ -0,0 +1,4218 @@
+diff --git a/Makefile b/Makefile
+index d8adfdbe0344..e4a8804bb609 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 20
++SUBLEVEL = 21
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
+index b2c68ce139ae..a5b30c71a8d3 100644
+--- a/arch/powerpc/lib/crtsavres.S
++++ b/arch/powerpc/lib/crtsavres.S
+@@ -231,6 +231,87 @@ _GLOBAL(_rest32gpr_31_x)
+ mr 1,11
+ blr
+
++#ifdef CONFIG_ALTIVEC
++/* Called with r0 pointing just beyond the end of the vector save area. */
++
++_GLOBAL(_savevr_20)
++ li r11,-192
++ stvx vr20,r11,r0
++_GLOBAL(_savevr_21)
++ li r11,-176
++ stvx vr21,r11,r0
++_GLOBAL(_savevr_22)
++ li r11,-160
++ stvx vr22,r11,r0
++_GLOBAL(_savevr_23)
++ li r11,-144
++ stvx vr23,r11,r0
++_GLOBAL(_savevr_24)
++ li r11,-128
++ stvx vr24,r11,r0
++_GLOBAL(_savevr_25)
++ li r11,-112
++ stvx vr25,r11,r0
++_GLOBAL(_savevr_26)
++ li r11,-96
++ stvx vr26,r11,r0
++_GLOBAL(_savevr_27)
++ li r11,-80
++ stvx vr27,r11,r0
++_GLOBAL(_savevr_28)
++ li r11,-64
++ stvx vr28,r11,r0
++_GLOBAL(_savevr_29)
++ li r11,-48
++ stvx vr29,r11,r0
++_GLOBAL(_savevr_30)
++ li r11,-32
++ stvx vr30,r11,r0
++_GLOBAL(_savevr_31)
++ li r11,-16
++ stvx vr31,r11,r0
++ blr
++
++_GLOBAL(_restvr_20)
++ li r11,-192
++ lvx vr20,r11,r0
++_GLOBAL(_restvr_21)
++ li r11,-176
++ lvx vr21,r11,r0
++_GLOBAL(_restvr_22)
++ li r11,-160
++ lvx vr22,r11,r0
++_GLOBAL(_restvr_23)
++ li r11,-144
++ lvx vr23,r11,r0
++_GLOBAL(_restvr_24)
++ li r11,-128
++ lvx vr24,r11,r0
++_GLOBAL(_restvr_25)
++ li r11,-112
++ lvx vr25,r11,r0
++_GLOBAL(_restvr_26)
++ li r11,-96
++ lvx vr26,r11,r0
++_GLOBAL(_restvr_27)
++ li r11,-80
++ lvx vr27,r11,r0
++_GLOBAL(_restvr_28)
++ li r11,-64
++ lvx vr28,r11,r0
++_GLOBAL(_restvr_29)
++ li r11,-48
++ lvx vr29,r11,r0
++_GLOBAL(_restvr_30)
++ li r11,-32
++ lvx vr30,r11,r0
++_GLOBAL(_restvr_31)
++ li r11,-16
++ lvx vr31,r11,r0
++ blr
++
++#endif /* CONFIG_ALTIVEC */
++
+ #else /* CONFIG_PPC64 */
+
+ .section ".text.save.restore","ax",@progbits
+@@ -356,6 +437,111 @@ _restgpr0_31:
+ mtlr r0
+ blr
+
++#ifdef CONFIG_ALTIVEC
++/* Called with r0 pointing just beyond the end of the vector save area. */
++
++.globl _savevr_20
++_savevr_20:
++ li r12,-192
++ stvx vr20,r12,r0
++.globl _savevr_21
++_savevr_21:
++ li r12,-176
++ stvx vr21,r12,r0
++.globl _savevr_22
++_savevr_22:
++ li r12,-160
++ stvx vr22,r12,r0
++.globl _savevr_23
++_savevr_23:
++ li r12,-144
++ stvx vr23,r12,r0
++.globl _savevr_24
++_savevr_24:
++ li r12,-128
++ stvx vr24,r12,r0
++.globl _savevr_25
++_savevr_25:
++ li r12,-112
++ stvx vr25,r12,r0
++.globl _savevr_26
++_savevr_26:
++ li r12,-96
++ stvx vr26,r12,r0
++.globl _savevr_27
++_savevr_27:
++ li r12,-80
++ stvx vr27,r12,r0
++.globl _savevr_28
++_savevr_28:
++ li r12,-64
++ stvx vr28,r12,r0
++.globl _savevr_29
++_savevr_29:
++ li r12,-48
++ stvx vr29,r12,r0
++.globl _savevr_30
++_savevr_30:
++ li r12,-32
++ stvx vr30,r12,r0
++.globl _savevr_31
++_savevr_31:
++ li r12,-16
++ stvx vr31,r12,r0
++ blr
++
++.globl _restvr_20
++_restvr_20:
++ li r12,-192
++ lvx vr20,r12,r0
++.globl _restvr_21
++_restvr_21:
++ li r12,-176
++ lvx vr21,r12,r0
++.globl _restvr_22
++_restvr_22:
++ li r12,-160
++ lvx vr22,r12,r0
++.globl _restvr_23
++_restvr_23:
++ li r12,-144
++ lvx vr23,r12,r0
++.globl _restvr_24
++_restvr_24:
++ li r12,-128
++ lvx vr24,r12,r0
++.globl _restvr_25
++_restvr_25:
++ li r12,-112
++ lvx vr25,r12,r0
++.globl _restvr_26
++_restvr_26:
++ li r12,-96
++ lvx vr26,r12,r0
++.globl _restvr_27
++_restvr_27:
++ li r12,-80
++ lvx vr27,r12,r0
++.globl _restvr_28
++_restvr_28:
++ li r12,-64
++ lvx vr28,r12,r0
++.globl _restvr_29
++_restvr_29:
++ li r12,-48
++ lvx vr29,r12,r0
++.globl _restvr_30
++_restvr_30:
++ li r12,-32
++ lvx vr30,r12,r0
++.globl _restvr_31
++_restvr_31:
++ li r12,-16
++ lvx vr31,r12,r0
++ blr
++
++#endif /* CONFIG_ALTIVEC */
++
+ #endif /* CONFIG_PPC64 */
+
+ #endif
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 92af83d79c97..77046f7177d5 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1073,7 +1073,6 @@ static inline u64 get_kernel_ns(void)
+ {
+ struct timespec ts;
+
+- WARN_ON(preemptible());
+ ktime_get_ts(&ts);
+ monotonic_to_bootbased(&ts);
+ return timespec_to_ns(&ts);
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index be6b86078957..ba81b546d714 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -274,7 +274,7 @@ void __init xen_init_spinlocks(void)
+ printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
+ return;
+ }
+-
++ printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
+ pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
+ pv_lock_ops.unlock_kick = xen_unlock_kick;
+ }
+@@ -290,6 +290,9 @@ static __init int xen_init_spinlocks_jump(void)
+ if (!xen_pvspin)
+ return 0;
+
++ if (!xen_domain())
++ return 0;
++
+ static_key_slow_inc(&paravirt_ticketlocks_enabled);
+ return 0;
+ }
+diff --git a/block/blk-core.c b/block/blk-core.c
+index fce4b9387f36..bf214ae98937 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2297,7 +2297,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+ if (!req->bio)
+ return false;
+
+- trace_block_rq_complete(req->q, req);
++ trace_block_rq_complete(req->q, req, nr_bytes);
+
+ /*
+ * For fs requests, rq is just carrier of independent bio's
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index cb1db2979d3d..db60c91804c3 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -2272,7 +2272,7 @@ out_partial:
+ rbd_obj_request_put(obj_request);
+ out_unwind:
+ for_each_obj_request_safe(img_request, obj_request, next_obj_request)
+- rbd_obj_request_put(obj_request);
++ rbd_img_obj_request_del(img_request, obj_request);
+
+ return -ENOMEM;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
+index dd6f84bf6c22..a1e980938fef 100644
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -1530,9 +1530,14 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
+ /*
+ * If the device type is not TV, continue.
+ */
+- if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+- p_child->device_type != DEVICE_TYPE_TV)
++ switch (p_child->device_type) {
++ case DEVICE_TYPE_INT_TV:
++ case DEVICE_TYPE_TV:
++ case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
++ break;
++ default:
+ continue;
++ }
+ /* Only when the addin_offset is non-zero, it is regarded
+ * as present.
+ */
+diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
+index 037786d7c1dc..ed90fbe63686 100644
+--- a/drivers/gpu/drm/qxl/qxl_ttm.c
++++ b/drivers/gpu/drm/qxl/qxl_ttm.c
+@@ -433,6 +433,7 @@ static int qxl_sync_obj_flush(void *sync_obj)
+
+ static void qxl_sync_obj_unref(void **sync_obj)
+ {
++ *sync_obj = NULL;
+ }
+
+ static void *qxl_sync_obj_ref(void *sync_obj)
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 25370ac56b4b..bcefa1de3e97 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -1092,7 +1092,7 @@ static const u32 spectre_golden_registers[] =
+ 0x8a14, 0xf000003f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x28350, 0x3f3f3fff, 0x00000082,
+- 0x28355, 0x0000003f, 0x00000000,
++ 0x28354, 0x0000003f, 0x00000000,
+ 0x3e78, 0x00000001, 0x00000002,
+ 0x913c, 0xffff03df, 0x00000004,
+ 0xc768, 0x00000008, 0x00000008,
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 0d1aa050d41d..2f7fd3ff12c0 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -745,6 +745,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
++ drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 8b059eb09d9b..1b9aa982257e 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -834,14 +834,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int ret;
++ SVGA3dCmdSurfaceDMASuffix *suffix;
++ uint32_t bo_size;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
++ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
++ header->size - sizeof(*suffix));
++
++	/* Make sure the device and the verifier stay in sync. */
++ if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
++ DRM_ERROR("Invalid DMA suffix size.\n");
++ return -EINVAL;
++ }
++
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->dma.guest.ptr,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
++ /* Make sure DMA doesn't cross BO boundaries. */
++ bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
++ if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
++ DRM_ERROR("Invalid DMA offset.\n");
++ return -EINVAL;
++ }
++
++ bo_size -= cmd->dma.guest.ptr.offset;
++ if (unlikely(suffix->maximumOffset > bo_size))
++ suffix->maximumOffset = bo_size;
++
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter, &cmd->dma.host.sid,
+ NULL);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+index ed5ce2a41bbf..021b5227e783 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+@@ -147,7 +147,7 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
+ }
+
+ if (!vmw_kms_validate_mode_vram(vmw_priv,
+- info->fix.line_length,
++ var->xres * var->bits_per_pixel/8,
+ var->yoffset + var->yres)) {
+ DRM_ERROR("Requested geom can not fit in framebuffer\n");
+ return -EINVAL;
+@@ -162,6 +162,8 @@ static int vmw_fb_set_par(struct fb_info *info)
+ struct vmw_private *vmw_priv = par->vmw_priv;
+ int ret;
+
++ info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
++
+ ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+ info->fix.line_length,
+ par->bpp, par->depth);
+@@ -177,6 +179,7 @@ static int vmw_fb_set_par(struct fb_info *info)
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
++ vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index aedfe50d557a..0335f86502c2 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -718,6 +718,9 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
+ case HID_MAIN_ITEM_TAG_END_COLLECTION:
+ break;
+ case HID_MAIN_ITEM_TAG_INPUT:
++		/* skip constant inputs; hid-input will ignore them anyway */
++ if (data & HID_MAIN_ITEM_CONSTANT)
++ break;
+ for (i = 0; i < parser->local.usage_index; i++)
+ hid_scan_input_usage(parser, parser->local.usage[i]);
+ break;
+@@ -1822,8 +1825,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
+- { HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH) },
+- { HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index aeeea796f595..ca275f47e860 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -636,6 +636,7 @@
+
+ #define USB_VENDOR_ID_NEXIO 0x1870
+ #define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d
++#define USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750 0x0110
+
+ #define USB_VENDOR_ID_NEXTWINDOW 0x1926
+ #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
+@@ -753,9 +754,11 @@
+ #define USB_VENDOR_ID_SIGMATEL 0x066F
+ #define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
+
+-#define USB_VENDOR_ID_SIS2_TOUCH 0x0457
++#define USB_VENDOR_ID_SIS_TOUCH 0x0457
+ #define USB_DEVICE_ID_SIS9200_TOUCH 0x9200
+ #define USB_DEVICE_ID_SIS817_TOUCH 0x0817
++#define USB_DEVICE_ID_SIS_TS 0x1013
++#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
+
+ #define USB_VENDOR_ID_SKYCABLE 0x1223
+ #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
+@@ -807,6 +810,9 @@
+ #define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013
+ #define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8
+ #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
++#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
++#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
++#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
+
+ #define USB_VENDOR_ID_THINGM 0x27b8
+ #define USB_DEVICE_ID_BLINK1 0x01ed
+@@ -937,7 +943,5 @@
+ #define USB_VENDOR_ID_PRIMAX 0x0461
+ #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+
+-#define USB_VENDOR_ID_SIS 0x0457
+-#define USB_DEVICE_ID_SIS_TS 0x1013
+
+ #endif
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index d83b1e8b505b..f134d73beca1 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1301,11 +1301,14 @@ static const struct hid_device_id mt_devices[] = {
+
+ /* SiS panels */
+ { .driver_data = MT_CLS_DEFAULT,
+- HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
++ HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH,
+ USB_DEVICE_ID_SIS9200_TOUCH) },
+ { .driver_data = MT_CLS_DEFAULT,
+- HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
++ HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH,
+ USB_DEVICE_ID_SIS817_TOUCH) },
++ { .driver_data = MT_CLS_DEFAULT,
++ HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH,
++ USB_DEVICE_ID_SIS1030_TOUCH) },
+
+ /* Stantum panels */
+ { .driver_data = MT_CLS_CONFIDENCE,
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 0db9a67278ba..8e4ddb369883 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -74,6 +74,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+@@ -84,8 +85,10 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
+- { USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, HID_QUIRK_NOGET },
+- { USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
+@@ -114,7 +117,9 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
+- { USB_VENDOR_ID_SIS, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
+
+ { 0, 0 }
+ };
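The hid-ids.h and hid-quirks.c churn above is all maintenance of a sentinel-terminated (vendor, product) -> flags table. The lookup idiom, with invented IDs and flag values:

    #include <stdio.h>
    #include <stdint.h>

    struct quirk { uint16_t vendor, product; uint32_t flags; };

    #define QUIRK_NOGET            0x1u
    #define QUIRK_NO_INIT_REPORTS  0x2u

    static const struct quirk blacklist[] = {
        { 0x0457, 0x9200, QUIRK_NOGET },             /* illustrative entries */
        { 0x06cb, 0x0ac3, QUIRK_NO_INIT_REPORTS },
        { 0, 0 }                                     /* sentinel ends the scan */
    };

    static uint32_t lookup_quirks(uint16_t vendor, uint16_t product)
    {
        for (const struct quirk *q = blacklist; q->vendor; q++)
            if (q->vendor == vendor && q->product == product)
                return q->flags;
        return 0;
    }

    int main(void)
    {
        printf("%#x\n", lookup_quirks(0x0457, 0x9200));   /* 0x1 */
        return 0;
    }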
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 6edc2db428e9..66c4aee20c72 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -94,6 +94,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
+ struct pool_info *pi = data;
+ struct r1bio *r1_bio;
+ struct bio *bio;
++ int need_pages;
+ int i, j;
+
+ r1_bio = r1bio_pool_alloc(gfp_flags, pi);
+@@ -116,15 +117,15 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
+ * RESYNC_PAGES for each bio.
+ */
+ if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
+- j = pi->raid_disks;
++ need_pages = pi->raid_disks;
+ else
+- j = 1;
+- while(j--) {
++ need_pages = 1;
++ for (j = 0; j < need_pages; j++) {
+ bio = r1_bio->bios[j];
+ bio->bi_vcnt = RESYNC_PAGES;
+
+ if (bio_alloc_pages(bio, gfp_flags))
+- goto out_free_bio;
++ goto out_free_pages;
+ }
+ /* If not user-requests, copy the page pointers to all bios */
+ if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
+@@ -138,6 +139,14 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
+
+ return r1_bio;
+
++out_free_pages:
++ while (--j >= 0) {
++ struct bio_vec *bv;
++
++ bio_for_each_segment_all(bv, r1_bio->bios[j], i)
++ __free_page(bv->bv_page);
++ }
++
+ out_free_bio:
+ while (++j < pi->raid_disks)
+ bio_put(r1_bio->bios[j]);
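The r1buf_pool_alloc fix introduces need_pages so the loop counter j survives to the error path, and out_free_pages frees pages from exactly the bios that received them before falling through to the bio cleanup. The count-up/unwind-down idiom it restores:

    #include <stdlib.h>

    #define N 4

    static int alloc_all(void *bufs[N])
    {
        int j;

        for (j = 0; j < N; j++) {
            bufs[j] = malloc(4096);
            if (!bufs[j])
                goto out_free;     /* bufs[0..j-1] hold live allocations */
        }
        return 0;

    out_free:
        while (--j >= 0)           /* unwind exactly what was allocated */
            free(bufs[j]);
        return -1;
    }

    int main(void)
    {
        void *bufs[N];
        if (alloc_all(bufs))
            return 1;              /* failure path leaked nothing */
        for (int j = 0; j < N; j++)
            free(bufs[j]);
        return 0;
    }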
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index dd8057d0bae7..f5a8b9c83ca6 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4623,6 +4623,7 @@ static int __init bonding_init(void)
+ out:
+ return res;
+ err:
++ bond_destroy_debugfs();
+ rtnl_link_unregister(&bond_link_ops);
+ err_link:
+ unregister_pernet_subsys(&bond_net_ops);
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+index 9fbeee522d2c..32c92abf5094 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+@@ -1217,9 +1217,6 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+ ETH_VLAN_FILTER_CLASSIFY, config);
+ }
+
+-#define list_next_entry(pos, member) \
+- list_entry((pos)->member.next, typeof(*(pos)), member)
+-
+ /**
+ * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
+ *
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index aae7ba66e7bb..65a058967cbb 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -12197,7 +12197,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
+ if (tg3_flag(tp, MAX_RXPEND_64) &&
+ tp->rx_pending > 63)
+ tp->rx_pending = 63;
+- tp->rx_jumbo_pending = ering->rx_jumbo_pending;
++
++ if (tg3_flag(tp, JUMBO_RING_ENABLE))
++ tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+
+ for (i = 0; i < tp->irq_max; i++)
+ tp->napi[i].tx_pending = ering->tx_pending;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+index 3e2d5047cdb3..d9303d8d9280 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+@@ -55,7 +55,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
+
+ cq->ring = ring;
+ cq->is_tx = mode;
+- spin_lock_init(&cq->lock);
+
+ err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
+ cq->buf_size, 2 * PAGE_SIZE);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index fa37b7a61213..35d3821bed50 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1219,15 +1219,11 @@ static void mlx4_en_netpoll(struct net_device *dev)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_cq *cq;
+- unsigned long flags;
+ int i;
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+- spin_lock_irqsave(&cq->lock, flags);
+- napi_synchronize(&cq->napi);
+- mlx4_en_process_rx_cq(dev, cq, 0);
+- spin_unlock_irqrestore(&cq->lock, flags);
++ napi_schedule(&cq->napi);
+ }
+ }
+ #endif
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index bf06e3610d27..a47455fcfb56 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -306,7 +306,6 @@ struct mlx4_en_cq {
+ struct mlx4_cq mcq;
+ struct mlx4_hwq_resources wqres;
+ int ring;
+- spinlock_t lock;
+ struct net_device *dev;
+ struct napi_struct napi;
+ int size;
+diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
+index 9826594c8a48..65087178a0a7 100644
+--- a/drivers/net/ethernet/sfc/nic.c
++++ b/drivers/net/ethernet/sfc/nic.c
+@@ -155,13 +155,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
+ efx->net_dev->rx_cpu_rmap = NULL;
+ #endif
+
+- /* Disable MSI/MSI-X interrupts */
+- efx_for_each_channel(channel, efx)
+- free_irq(channel->irq, &efx->msi_context[channel->channel]);
+-
+- /* Disable legacy interrupt */
+- if (efx->legacy_irq)
++ if (EFX_INT_MODE_USE_MSI(efx)) {
++ /* Disable MSI/MSI-X interrupts */
++ efx_for_each_channel(channel, efx)
++ free_irq(channel->irq,
++ &efx->msi_context[channel->channel]);
++ } else {
++ /* Disable legacy interrupt */
+ free_irq(efx->legacy_irq, efx);
++ }
+ }
+
+ /* Register dump */
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 9bf46bd19b87..743aa91c853c 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -263,11 +263,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+ const struct macvlan_dev *vlan = netdev_priv(dev);
+ const struct macvlan_port *port = vlan->port;
+ const struct macvlan_dev *dest;
+- __u8 ip_summed = skb->ip_summed;
+
+ if (vlan->mode == MACVLAN_MODE_BRIDGE) {
+ const struct ethhdr *eth = (void *)skb->data;
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* send to other bridge ports directly */
+ if (is_multicast_ether_addr(eth->h_dest)) {
+@@ -285,7 +283,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ xmit_world:
+- skb->ip_summed = ip_summed;
+ skb->dev = vlan->lowerdev;
+ return dev_queue_xmit(skb);
+ }
+@@ -428,8 +425,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *lowerdev = vlan->lowerdev;
+
+- if (change & IFF_ALLMULTI)
+- dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
++ if (dev->flags & IFF_UP) {
++ if (change & IFF_ALLMULTI)
++ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
++ }
+ }
+
+ static void macvlan_set_mac_lists(struct net_device *dev)
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 5895e4dbbf2a..d0f165f2877b 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -315,6 +315,15 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
+ segs = nskb;
+ }
+ } else {
++ /* If we receive a partial checksum and the tap side
++ * doesn't support checksum offload, compute the checksum.
++		 * Note: it doesn't matter which checksum feature we
++		 * check; we either support them all or none.
++ */
++ if (skb->ip_summed == CHECKSUM_PARTIAL &&
++ !(features & NETIF_F_ALL_CSUM) &&
++ skb_checksum_help(skb))
++ goto drop;
+ skb_queue_tail(&q->sk.sk_receive_queue, skb);
+ }
+
+diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
+index cc70ecfc7062..ad4a94e9ff57 100644
+--- a/drivers/net/slip/slip.c
++++ b/drivers/net/slip/slip.c
+@@ -429,13 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
+ if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
+ return;
+
+- spin_lock(&sl->lock);
++ spin_lock_bh(&sl->lock);
+ if (sl->xleft <= 0) {
+ /* Now serial buffer is almost free & we can start
+ * transmission of another packet */
+ sl->dev->stats.tx_packets++;
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+- spin_unlock(&sl->lock);
++ spin_unlock_bh(&sl->lock);
+ sl_unlock(sl);
+ return;
+ }
+@@ -443,7 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
+ actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+ sl->xleft -= actual;
+ sl->xhead += actual;
+- spin_unlock(&sl->lock);
++ spin_unlock_bh(&sl->lock);
+ }
+
+ static void sl_tx_timeout(struct net_device *dev)
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index 25ba7eca9a13..7cabe4583904 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
+ cdc_ncm_unbind(dev, intf);
+ }
+
++/* verify that the ethernet protocol is IPv4 or IPv6 */
++static bool is_ip_proto(__be16 proto)
++{
++ switch (proto) {
++ case htons(ETH_P_IP):
++ case htons(ETH_P_IPV6):
++ return true;
++ }
++ return false;
++}
+
+ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+ {
+@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
+ struct cdc_ncm_ctx *ctx = info->ctx;
+ __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
+ u16 tci = 0;
++ bool is_ip;
+ u8 *c;
+
+ if (!ctx)
+@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
+ if (skb->len <= ETH_HLEN)
+ goto error;
+
++ /* Some applications using e.g. packet sockets will
++ * bypass the VLAN acceleration and create tagged
++ * ethernet frames directly. We primarily look for
++ * the accelerated out-of-band tag, but fall back if
++ * required
++ */
++ skb_reset_mac_header(skb);
++ if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
++ __vlan_get_tag(skb, &tci) == 0) {
++ is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
++ skb_pull(skb, VLAN_ETH_HLEN);
++ } else {
++ is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
++ skb_pull(skb, ETH_HLEN);
++ }
++
+ /* mapping VLANs to MBIM sessions:
+ * no tag => IPS session <0>
+ * 1 - 255 => IPS session <vlanid>
+ * 256 - 511 => DSS session <vlanid - 256>
+ * 512 - 4095 => unsupported, drop
+ */
+- vlan_get_tag(skb, &tci);
+-
+ switch (tci & 0x0f00) {
+ case 0x0000: /* VLAN ID 0 - 255 */
+- /* verify that datagram is IPv4 or IPv6 */
+- skb_reset_mac_header(skb);
+- switch (eth_hdr(skb)->h_proto) {
+- case htons(ETH_P_IP):
+- case htons(ETH_P_IPV6):
+- break;
+- default:
++ if (!is_ip)
+ goto error;
+- }
+ c = (u8 *)&sign;
+ c[3] = tci;
+ break;
+@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
+ "unsupported tci=0x%04x\n", tci);
+ goto error;
+ }
+- skb_pull(skb, ETH_HLEN);
+ }
+
+ spin_lock_bh(&ctx->mtx);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 558469fda3b7..dca474319c8a 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -649,6 +649,22 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
+ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
++ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
++ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
++ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
++ {QMI_FIXED_INTF(0x16d8, 0x6280, 0)}, /* CMOTech CHU-628 */
++ {QMI_FIXED_INTF(0x16d8, 0x7001, 0)}, /* CMOTech CHU-720S */
++ {QMI_FIXED_INTF(0x16d8, 0x7002, 0)}, /* CMOTech 7002 */
++ {QMI_FIXED_INTF(0x16d8, 0x7003, 4)}, /* CMOTech CHU-629K */
++ {QMI_FIXED_INTF(0x16d8, 0x7004, 3)}, /* CMOTech 7004 */
++ {QMI_FIXED_INTF(0x16d8, 0x7006, 5)}, /* CMOTech CGU-629 */
++ {QMI_FIXED_INTF(0x16d8, 0x700a, 4)}, /* CMOTech CHU-629S */
++ {QMI_FIXED_INTF(0x16d8, 0x7211, 0)}, /* CMOTech CHU-720I */
++ {QMI_FIXED_INTF(0x16d8, 0x7212, 0)}, /* CMOTech 7212 */
++ {QMI_FIXED_INTF(0x16d8, 0x7213, 0)}, /* CMOTech 7213 */
++ {QMI_FIXED_INTF(0x16d8, 0x7251, 1)}, /* CMOTech 7251 */
++ {QMI_FIXED_INTF(0x16d8, 0x7252, 1)}, /* CMOTech 7252 */
++ {QMI_FIXED_INTF(0x16d8, 0x7253, 1)}, /* CMOTech 7253 */
+ {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
+ {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
+ {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
+@@ -699,6 +715,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
+ {QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
++ {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
+ {QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
+ {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
+ {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
+@@ -709,14 +726,28 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
++ {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */
++ {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */
++ {QMI_FIXED_INTF(0x1199, 0x68c0, 11)}, /* Sierra Wireless MC73xx */
+ {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
++ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
++ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
+ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
+ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
++ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
+ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
+ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
++ {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
++ {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
+ {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
++ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
++ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
++ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
++ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
++ {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
++ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+
+ /* 4. Gobi 1000 devices */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
+index dfffd0f37916..a70692779a16 100644
+--- a/drivers/scsi/megaraid/megaraid_mm.c
++++ b/drivers/scsi/megaraid/megaraid_mm.c
+@@ -486,6 +486,8 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
+
+ pthru32->dataxferaddr = kioc->buf_paddr;
+ if (kioc->data_dir & UIOC_WR) {
++ if (pthru32->dataxferlen > kioc->xferlen)
++ return -EINVAL;
+ if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
+ pthru32->dataxferlen)) {
+ return (-EFAULT);
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 307a81137607..4109530e92a0 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -320,6 +320,7 @@ static void scsi_target_destroy(struct scsi_target *starget)
+ struct Scsi_Host *shost = dev_to_shost(dev->parent);
+ unsigned long flags;
+
++ starget->state = STARGET_DEL;
+ transport_destroy_device(dev);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (shost->hostt->target_destroy)
+@@ -371,6 +372,37 @@ static struct scsi_target *__scsi_find_target(struct device *parent,
+ }
+
+ /**
++ * scsi_target_reap_ref_release - remove target from visibility
++ * @kref: the reap_ref in the target being released
++ *
++ * Called on last put of reap_ref, which is the indication that no device
++ * under this target is visible anymore, so render the target invisible in
++ * sysfs. Note: we have to be in user context here because target reaping
++ * should be done in places where the scsi device visibility is being removed.
++ */
++static void scsi_target_reap_ref_release(struct kref *kref)
++{
++ struct scsi_target *starget
++ = container_of(kref, struct scsi_target, reap_ref);
++
++ /*
++ * if we get here and the target is still in the CREATED state that
++ * means it was allocated but never made visible (because a scan
++ * turned up no LUNs), so don't call device_del() on it.
++ */
++ if (starget->state != STARGET_CREATED) {
++ transport_remove_device(&starget->dev);
++ device_del(&starget->dev);
++ }
++ scsi_target_destroy(starget);
++}
++
++static void scsi_target_reap_ref_put(struct scsi_target *starget)
++{
++ kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
++}
++
++/**
+ * scsi_alloc_target - allocate a new or find an existing target
+ * @parent: parent of the target (need not be a scsi host)
+ * @channel: target channel number (zero if no channels)
+@@ -392,7 +424,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
+ + shost->transportt->target_size;
+ struct scsi_target *starget;
+ struct scsi_target *found_target;
+- int error;
++ int error, ref_got;
+
+ starget = kzalloc(size, GFP_KERNEL);
+ if (!starget) {
+@@ -401,7 +433,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
+ }
+ dev = &starget->dev;
+ device_initialize(dev);
+- starget->reap_ref = 1;
++ kref_init(&starget->reap_ref);
+ dev->parent = get_device(parent);
+ dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
+ dev->bus = &scsi_bus_type;
+@@ -441,29 +473,36 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
+ return starget;
+
+ found:
+- found_target->reap_ref++;
++ /*
++ * release routine already fired if kref is zero, so if we can still
++ * take the reference, the target must be alive. If we can't, it must
++ * be dying and we need to wait for a new target
++ */
++ ref_got = kref_get_unless_zero(&found_target->reap_ref);
++
+ spin_unlock_irqrestore(shost->host_lock, flags);
+- if (found_target->state != STARGET_DEL) {
++ if (ref_got) {
+ put_device(dev);
+ return found_target;
+ }
+- /* Unfortunately, we found a dying target; need to
+- * wait until it's dead before we can get a new one */
++ /*
++ * Unfortunately, we found a dying target; need to wait until it's
++ * dead before we can get a new one. There is an anomaly here. We
++ * *should* call scsi_target_reap() to balance the kref_get() of the
++	 * reap_ref above. However, since the target is being released, it's
++ * already invisible and the reap_ref is irrelevant. If we call
++ * scsi_target_reap() we might spuriously do another device_del() on
++ * an already invisible target.
++ */
+ put_device(&found_target->dev);
+- flush_scheduled_work();
++ /*
++	 * The length of time is irrelevant here; we just want to yield the CPU
++	 * for a tick to avoid busy-waiting for the target to die.
++ */
++ msleep(1);
+ goto retry;
+ }
+
+-static void scsi_target_reap_usercontext(struct work_struct *work)
+-{
+- struct scsi_target *starget =
+- container_of(work, struct scsi_target, ew.work);
+-
+- transport_remove_device(&starget->dev);
+- device_del(&starget->dev);
+- scsi_target_destroy(starget);
+-}
+-
+ /**
+ * scsi_target_reap - check to see if target is in use and destroy if not
+ * @starget: target to be checked
+@@ -474,28 +513,13 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
+ */
+ void scsi_target_reap(struct scsi_target *starget)
+ {
+- struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+- unsigned long flags;
+- enum scsi_target_state state;
+- int empty = 0;
+-
+- spin_lock_irqsave(shost->host_lock, flags);
+- state = starget->state;
+- if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
+- empty = 1;
+- starget->state = STARGET_DEL;
+- }
+- spin_unlock_irqrestore(shost->host_lock, flags);
+-
+- if (!empty)
+- return;
+-
+- BUG_ON(state == STARGET_DEL);
+- if (state == STARGET_CREATED)
+- scsi_target_destroy(starget);
+- else
+- execute_in_process_context(scsi_target_reap_usercontext,
+- &starget->ew);
++ /*
++	 * serious problem if this triggers: STARGET_DEL is only set if the
++	 * reap_ref drops to zero, so we're trying to do another final put
++ * on an already released kref
++ */
++ BUG_ON(starget->state == STARGET_DEL);
++ scsi_target_reap_ref_put(starget);
+ }
+
+ /**
+@@ -1532,6 +1556,10 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
+ }
+ mutex_unlock(&shost->scan_mutex);
+ scsi_autopm_put_target(starget);
++ /*
++ * paired with scsi_alloc_target(). Target will be destroyed unless
++ * scsi_probe_and_add_lun made an underlying device visible
++ */
+ scsi_target_reap(starget);
+ put_device(&starget->dev);
+
+@@ -1612,8 +1640,10 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
+
+ out_reap:
+ scsi_autopm_put_target(starget);
+- /* now determine if the target has any children at all
+- * and if not, nuke it */
++ /*
++ * paired with scsi_alloc_target(): determine if the target has
++ * any children at all and if not, nuke it
++ */
+ scsi_target_reap(starget);
+
+ put_device(&starget->dev);
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 40c639491b27..dfb007c95b98 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -332,17 +332,14 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
+ {
+ struct scsi_device *sdev;
+ struct device *parent;
+- struct scsi_target *starget;
+ struct list_head *this, *tmp;
+ unsigned long flags;
+
+ sdev = container_of(work, struct scsi_device, ew.work);
+
+ parent = sdev->sdev_gendev.parent;
+- starget = to_scsi_target(parent);
+
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+- starget->reap_ref++;
+ list_del(&sdev->siblings);
+ list_del(&sdev->same_target_siblings);
+ list_del(&sdev->starved_entry);
+@@ -362,8 +359,6 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
+ /* NULL queue means the device can't be used */
+ sdev->request_queue = NULL;
+
+- scsi_target_reap(scsi_target(sdev));
+-
+ kfree(sdev->inquiry);
+ kfree(sdev);
+
+@@ -1018,6 +1013,13 @@ void __scsi_remove_device(struct scsi_device *sdev)
+ sdev->host->hostt->slave_destroy(sdev);
+ transport_destroy_device(dev);
+
++ /*
++	 * Paired with the kref_get() in scsi_sysfs_device_initialize(). We
++	 * have removed sysfs visibility from the device, so make the target
++ * invisible if this was the last device underneath it.
++ */
++ scsi_target_reap(scsi_target(sdev));
++
+ put_device(dev);
+ }
+
+@@ -1080,7 +1082,7 @@ void scsi_remove_target(struct device *dev)
+ continue;
+ if (starget->dev.parent == dev || &starget->dev == dev) {
+ /* assuming new targets arrive at the end */
+- starget->reap_ref++;
++ kref_get(&starget->reap_ref);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ if (last)
+ scsi_target_reap(last);
+@@ -1164,6 +1166,12 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
+ list_add_tail(&sdev->same_target_siblings, &starget->devices);
+ list_add_tail(&sdev->siblings, &shost->__devices);
+ spin_unlock_irqrestore(shost->host_lock, flags);
++ /*
++ * device can now only be removed via __scsi_remove_device() so hold
++ * the target. Target will be held in CREATED state until something
++ * beneath it becomes visible (in which case it moves to RUNNING)
++ */
++ kref_get(&starget->reap_ref);
+ }
+
+ int scsi_is_sdev_device(const struct device *dev)
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e8404319ca68..0b5e381bcbe6 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1560,13 +1560,27 @@ static const struct usb_device_id acm_ids[] = {
+ },
+ /* Motorola H24 HSPA module: */
+ { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
+- { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
+- { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
+- { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
+- { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
+- { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
+- { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
+- { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
++ { USB_DEVICE(0x22b8, 0x2d92), /* modem + diagnostics */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
++ { USB_DEVICE(0x22b8, 0x2d93), /* modem + AT port */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
++ { USB_DEVICE(0x22b8, 0x2d95), /* modem + AT port + diagnostics */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
++ { USB_DEVICE(0x22b8, 0x2d96), /* modem + NMEA */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
++ { USB_DEVICE(0x22b8, 0x2d97), /* modem + diagnostics + NMEA */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
++ { USB_DEVICE(0x22b8, 0x2d99), /* modem + AT port + NMEA */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
++ { USB_DEVICE(0x22b8, 0x2d9a), /* modem + AT port + diagnostics + NMEA */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
+
+ { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
+ .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index cbecb5ff7d90..1dbfb52dbcd6 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -552,9 +552,9 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ struct xhci_dequeue_state *state)
+ {
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
++ struct xhci_virt_ep *ep = &dev->eps[ep_index];
+ struct xhci_ring *ep_ring;
+ struct xhci_generic_trb *trb;
+- struct xhci_ep_ctx *ep_ctx;
+ dma_addr_t addr;
+ u64 hw_dequeue;
+
+@@ -570,8 +570,16 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ /* Dig out the cycle state saved by the xHC during the stop ep cmd */
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Finding endpoint context");
+- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+- hw_dequeue = le64_to_cpu(ep_ctx->deq);
++ /* 4.6.9 the css flag is written to the stream context for streams */
++ if (ep->ep_state & EP_HAS_STREAMS) {
++ struct xhci_stream_ctx *ctx =
++ &ep->stream_info->stream_ctx_array[stream_id];
++ hw_dequeue = le64_to_cpu(ctx->stream_ring);
++ } else {
++ struct xhci_ep_ctx *ep_ctx
++ = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
++ hw_dequeue = le64_to_cpu(ep_ctx->deq);
++ }
+
+ /* Find virtual address and segment of hardware dequeue pointer */
+ state->new_deq_seg = ep_ring->deq_seg;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 6987b535aa98..71873cafb9d3 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -104,6 +104,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
+ { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
+ { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
++ { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
+ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
+ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index b7f715fead15..c94be8c051c0 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -910,6 +910,39 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
+ /* Cressi Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
++ /* Brainboxes Devices */
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_001_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_012_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_4_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_5_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_357_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_1_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_2_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_3_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_1_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_2_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_1_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
+ { } /* Terminating entry */
+ };
+
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index e599fbfcde5f..993c93df6874 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1326,3 +1326,40 @@
+ * Manufacturer: Cressi
+ */
+ #define FTDI_CRESSI_PID 0x87d0
++
++/*
++ * Brainboxes devices
++ */
++#define BRAINBOXES_VID 0x05d1
++#define BRAINBOXES_VX_001_PID 0x1001 /* VX-001 ExpressCard 1 Port RS232 */
++#define BRAINBOXES_VX_012_PID 0x1002 /* VX-012 ExpressCard 2 Port RS232 */
++#define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */
++#define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */
++#define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */
++#define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */
++#define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */
++#define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */
++#define BRAINBOXES_US_606_3_PID	0x2003 /* US-606 6 Port RS232 Serial Port 5 and 6 */
++#define BRAINBOXES_US_701_1_PID 0x2011 /* US-701 4xRS232 1Mbaud Port 1 and 2 */
++#define BRAINBOXES_US_701_2_PID 0x2012 /* US-701 4xRS422 1Mbaud Port 3 and 4 */
++#define BRAINBOXES_US_279_1_PID 0x2021 /* US-279 8xRS422 1Mbaud Port 1 and 2 */
++#define BRAINBOXES_US_279_2_PID 0x2022 /* US-279 8xRS422 1Mbaud Port 3 and 4 */
++#define BRAINBOXES_US_279_3_PID 0x2023 /* US-279 8xRS422 1Mbaud Port 5 and 6 */
++#define BRAINBOXES_US_279_4_PID 0x2024 /* US-279 8xRS422 1Mbaud Port 7 and 8 */
++#define BRAINBOXES_US_346_1_PID 0x3011 /* US-346 4xRS422/485 1Mbaud Port 1 and 2 */
++#define BRAINBOXES_US_346_2_PID 0x3012 /* US-346 4xRS422/485 1Mbaud Port 3 and 4 */
++#define BRAINBOXES_US_257_PID 0x5001 /* US-257 2xRS232 1Mbaud */
++#define BRAINBOXES_US_313_PID 0x6001 /* US-313 2xRS422/485 1Mbaud */
++#define BRAINBOXES_US_357_PID	0x7001 /* US-357 1xRS232/422/485 */
++#define BRAINBOXES_US_842_1_PID 0x8001 /* US-842 8xRS422/485 1Mbaud Port 1 and 2 */
++#define BRAINBOXES_US_842_2_PID 0x8002 /* US-842 8xRS422/485 1Mbaud Port 3 and 4 */
++#define BRAINBOXES_US_842_3_PID 0x8003 /* US-842 8xRS422/485 1Mbaud Port 5 and 6 */
++#define BRAINBOXES_US_842_4_PID 0x8004 /* US-842 8xRS422/485 1Mbaud Port 7 and 8 */
++#define BRAINBOXES_US_160_1_PID 0x9001 /* US-160 16xRS232 1Mbaud Port 1 and 2 */
++#define BRAINBOXES_US_160_2_PID 0x9002 /* US-160 16xRS232 1Mbaud Port 3 and 4 */
++#define BRAINBOXES_US_160_3_PID 0x9003 /* US-160 16xRS232 1Mbaud Port 5 and 6 */
++#define BRAINBOXES_US_160_4_PID 0x9004 /* US-160 16xRS232 1Mbaud Port 7 and 8 */
++#define BRAINBOXES_US_160_5_PID 0x9005 /* US-160 16xRS232 1Mbaud Port 9 and 10 */
++#define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
++#define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
++#define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index b7187bf32469..1c7bc5249cc1 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -29,6 +29,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/mutex.h>
+ #include <linux/serial.h>
++#include <linux/swab.h>
+ #include <linux/kfifo.h>
+ #include <linux/ioctl.h>
+ #include <linux/firmware.h>
+@@ -281,7 +282,7 @@ static int read_download_mem(struct usb_device *dev, int start_address,
+ {
+ int status = 0;
+ __u8 read_length;
+- __be16 be_start_address;
++ u16 be_start_address;
+
+ dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);
+
+@@ -297,10 +298,14 @@ static int read_download_mem(struct usb_device *dev, int start_address,
+ if (read_length > 1) {
+ dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length);
+ }
+- be_start_address = cpu_to_be16(start_address);
++ /*
++ * NOTE: Must use swab as wIndex is sent in little-endian
++ * byte order regardless of host byte order.
++ */
++ be_start_address = swab16((u16)start_address);
+ status = ti_vread_sync(dev, UMPC_MEMORY_READ,
+ (__u16)address_type,
+- (__force __u16)be_start_address,
++ be_start_address,
+ buffer, read_length);
+
+ if (status) {
+@@ -397,7 +402,7 @@ static int write_i2c_mem(struct edgeport_serial *serial,
+ struct device *dev = &serial->serial->dev->dev;
+ int status = 0;
+ int write_length;
+- __be16 be_start_address;
++ u16 be_start_address;
+
+ /* We can only send a maximum of 1 aligned byte page at a time */
+
+@@ -412,11 +417,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
+ __func__, start_address, write_length);
+ usb_serial_debug_data(dev, __func__, write_length, buffer);
+
+- /* Write first page */
+- be_start_address = cpu_to_be16(start_address);
++ /*
++ * Write first page.
++ *
++ * NOTE: Must use swab as wIndex is sent in little-endian byte order
++ * regardless of host byte order.
++ */
++ be_start_address = swab16((u16)start_address);
+ status = ti_vsend_sync(serial->serial->dev,
+ UMPC_MEMORY_WRITE, (__u16)address_type,
+- (__force __u16)be_start_address,
++ be_start_address,
+ buffer, write_length);
+ if (status) {
+ dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
+@@ -439,11 +449,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
+ __func__, start_address, write_length);
+ usb_serial_debug_data(dev, __func__, write_length, buffer);
+
+- /* Write next page */
+- be_start_address = cpu_to_be16(start_address);
++ /*
++ * Write next page.
++ *
++ * NOTE: Must use swab as wIndex is sent in little-endian byte
++ * order regardless of host byte order.
++ */
++ be_start_address = swab16((u16)start_address);
+ status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
+ (__u16)address_type,
+- (__force __u16)be_start_address,
++ be_start_address,
+ buffer, write_length);
+ if (status) {
+ dev_err(dev, "%s - ERROR %d\n", __func__, status);
+@@ -590,8 +605,8 @@ static int get_descriptor_addr(struct edgeport_serial *serial,
+ if (rom_desc->Type == desc_type)
+ return start_address;
+
+- start_address = start_address + sizeof(struct ti_i2c_desc)
+- + rom_desc->Size;
++ start_address = start_address + sizeof(struct ti_i2c_desc) +
++ le16_to_cpu(rom_desc->Size);
+
+ } while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
+
+@@ -604,7 +619,7 @@ static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer)
+ __u16 i;
+ __u8 cs = 0;
+
+- for (i = 0; i < rom_desc->Size; i++)
++ for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
+ cs = (__u8)(cs + buffer[i]);
+
+ if (cs != rom_desc->CheckSum) {
+@@ -658,7 +673,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
+ break;
+
+ if ((start_address + sizeof(struct ti_i2c_desc) +
+- rom_desc->Size) > TI_MAX_I2C_SIZE) {
++ le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
+ status = -ENODEV;
+ dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
+ break;
+@@ -673,7 +688,8 @@ static int check_i2c_image(struct edgeport_serial *serial)
+ /* Read the descriptor data */
+ status = read_rom(serial, start_address +
+ sizeof(struct ti_i2c_desc),
+- rom_desc->Size, buffer);
++ le16_to_cpu(rom_desc->Size),
++ buffer);
+ if (status)
+ break;
+
+@@ -682,7 +698,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
+ break;
+ }
+ start_address = start_address + sizeof(struct ti_i2c_desc) +
+- rom_desc->Size;
++ le16_to_cpu(rom_desc->Size);
+
+ } while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
+ (start_address < TI_MAX_I2C_SIZE));
+@@ -721,7 +737,7 @@ static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
+
+ /* Read the descriptor data */
+ status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
+- rom_desc->Size, buffer);
++ le16_to_cpu(rom_desc->Size), buffer);
+ if (status)
+ goto exit;
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 68fc9fe65936..f213ee978516 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -234,8 +234,31 @@ static void option_instat_callback(struct urb *urb);
+ #define QUALCOMM_VENDOR_ID 0x05C6
+
+ #define CMOTECH_VENDOR_ID 0x16d8
+-#define CMOTECH_PRODUCT_6008 0x6008
+-#define CMOTECH_PRODUCT_6280 0x6280
++#define CMOTECH_PRODUCT_6001 0x6001
++#define CMOTECH_PRODUCT_CMU_300 0x6002
++#define CMOTECH_PRODUCT_6003 0x6003
++#define CMOTECH_PRODUCT_6004 0x6004
++#define CMOTECH_PRODUCT_6005 0x6005
++#define CMOTECH_PRODUCT_CGU_628A 0x6006
++#define CMOTECH_PRODUCT_CHE_628S 0x6007
++#define CMOTECH_PRODUCT_CMU_301 0x6008
++#define CMOTECH_PRODUCT_CHU_628 0x6280
++#define CMOTECH_PRODUCT_CHU_628S 0x6281
++#define CMOTECH_PRODUCT_CDU_680 0x6803
++#define CMOTECH_PRODUCT_CDU_685A 0x6804
++#define CMOTECH_PRODUCT_CHU_720S 0x7001
++#define CMOTECH_PRODUCT_7002 0x7002
++#define CMOTECH_PRODUCT_CHU_629K 0x7003
++#define CMOTECH_PRODUCT_7004 0x7004
++#define CMOTECH_PRODUCT_7005 0x7005
++#define CMOTECH_PRODUCT_CGU_629 0x7006
++#define CMOTECH_PRODUCT_CHU_629S 0x700a
++#define CMOTECH_PRODUCT_CHU_720I 0x7211
++#define CMOTECH_PRODUCT_7212 0x7212
++#define CMOTECH_PRODUCT_7213 0x7213
++#define CMOTECH_PRODUCT_7251 0x7251
++#define CMOTECH_PRODUCT_7252 0x7252
++#define CMOTECH_PRODUCT_7253 0x7253
+
+ #define TELIT_VENDOR_ID 0x1bc7
+ #define TELIT_PRODUCT_UC864E 0x1003
+@@ -243,6 +266,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_CC864_DUAL 0x1005
+ #define TELIT_PRODUCT_CC864_SINGLE 0x1006
+ #define TELIT_PRODUCT_DE910_DUAL 0x1010
++#define TELIT_PRODUCT_UE910_V2 0x1012
+ #define TELIT_PRODUCT_LE920 0x1200
+
+ /* ZTE PRODUCTS */
+@@ -286,6 +310,7 @@ static void option_instat_callback(struct urb *urb);
+ #define ALCATEL_PRODUCT_X060S_X200 0x0000
+ #define ALCATEL_PRODUCT_X220_X500D 0x0017
+ #define ALCATEL_PRODUCT_L100V 0x011e
++#define ALCATEL_PRODUCT_L800MA 0x0203
+
+ #define PIRELLI_VENDOR_ID 0x1266
+ #define PIRELLI_PRODUCT_C100_1 0x1002
+@@ -348,6 +373,7 @@ static void option_instat_callback(struct urb *urb);
+ #define OLIVETTI_PRODUCT_OLICARD100 0xc000
+ #define OLIVETTI_PRODUCT_OLICARD145 0xc003
+ #define OLIVETTI_PRODUCT_OLICARD200 0xc005
++#define OLIVETTI_PRODUCT_OLICARD500 0xc00b
+
+ /* Celot products */
+ #define CELOT_VENDOR_ID 0x211f
+@@ -501,6 +527,10 @@ static const struct option_blacklist_info huawei_cdc12_blacklist = {
+ .reserved = BIT(1) | BIT(2),
+ };
+
++static const struct option_blacklist_info net_intf0_blacklist = {
++ .reserved = BIT(0),
++};
++
+ static const struct option_blacklist_info net_intf1_blacklist = {
+ .reserved = BIT(1),
+ };
+@@ -1034,13 +1064,53 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+- { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
+- { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+@@ -1498,6 +1568,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+ { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+@@ -1543,6 +1615,9 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
+ .driver_info = (kernel_ulong_t)&net_intf6_blacklist
+ },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist
++ },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 968a40201e5f..7ed681a714a5 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -136,9 +136,18 @@ static const struct usb_device_id id_table[] = {
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 0)}, /* Sierra Wireless MC7710 Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 2)}, /* Sierra Wireless MC7710 NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 3)}, /* Sierra Wireless MC7710 Modem */
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 0)}, /* Sierra Wireless MC73xx Device Management */
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 2)}, /* Sierra Wireless MC73xx NMEA */
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 3)}, /* Sierra Wireless MC73xx Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless MC7305/MC7355 NMEA */
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index de958c5b52e3..b6910b7ab7e2 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -291,7 +291,6 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
+- { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
+
+ { }
+ };
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 6091bd5a1f4f..52260afaa102 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -1348,10 +1348,12 @@ static int usb_serial_register(struct usb_serial_driver *driver)
+ static void usb_serial_deregister(struct usb_serial_driver *device)
+ {
+ pr_info("USB Serial deregistering driver %s\n", device->description);
++
+ mutex_lock(&table_lock);
+ list_del(&device->driver_list);
+- usb_serial_bus_deregister(device);
+ mutex_unlock(&table_lock);
++
++ usb_serial_bus_deregister(device);
+ }
+
+ /**
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
+index 85365784040b..6fa78361be56 100644
+--- a/drivers/usb/serial/usb_wwan.c
++++ b/drivers/usb/serial/usb_wwan.c
+@@ -470,6 +470,9 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
+ int err;
+ int i;
+
++ if (!port->bulk_in_size || !port->bulk_out_size)
++ return -ENODEV;
++
+ portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
+ if (!portdata)
+ return -ENOMEM;
+@@ -477,9 +480,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
+ init_usb_anchor(&portdata->delayed);
+
+ for (i = 0; i < N_IN_URB; i++) {
+- if (!port->bulk_in_size)
+- break;
+-
+ buffer = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!buffer)
+ goto bail_out_error;
+@@ -493,9 +493,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
+ }
+
+ for (i = 0; i < N_OUT_URB; i++) {
+- if (!port->bulk_out_size)
+- break;
+-
+ buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
+ if (!buffer)
+ goto bail_out_error2;
+diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
+index ba77f753649c..a78ca6a01094 100644
+--- a/drivers/video/tgafb.c
++++ b/drivers/video/tgafb.c
+@@ -188,6 +188,8 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+
+ if (var->xres_virtual != var->xres || var->yres_virtual != var->yres)
+ return -EINVAL;
++ if (var->xres * var->yres * (var->bits_per_pixel >> 3) > info->fix.smem_len)
++ return -EINVAL;
+ if (var->nonstd)
+ return -EINVAL;
+ if (1000000000 / var->pixclock > TGA_PLL_MAX_FREQ)
+@@ -268,6 +270,7 @@ tgafb_set_par(struct fb_info *info)
+ par->yres = info->var.yres;
+ par->pll_freq = pll_freq = 1000000000 / info->var.pixclock;
+ par->bits_per_pixel = info->var.bits_per_pixel;
++ info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
+
+ tga_type = par->tga_type;
+
+@@ -1314,6 +1317,7 @@ tgafb_init_fix(struct fb_info *info)
+ int tga_bus_tc = TGA_BUS_TC(par->dev);
+ u8 tga_type = par->tga_type;
+ const char *tga_type_name = NULL;
++ unsigned memory_size;
+
+ switch (tga_type) {
+ case TGA_TYPE_8PLANE:
+@@ -1321,21 +1325,25 @@ tgafb_init_fix(struct fb_info *info)
+ tga_type_name = "Digital ZLXp-E1";
+ if (tga_bus_tc)
+ tga_type_name = "Digital ZLX-E1";
++ memory_size = 2097152;
+ break;
+ case TGA_TYPE_24PLANE:
+ if (tga_bus_pci)
+ tga_type_name = "Digital ZLXp-E2";
+ if (tga_bus_tc)
+ tga_type_name = "Digital ZLX-E2";
++ memory_size = 8388608;
+ break;
+ case TGA_TYPE_24PLUSZ:
+ if (tga_bus_pci)
+ tga_type_name = "Digital ZLXp-E3";
+ if (tga_bus_tc)
+ tga_type_name = "Digital ZLX-E3";
++ memory_size = 16777216;
+ break;
+ default:
+ tga_type_name = "Unknown";
++ memory_size = 16777216;
+ break;
+ }
+
+@@ -1347,9 +1355,8 @@ tgafb_init_fix(struct fb_info *info)
+ ? FB_VISUAL_PSEUDOCOLOR
+ : FB_VISUAL_DIRECTCOLOR);
+
+- info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
+ info->fix.smem_start = (size_t) par->tga_fb_base;
+- info->fix.smem_len = info->fix.line_length * par->yres;
++ info->fix.smem_len = memory_size;
+ info->fix.mmio_start = (size_t) par->tga_regs_base;
+ info->fix.mmio_len = 512;
+
+@@ -1473,6 +1480,9 @@ static int tgafb_register(struct device *dev)
+ modedb_tga = &modedb_tc;
+ modedbsize_tga = 1;
+ }
++
++ tgafb_init_fix(info);
++
+ ret = fb_find_mode(&info->var, info,
+ mode_option ? mode_option : mode_option_tga,
+ modedb_tga, modedbsize_tga, NULL,
+@@ -1490,7 +1500,6 @@ static int tgafb_register(struct device *dev)
+ }
+
+ tgafb_set_par(info);
+- tgafb_init_fix(info);
+
+ if (register_framebuffer(info) < 0) {
+ printk(KERN_ERR "tgafb: Could not register framebuffer\n");
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 77fc5e181077..b38bd052ce6d 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -253,6 +253,11 @@ cifs_alloc_inode(struct super_block *sb)
+ cifs_set_oplock_level(cifs_inode, 0);
+ cifs_inode->delete_pending = false;
+ cifs_inode->invalid_mapping = false;
++ clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cifs_inode->flags);
++ clear_bit(CIFS_INODE_PENDING_WRITERS, &cifs_inode->flags);
++ clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cifs_inode->flags);
++ spin_lock_init(&cifs_inode->writers_lock);
++ cifs_inode->writers = 0;
+ cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
+ cifs_inode->server_eof = 0;
+ cifs_inode->uniqueid = 0;
+@@ -731,19 +736,26 @@ static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+ {
+ struct inode *inode = file_inode(iocb->ki_filp);
++ struct cifsInodeInfo *cinode = CIFS_I(inode);
+ ssize_t written;
+ int rc;
+
++ written = cifs_get_writer(cinode);
++ if (written)
++ return written;
++
+ written = generic_file_aio_write(iocb, iov, nr_segs, pos);
+
+ if (CIFS_CACHE_WRITE(CIFS_I(inode)))
+- return written;
++ goto out;
+
+ rc = filemap_fdatawrite(inode->i_mapping);
+ if (rc)
+ cifs_dbg(FYI, "cifs_file_aio_write: %d rc on %p inode\n",
+ rc, inode);
+
++out:
++ cifs_put_writer(cinode);
+ return written;
+ }
+
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index db95dca335ca..2f6f1ac52d3f 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -228,6 +228,8 @@ struct smb_version_operations {
+ /* verify the message */
+ int (*check_message)(char *, unsigned int);
+ bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
++ void (*downgrade_oplock)(struct TCP_Server_Info *,
++ struct cifsInodeInfo *, bool);
+ /* process transaction2 response */
+ bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
+ char *, int);
+@@ -1072,6 +1074,12 @@ struct cifsInodeInfo {
+ unsigned int epoch; /* used to track lease state changes */
+ bool delete_pending; /* DELETE_ON_CLOSE is set */
+ bool invalid_mapping; /* pagecache is invalid */
++ unsigned long flags;
++#define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */
++#define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */
++#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */
++ spinlock_t writers_lock;
++ unsigned int writers; /* Number of writers on this inode */
+ unsigned long time; /* jiffies of last update of inode */
+ u64 server_eof; /* current file size on server -- protected by i_lock */
+ u64 uniqueid; /* server inode number */
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index 45ccfbd8ea5f..c6bfe5b368f9 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -127,6 +127,9 @@ extern u64 cifs_UnixTimeToNT(struct timespec);
+ extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
+ int offset);
+ extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
++extern int cifs_get_writer(struct cifsInodeInfo *cinode);
++extern void cifs_put_writer(struct cifsInodeInfo *cinode);
++extern void cifs_done_oplock_break(struct cifsInodeInfo *cinode);
+ extern int cifs_unlock_range(struct cifsFileInfo *cfile,
+ struct file_lock *flock, const unsigned int xid);
+ extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 81476e18a789..643a18491bed 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -2611,12 +2611,20 @@ cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+ ssize_t written;
+
++ written = cifs_get_writer(cinode);
++ if (written)
++ return written;
++
+ if (CIFS_CACHE_WRITE(cinode)) {
+ if (cap_unix(tcon->ses) &&
+ (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
+- && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+- return generic_file_aio_write(iocb, iov, nr_segs, pos);
+- return cifs_writev(iocb, iov, nr_segs, pos);
++ && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
++ written = generic_file_aio_write(
++ iocb, iov, nr_segs, pos);
++ goto out;
++ }
++ written = cifs_writev(iocb, iov, nr_segs, pos);
++ goto out;
+ }
+ /*
+ * For non-oplocked files in strict cache mode we need to write the data
+@@ -2636,6 +2644,8 @@ cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
+ inode);
+ cinode->oplock = 0;
+ }
++out:
++ cifs_put_writer(cinode);
+ return written;
+ }
+
+@@ -3647,6 +3657,13 @@ static int cifs_launder_page(struct page *page)
+ return rc;
+ }
+
++static int
++cifs_pending_writers_wait(void *unused)
++{
++ schedule();
++ return 0;
++}
++
+ void cifs_oplock_break(struct work_struct *work)
+ {
+ struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+@@ -3654,8 +3671,15 @@ void cifs_oplock_break(struct work_struct *work)
+ struct inode *inode = cfile->dentry->d_inode;
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++ struct TCP_Server_Info *server = tcon->ses->server;
+ int rc = 0;
+
++ wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
++ cifs_pending_writers_wait, TASK_UNINTERRUPTIBLE);
++
++ server->ops->downgrade_oplock(server, cinode,
++ test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
++
+ if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
+ cifs_has_mand_locks(cinode)) {
+ cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
+@@ -3692,6 +3716,7 @@ void cifs_oplock_break(struct work_struct *work)
+ cinode);
+ cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+ }
++ cifs_done_oplock_break(cinode);
+ }
+
+ const struct address_space_operations cifs_addr_ops = {
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 138a011633fe..912a52e5e8cc 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -472,8 +472,22 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+ cifs_dbg(FYI, "file id match, oplock break\n");
+ pCifsInode = CIFS_I(netfile->dentry->d_inode);
+
+- cifs_set_oplock_level(pCifsInode,
+- pSMB->OplockLevel ? OPLOCK_READ : 0);
++ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
++ &pCifsInode->flags);
++
++ /*
++ * Set flag if the server downgrades the oplock
++			 * to L2, else clear it.
++ */
++ if (pSMB->OplockLevel)
++ set_bit(
++ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
++ &pCifsInode->flags);
++ else
++ clear_bit(
++ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
++ &pCifsInode->flags);
++
+ queue_work(cifsiod_wq,
+ &netfile->oplock_break);
+ netfile->oplock_break_cancelled = false;
+@@ -557,6 +571,62 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
+ cinode->oplock = 0;
+ }
+
++static int
++cifs_oplock_break_wait(void *unused)
++{
++ schedule();
++ return signal_pending(current) ? -ERESTARTSYS : 0;
++}
++
++/*
++ * We wait for oplock breaks to be processed before we attempt to perform
++ * writes.
++ */
++int cifs_get_writer(struct cifsInodeInfo *cinode)
++{
++ int rc;
++
++start:
++ rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
++ cifs_oplock_break_wait, TASK_KILLABLE);
++ if (rc)
++ return rc;
++
++ spin_lock(&cinode->writers_lock);
++ if (!cinode->writers)
++ set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
++ cinode->writers++;
++ /* Check to see if we have started servicing an oplock break */
++ if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
++ cinode->writers--;
++ if (cinode->writers == 0) {
++ clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
++ wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
++ }
++ spin_unlock(&cinode->writers_lock);
++ goto start;
++ }
++ spin_unlock(&cinode->writers_lock);
++ return 0;
++}
++
++void cifs_put_writer(struct cifsInodeInfo *cinode)
++{
++ spin_lock(&cinode->writers_lock);
++ cinode->writers--;
++ if (cinode->writers == 0) {
++ clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
++ wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
++ }
++ spin_unlock(&cinode->writers_lock);
++}
++
++void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
++{
++ clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
++ wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
++}
++
+ bool
+ backup_cred(struct cifs_sb_info *cifs_sb)
+ {
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index e6ed0dc3071b..58bd01efa05b 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -372,6 +372,16 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
+ return 0;
+ }
+
++static void
++cifs_downgrade_oplock(struct TCP_Server_Info *server,
++ struct cifsInodeInfo *cinode, bool set_level2)
++{
++ if (set_level2)
++ cifs_set_oplock_level(cinode, OPLOCK_READ);
++ else
++ cifs_set_oplock_level(cinode, 0);
++}
++
+ static bool
+ cifs_check_trans2(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+ char *buf, int malformed)
+@@ -957,6 +967,7 @@ struct smb_version_operations smb1_operations = {
+ .clear_stats = cifs_clear_stats,
+ .print_stats = cifs_print_stats,
+ .is_oplock_break = is_valid_oplock_break,
++ .downgrade_oplock = cifs_downgrade_oplock,
+ .check_trans2 = cifs_check_trans2,
+ .need_neg = cifs_need_neg,
+ .negotiate = cifs_negotiate,
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index fb3966265b6e..b8021fde987d 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -575,9 +575,21 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ else
+ cfile->oplock_break_cancelled = false;
+
+- server->ops->set_oplock_level(cinode,
+- rsp->OplockLevel ? SMB2_OPLOCK_LEVEL_II : 0,
+- 0, NULL);
++ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
++ &cinode->flags);
++
++ /*
++ * Set flag if the server downgrades the oplock
++ * to L2 else clear.
++ */
++ if (rsp->OplockLevel)
++ set_bit(
++ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
++ &cinode->flags);
++ else
++ clear_bit(
++ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
++ &cinode->flags);
+
+ queue_work(cifsiod_wq, &cfile->oplock_break);
+
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 027a0c6f7ca0..13e505191364 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -646,6 +646,17 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ }
+
+ static void
++smb2_downgrade_oplock(struct TCP_Server_Info *server,
++ struct cifsInodeInfo *cinode, bool set_level2)
++{
++ if (set_level2)
++ server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
++ 0, NULL);
++ else
++ server->ops->set_oplock_level(cinode, 0, 0, NULL);
++}
++
++static void
+ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache)
+ {
+@@ -851,6 +862,7 @@ struct smb_version_operations smb20_operations = {
+ .clear_stats = smb2_clear_stats,
+ .print_stats = smb2_print_stats,
+ .is_oplock_break = smb2_is_valid_oplock_break,
++ .downgrade_oplock = smb2_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .negotiate_wsize = smb2_negotiate_wsize,
+@@ -922,6 +934,7 @@ struct smb_version_operations smb21_operations = {
+ .clear_stats = smb2_clear_stats,
+ .print_stats = smb2_print_stats,
+ .is_oplock_break = smb2_is_valid_oplock_break,
++ .downgrade_oplock = smb2_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .negotiate_wsize = smb2_negotiate_wsize,
+@@ -994,6 +1007,7 @@ struct smb_version_operations smb30_operations = {
+ .print_stats = smb2_print_stats,
+ .dump_share_caps = smb2_dump_share_caps,
+ .is_oplock_break = smb2_is_valid_oplock_break,
++ .downgrade_oplock = smb2_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .negotiate_wsize = smb2_negotiate_wsize,
+diff --git a/fs/file_table.c b/fs/file_table.c
+index e900ca518635..05e2ac19b6c4 100644
+--- a/fs/file_table.c
++++ b/fs/file_table.c
+@@ -211,10 +211,10 @@ static void drop_file_write_access(struct file *file)
+ struct dentry *dentry = file->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+
+- put_write_access(inode);
+-
+ if (special_file(inode->i_mode))
+ return;
++
++ put_write_access(inode);
+ if (file_check_writeable(file) != 0)
+ return;
+ __mnt_drop_write(mnt);
+diff --git a/fs/open.c b/fs/open.c
+index d420331ca32a..730a5870895d 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -627,23 +627,12 @@ out:
+ static inline int __get_file_write_access(struct inode *inode,
+ struct vfsmount *mnt)
+ {
+- int error;
+- error = get_write_access(inode);
++ int error = get_write_access(inode);
+ if (error)
+ return error;
+- /*
+- * Do not take mount writer counts on
+- * special files since no writes to
+- * the mount itself will occur.
+- */
+- if (!special_file(inode->i_mode)) {
+- /*
+- * Balanced in __fput()
+- */
+- error = __mnt_want_write(mnt);
+- if (error)
+- put_write_access(inode);
+- }
++ error = __mnt_want_write(mnt);
++ if (error)
++ put_write_access(inode);
+ return error;
+ }
+
+@@ -676,12 +665,11 @@ static int do_dentry_open(struct file *f,
+
+ path_get(&f->f_path);
+ inode = f->f_inode = f->f_path.dentry->d_inode;
+- if (f->f_mode & FMODE_WRITE) {
++ if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
+ error = __get_file_write_access(inode, f->f_path.mnt);
+ if (error)
+ goto cleanup_file;
+- if (!special_file(inode->i_mode))
+- file_take_write(f);
++ file_take_write(f);
+ }
+
+ f->f_mapping = inode->i_mapping;
+@@ -722,7 +710,6 @@ cleanup_all:
+ fops_put(f->f_op);
+ file_sb_list_del(f);
+ if (f->f_mode & FMODE_WRITE) {
+- put_write_access(inode);
+ if (!special_file(inode->i_mode)) {
+ /*
+ * We don't consider this a real
+@@ -730,6 +717,7 @@ cleanup_all:
+ * because it all happenend right
+ * here, so just reset the state.
+ */
++ put_write_access(inode);
+ file_reset_write(f);
+ __mnt_drop_write(f->f_path.mnt);
+ }
+diff --git a/fs/super.c b/fs/super.c
+index 0225c20f8770..d127de207376 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -845,7 +845,10 @@ void emergency_remount(void)
+
+ static DEFINE_IDA(unnamed_dev_ida);
+ static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
+-static int unnamed_dev_start = 0; /* don't bother trying below it */
++/* Many userspace utilities consider an FSID of 0 invalid.
++ * Always return at least 1 from get_anon_bdev.
++ */
++static int unnamed_dev_start = 1;
+
+ int get_anon_bdev(dev_t *p)
+ {
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index b12079afbd5f..a52136ce13ad 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -669,32 +669,47 @@ static inline int pmd_numa(pmd_t pmd)
+ #ifndef pte_mknonnuma
+ static inline pte_t pte_mknonnuma(pte_t pte)
+ {
+- pte = pte_clear_flags(pte, _PAGE_NUMA);
+- return pte_set_flags(pte, _PAGE_PRESENT|_PAGE_ACCESSED);
++ pteval_t val = pte_val(pte);
++
++ val &= ~_PAGE_NUMA;
++ val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
++ return __pte(val);
+ }
+ #endif
+
+ #ifndef pmd_mknonnuma
+ static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+ {
+- pmd = pmd_clear_flags(pmd, _PAGE_NUMA);
+- return pmd_set_flags(pmd, _PAGE_PRESENT|_PAGE_ACCESSED);
++ pmdval_t val = pmd_val(pmd);
++
++ val &= ~_PAGE_NUMA;
++ val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
++
++ return __pmd(val);
+ }
+ #endif
+
+ #ifndef pte_mknuma
+ static inline pte_t pte_mknuma(pte_t pte)
+ {
+- pte = pte_set_flags(pte, _PAGE_NUMA);
+- return pte_clear_flags(pte, _PAGE_PRESENT);
++ pteval_t val = pte_val(pte);
++
++ val &= ~_PAGE_PRESENT;
++ val |= _PAGE_NUMA;
++
++ return __pte(val);
+ }
+ #endif
+
+ #ifndef pmd_mknuma
+ static inline pmd_t pmd_mknuma(pmd_t pmd)
+ {
+- pmd = pmd_set_flags(pmd, _PAGE_NUMA);
+- return pmd_clear_flags(pmd, _PAGE_PRESENT);
++ pmdval_t val = pmd_val(pmd);
++
++ val &= ~_PAGE_PRESENT;
++ val |= _PAGE_NUMA;
++
++ return __pmd(val);
+ }
+ #endif
+ #else
+diff --git a/include/linux/list.h b/include/linux/list.h
+index f4d8a2f12a33..2ece63847001 100644
+--- a/include/linux/list.h
++++ b/include/linux/list.h
+@@ -373,6 +373,22 @@ static inline void list_splice_tail_init(struct list_head *list,
+ (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+
+ /**
++ * list_next_entry - get the next element in list
++ * @pos: the type * to cursor
++ * @member: the name of the list_struct within the struct.
++ */
++#define list_next_entry(pos, member) \
++ list_entry((pos)->member.next, typeof(*(pos)), member)
++
++/**
++ * list_prev_entry - get the prev element in list
++ * @pos: the type * to cursor
++ * @member: the name of the list_struct within the struct.
++ */
++#define list_prev_entry(pos, member) \
++ list_entry((pos)->member.prev, typeof(*(pos)), member)
++
++/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
+index f28544b2f9af..321f4ec71f6f 100644
+--- a/include/linux/rtnetlink.h
++++ b/include/linux/rtnetlink.h
+@@ -4,6 +4,7 @@
+
+ #include <linux/mutex.h>
+ #include <linux/netdevice.h>
++#include <linux/wait.h>
+ #include <uapi/linux/rtnetlink.h>
+
+ extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
+@@ -22,6 +23,10 @@ extern void rtnl_lock(void);
+ extern void rtnl_unlock(void);
+ extern int rtnl_trylock(void);
+ extern int rtnl_is_locked(void);
++
++extern wait_queue_head_t netdev_unregistering_wq;
++extern struct mutex net_mutex;
++
+ #ifdef CONFIG_PROVE_LOCKING
+ extern int lockdep_rtnl_is_held(void);
+ #endif /* #ifdef CONFIG_PROVE_LOCKING */
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index b1e963efbde8..8594b065d3a8 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1501,6 +1501,24 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+ }
+
+
++static inline int pid_alive(const struct task_struct *p);
++static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
++{
++ pid_t pid = 0;
++
++ rcu_read_lock();
++ if (pid_alive(tsk))
++ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
++ rcu_read_unlock();
++
++ return pid;
++}
++
++static inline pid_t task_ppid_nr(const struct task_struct *tsk)
++{
++ return task_ppid_nr_ns(tsk, &init_pid_ns);
++}
++
+ static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
+ {
+@@ -1540,7 +1558,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
+ */
+-static inline int pid_alive(struct task_struct *p)
++static inline int pid_alive(const struct task_struct *p)
+ {
+ return p->pids[PIDTYPE_PID].pid != NULL;
+ }
+diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
+index 54f91d35e5fd..302ab805b0bb 100644
+--- a/include/linux/sock_diag.h
++++ b/include/linux/sock_diag.h
+@@ -23,7 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie);
+ void sock_diag_save_cookie(void *sk, __u32 *cookie);
+
+ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
+-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
++int sock_diag_put_filterinfo(struct sock *sk,
+ struct sk_buff *skb, int attrtype);
+
+ #endif
+diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
+index 7d64d3609ec9..428277869400 100644
+--- a/include/net/af_vsock.h
++++ b/include/net/af_vsock.h
+@@ -155,7 +155,11 @@ struct vsock_transport {
+
+ /**** CORE ****/
+
+-int vsock_core_init(const struct vsock_transport *t);
++int __vsock_core_init(const struct vsock_transport *t, struct module *owner);
++static inline int vsock_core_init(const struct vsock_transport *t)
++{
++ return __vsock_core_init(t, THIS_MODULE);
++}
+ void vsock_core_exit(void);
+
+ /**** UTILS ****/
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index 2b786b7e3585..a210b33fc8e2 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -32,6 +32,11 @@ struct route_info {
+ #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
+ #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
+
++/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
++ * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
++ */
++#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr))
++
+ /*
+ * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate
+ * between IPV6_ADDR_PREFERENCES socket option values
+diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
+index 88a1d4060d52..88877224473f 100644
+--- a/include/net/netfilter/nf_conntrack_extend.h
++++ b/include/net/netfilter/nf_conntrack_extend.h
+@@ -47,8 +47,8 @@ enum nf_ct_ext_id {
+ /* Extensions: optional stuff which isn't permanently in struct. */
+ struct nf_ct_ext {
+ struct rcu_head rcu;
+- u8 offset[NF_CT_EXT_NUM];
+- u8 len;
++ u16 offset[NF_CT_EXT_NUM];
++ u16 len;
+ char data[0];
+ };
+
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 2174d8da0770..8b31f09dd695 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -1245,6 +1245,7 @@ struct sctp_endpoint {
+ /* SCTP-AUTH: endpoint shared keys */
+ struct list_head endpoint_shared_keys;
+ __u16 active_key_id;
++ __u8 auth_enable;
+ };
+
+ /* Recover the outter endpoint structure. */
+@@ -1273,7 +1274,8 @@ struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
+ int sctp_has_association(struct net *net, const union sctp_addr *laddr,
+ const union sctp_addr *paddr);
+
+-int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
++int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
++ const struct sctp_association *asoc,
+ sctp_cid_t, sctp_init_chunk_t *peer_init,
+ struct sctp_chunk *chunk, struct sctp_chunk **err_chunk);
+ int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk,
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index d65fbec2533d..b4f1effc9216 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -257,7 +257,7 @@ struct scsi_target {
+ struct list_head siblings;
+ struct list_head devices;
+ struct device dev;
+- unsigned int reap_ref; /* protected by the host lock */
++ struct kref reap_ref; /* last put renders target invisible */
+ unsigned int channel;
+ unsigned int id; /* target id ... replace
+ * scsi_device.id eventually */
+@@ -284,7 +284,6 @@ struct scsi_target {
+ #define SCSI_DEFAULT_TARGET_BLOCKED 3
+
+ char scsi_level;
+- struct execute_work ew;
+ enum scsi_target_state state;
+ void *hostdata; /* available to low-level driver */
+ unsigned long starget_data[0]; /* for the transport */
+diff --git a/include/trace/events/block.h b/include/trace/events/block.h
+index 4c2301d2ef1a..2aaf370c462b 100644
+--- a/include/trace/events/block.h
++++ b/include/trace/events/block.h
+@@ -132,6 +132,7 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
+ * block_rq_complete - block IO operation completed by device driver
+ * @q: queue containing the block operation request
+ * @rq: block operations request
++ * @nr_bytes: number of completed bytes
+ *
+ * The block_rq_complete tracepoint event indicates that some portion
+ * of operation request has been completed by the device driver. If
+@@ -139,11 +140,37 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
+ * do for the request. If @rq->bio is non-NULL then there is
+ * additional work required to complete the request.
+ */
+-DEFINE_EVENT(block_rq_with_error, block_rq_complete,
++TRACE_EVENT(block_rq_complete,
+
+- TP_PROTO(struct request_queue *q, struct request *rq),
++ TP_PROTO(struct request_queue *q, struct request *rq,
++ unsigned int nr_bytes),
+
+- TP_ARGS(q, rq)
++ TP_ARGS(q, rq, nr_bytes),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( int, errors )
++ __array( char, rwbs, RWBS_LEN )
++ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
++ __entry->sector = blk_rq_pos(rq);
++ __entry->nr_sector = nr_bytes >> 9;
++ __entry->errors = rq->errors;
++
++ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
++ blk_dump_cmd(__get_str(cmd), rq);
++ ),
++
++ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->rwbs, __get_str(cmd),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->errors)
+ );
+
+ DECLARE_EVENT_CLASS(block_rq,
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 6def25f1b351..50512d11a445 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1613,10 +1613,10 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
+ spin_unlock_irq(&tsk->sighand->siglock);
+
+ audit_log_format(ab,
+- " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
++ " ppid=%d pid=%d auid=%u uid=%u gid=%u"
+ " euid=%u suid=%u fsuid=%u"
+- " egid=%u sgid=%u fsgid=%u ses=%u tty=%s",
+- sys_getppid(),
++ " egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
++ task_ppid_nr(tsk),
+ tsk->pid,
+ from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
+ from_kuid(&init_user_ns, cred->uid),
+@@ -1627,7 +1627,7 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
+ from_kgid(&init_user_ns, cred->egid),
+ from_kgid(&init_user_ns, cred->sgid),
+ from_kgid(&init_user_ns, cred->fsgid),
+- audit_get_sessionid(tsk), tty);
++ tty, audit_get_sessionid(tsk));
+
+ get_task_comm(name, tsk);
+ audit_log_format(ab, " comm=");
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 9845cb32b60a..3b79a47ddb13 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -472,7 +472,7 @@ static int audit_filter_rules(struct task_struct *tsk,
+ case AUDIT_PPID:
+ if (ctx) {
+ if (!ctx->ppid)
+- ctx->ppid = sys_getppid();
++ ctx->ppid = task_ppid_nr(tsk);
+ result = audit_comparator(ctx->ppid, f->op, f->val);
+ }
+ break;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index fea4f6cf7e90..6c318bc71be5 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2210,9 +2210,6 @@ static void __perf_event_sync_stat(struct perf_event *event,
+ perf_event_update_userpage(next_event);
+ }
+
+-#define list_next_entry(pos, member) \
+- list_entry(pos->member.next, typeof(*pos), member)
+-
+ static void perf_event_sync_stat(struct perf_event_context *ctx,
+ struct perf_event_context *next_ctx)
+ {
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index 64522ecdfe0e..271ce26faa21 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -260,7 +260,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev,
+ bool tick_check_replacement(struct clock_event_device *curdev,
+ struct clock_event_device *newdev)
+ {
+- if (tick_check_percpu(curdev, newdev, smp_processor_id()))
++ if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
+ return false;
+
+ return tick_check_preferred(curdev, newdev);
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index 7f727b34280d..e0e5f73db5a4 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -703,6 +703,7 @@ void blk_trace_shutdown(struct request_queue *q)
+ * blk_add_trace_rq - Add a trace for a request oriented action
+ * @q: queue the io is for
+ * @rq: the source request
++ * @nr_bytes: number of completed bytes
+ * @what: the action
+ *
+ * Description:
+@@ -710,7 +711,7 @@ void blk_trace_shutdown(struct request_queue *q)
+ *
+ **/
+ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+- u32 what)
++ unsigned int nr_bytes, u32 what)
+ {
+ struct blk_trace *bt = q->blk_trace;
+
+@@ -719,11 +720,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ what |= BLK_TC_ACT(BLK_TC_PC);
+- __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
++ __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
+ what, rq->errors, rq->cmd_len, rq->cmd);
+ } else {
+ what |= BLK_TC_ACT(BLK_TC_FS);
+- __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
++ __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
+ rq->cmd_flags, what, rq->errors, 0, NULL);
+ }
+ }
+@@ -731,33 +732,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+ static void blk_add_trace_rq_abort(void *ignore,
+ struct request_queue *q, struct request *rq)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_ABORT);
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
+ }
+
+ static void blk_add_trace_rq_insert(void *ignore,
+ struct request_queue *q, struct request *rq)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_INSERT);
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
+ }
+
+ static void blk_add_trace_rq_issue(void *ignore,
+ struct request_queue *q, struct request *rq)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
+ }
+
+ static void blk_add_trace_rq_requeue(void *ignore,
+ struct request_queue *q,
+ struct request *rq)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
+ }
+
+ static void blk_add_trace_rq_complete(void *ignore,
+ struct request_queue *q,
+- struct request *rq)
++ struct request *rq,
++ unsigned int nr_bytes)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
++ blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
+ }
+
+ /**
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index 031cc5655a51..63630aef3bd3 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -641,6 +641,9 @@ static int tracepoint_module_coming(struct module *mod)
+ struct tp_module *tp_mod, *iter;
+ int ret = 0;
+
++ if (!mod->num_tracepoints)
++ return 0;
++
+ /*
+ * We skip modules that taint the kernel, especially those with different
+ * module headers (for forced load), to make sure we don't cause a crash.
+@@ -684,6 +687,9 @@ static int tracepoint_module_going(struct module *mod)
+ {
+ struct tp_module *pos;
+
++ if (!mod->num_tracepoints)
++ return 0;
++
+ mutex_lock(&tracepoints_mutex);
+ tracepoint_update_probe_range(mod->tracepoints_ptrs,
+ mod->tracepoints_ptrs + mod->num_tracepoints);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index efbb9dc67f2f..92e103b72dcb 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1177,6 +1177,7 @@ static void return_unused_surplus_pages(struct hstate *h,
+ while (nr_pages--) {
+ if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
+ break;
++ cond_resched_lock(&hugetlb_lock);
+ }
+ }
+
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index d1537dcd4df8..1b89bc7468de 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -512,10 +512,48 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
+ }
+ }
+
++static int vlan_calculate_locking_subclass(struct net_device *real_dev)
++{
++ int subclass = 0;
++
++ while (is_vlan_dev(real_dev)) {
++ subclass++;
++ real_dev = vlan_dev_priv(real_dev)->real_dev;
++ }
++
++ return subclass;
++}
++
++static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
++{
++ int err = 0, subclass;
++
++ subclass = vlan_calculate_locking_subclass(to);
++
++ spin_lock_nested(&to->addr_list_lock, subclass);
++ err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
++ if (!err)
++ __dev_set_rx_mode(to);
++ spin_unlock(&to->addr_list_lock);
++}
++
++static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
++{
++ int err = 0, subclass;
++
++ subclass = vlan_calculate_locking_subclass(to);
++
++ spin_lock_nested(&to->addr_list_lock, subclass);
++ err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
++ if (!err)
++ __dev_set_rx_mode(to);
++ spin_unlock(&to->addr_list_lock);
++}
++
+ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
+ {
+- dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+- dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
++ vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
++ vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+ }
+
+ /*
+@@ -624,9 +662,7 @@ static int vlan_dev_init(struct net_device *dev)
+
+ SET_NETDEV_DEVTYPE(dev, &vlan_type);
+
+- if (is_vlan_dev(real_dev))
+- subclass = 1;
+-
++ subclass = vlan_calculate_locking_subclass(dev);
+ vlan_dev_set_lockdep_class(dev, subclass);
+
+ vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
+index 1ce4b8763ef2..067996091638 100644
+--- a/net/batman-adv/gateway_client.c
++++ b/net/batman-adv/gateway_client.c
+@@ -38,8 +38,10 @@
+
+ static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
+ {
+- if (atomic_dec_and_test(&gw_node->refcount))
++ if (atomic_dec_and_test(&gw_node->refcount)) {
++ batadv_orig_node_free_ref(gw_node->orig_node);
+ kfree_rcu(gw_node, rcu);
++ }
+ }
+
+ static struct batadv_gw_node *
+@@ -344,9 +346,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
+ struct batadv_gw_node *gw_node;
+ int down, up;
+
++ if (!atomic_inc_not_zero(&orig_node->refcount))
++ return;
++
+ gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
+- if (!gw_node)
++ if (!gw_node) {
++ batadv_orig_node_free_ref(orig_node);
+ return;
++ }
+
+ INIT_HLIST_NODE(&gw_node->list);
+ gw_node->orig_node = orig_node;
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index a2fd37ec35f7..c378750602cd 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -72,7 +72,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
+ goto drop;
+
+ if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
+- goto drop;
++ goto out;
+
+ /* insert into forwarding database after filtering to avoid spoofing */
+ br = p->br;
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index f75d92e4f96b..b47b344e0963 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -446,6 +446,20 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
+ return 0;
+ }
+
++static int br_dev_newlink(struct net *src_net, struct net_device *dev,
++ struct nlattr *tb[], struct nlattr *data[])
++{
++ struct net_bridge *br = netdev_priv(dev);
++
++ if (tb[IFLA_ADDRESS]) {
++ spin_lock_bh(&br->lock);
++ br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
++ spin_unlock_bh(&br->lock);
++ }
++
++ return register_netdevice(dev);
++}
++
+ static size_t br_get_link_af_size(const struct net_device *dev)
+ {
+ struct net_port_vlans *pv;
+@@ -474,6 +488,7 @@ struct rtnl_link_ops br_link_ops __read_mostly = {
+ .priv_size = sizeof(struct net_bridge),
+ .setup = br_dev_setup,
+ .validate = br_validate,
++ .newlink = br_dev_newlink,
+ .dellink = br_dev_delete,
+ };
+
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index 53f0990eab58..45a26debe64e 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -201,7 +201,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+ * rejected.
+ */
+ if (!v)
+- return false;
++ goto drop;
+
+ err = br_vlan_get_tag(skb, vid);
+ if (!*vid) {
+@@ -212,7 +212,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+ * vlan untagged or priority-tagged traffic belongs to.
+ */
+ if (pvid == VLAN_N_VID)
+- return false;
++ goto drop;
+
+ /* PVID is set on this port. Any untagged or priority-tagged
+ * ingress frame is considered to belong to this vlan.
+@@ -235,7 +235,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+ /* Frame had a valid vlan tag. See if vlan is allowed */
+ if (test_bit(*vid, v->vlan_bitmap))
+ return true;
+-
++drop:
++ kfree_skb(skb);
+ return false;
+ }
+
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index ac7802428384..b166fc2ec4b9 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1044,10 +1044,9 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
+ if (repl->num_counters &&
+ copy_to_user(repl->counters, counterstmp,
+ repl->num_counters * sizeof(struct ebt_counter))) {
+- ret = -EFAULT;
++ /* Silent error, can't fail, new table is already in place */
++ net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
+ }
+- else
+- ret = 0;
+
+ /* decrease module count and free resources */
+ EBT_ENTRY_ITERATE(table->entries, table->entries_size,
+diff --git a/net/core/dev.c b/net/core/dev.c
+index b32797590b40..01d53d62a2ec 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3959,6 +3959,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+ skb->vlan_tci = 0;
+ skb->dev = napi->dev;
+ skb->skb_iif = 0;
++ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+
+ napi->skb = skb;
+ }
+@@ -4968,6 +4969,7 @@ void __dev_set_rx_mode(struct net_device *dev)
+ if (ops->ndo_set_rx_mode)
+ ops->ndo_set_rx_mode(dev);
+ }
++EXPORT_SYMBOL(__dev_set_rx_mode);
+
+ void dev_set_rx_mode(struct net_device *dev)
+ {
+@@ -5246,7 +5248,7 @@ static int dev_new_index(struct net *net)
+
+ /* Delayed registration/unregisteration */
+ static LIST_HEAD(net_todo_list);
+-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
++DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+
+ static void net_set_todo(struct net_device *dev)
+ {
+diff --git a/net/core/filter.c b/net/core/filter.c
+index ad30d626a5bd..ebce437678fc 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -355,6 +355,8 @@ load_b:
+
+ if (skb_is_nonlinear(skb))
+ return 0;
++ if (skb->len < sizeof(struct nlattr))
++ return 0;
+ if (A > skb->len - sizeof(struct nlattr))
+ return 0;
+
+@@ -371,11 +373,13 @@ load_b:
+
+ if (skb_is_nonlinear(skb))
+ return 0;
++ if (skb->len < sizeof(struct nlattr))
++ return 0;
+ if (A > skb->len - sizeof(struct nlattr))
+ return 0;
+
+ nla = (struct nlattr *)&skb->data[A];
+- if (nla->nla_len > A - skb->len)
++ if (nla->nla_len > skb->len - A)
+ return 0;
+
+ nla = nla_find_nested(nla, X);
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 81d3a9a08453..7c8ffd974961 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -24,7 +24,7 @@
+
+ static LIST_HEAD(pernet_list);
+ static struct list_head *first_device = &pernet_list;
+-static DEFINE_MUTEX(net_mutex);
++DEFINE_MUTEX(net_mutex);
+
+ LIST_HEAD(net_namespace_list);
+ EXPORT_SYMBOL_GPL(net_namespace_list);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 37b492eaa4f8..7b03d44b7be4 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
+ }
+ EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
+
++/* Return with the rtnl_lock held when there are no network
++ * devices unregistering in any network namespace.
++ */
++static void rtnl_lock_unregistering_all(void)
++{
++ struct net *net;
++ bool unregistering;
++ DEFINE_WAIT(wait);
++
++ for (;;) {
++ prepare_to_wait(&netdev_unregistering_wq, &wait,
++ TASK_UNINTERRUPTIBLE);
++ unregistering = false;
++ rtnl_lock();
++ for_each_net(net) {
++ if (net->dev_unreg_count > 0) {
++ unregistering = true;
++ break;
++ }
++ }
++ if (!unregistering)
++ break;
++ __rtnl_unlock();
++ schedule();
++ }
++ finish_wait(&netdev_unregistering_wq, &wait);
++}
++
+ /**
+ * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
+ * @ops: struct rtnl_link_ops * to unregister
+ */
+ void rtnl_link_unregister(struct rtnl_link_ops *ops)
+ {
+- rtnl_lock();
++ /* Close the race with cleanup_net() */
++ mutex_lock(&net_mutex);
++ rtnl_lock_unregistering_all();
+ __rtnl_link_unregister(ops);
+ rtnl_unlock();
++ mutex_unlock(&net_mutex);
+ }
+ EXPORT_SYMBOL_GPL(rtnl_link_unregister);
+
+@@ -714,7 +745,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
+ return 0;
+ }
+
+-static size_t rtnl_port_size(const struct net_device *dev)
++static size_t rtnl_port_size(const struct net_device *dev,
++ u32 ext_filter_mask)
+ {
+ size_t port_size = nla_total_size(4) /* PORT_VF */
+ + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
+@@ -730,7 +762,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
+ size_t port_self_size = nla_total_size(sizeof(struct nlattr))
+ + port_size;
+
+- if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
++ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
++ !(ext_filter_mask & RTEXT_FILTER_VF))
+ return 0;
+ if (dev_num_vf(dev->dev.parent))
+ return port_self_size + vf_ports_size +
+@@ -765,7 +798,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ + nla_total_size(ext_filter_mask
+ & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+- + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
++ + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+ + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
+@@ -827,11 +860,13 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
+ return 0;
+ }
+
+-static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
++static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
++ u32 ext_filter_mask)
+ {
+ int err;
+
+- if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
++ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
++ !(ext_filter_mask & RTEXT_FILTER_VF))
+ return 0;
+
+ err = rtnl_port_self_fill(skb, dev);
+@@ -1016,7 +1051,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ nla_nest_end(skb, vfinfo);
+ }
+
+- if (rtnl_port_fill(skb, dev))
++ if (rtnl_port_fill(skb, dev, ext_filter_mask))
+ goto nla_put_failure;
+
+ if (dev->rtnl_link_ops) {
+@@ -1070,6 +1105,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ struct hlist_head *head;
+ struct nlattr *tb[IFLA_MAX+1];
+ u32 ext_filter_mask = 0;
++ int err;
+
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
+@@ -1090,11 +1126,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+- if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+- NETLINK_CB(cb->skb).portid,
+- cb->nlh->nlmsg_seq, 0,
+- NLM_F_MULTI,
+- ext_filter_mask) <= 0)
++ err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
++ NETLINK_CB(cb->skb).portid,
++ cb->nlh->nlmsg_seq, 0,
++ NLM_F_MULTI,
++ ext_filter_mask);
++ /* If we ran out of room on the first message,
++ * we're in trouble
++ */
++ WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
++
++ if (err <= 0)
+ goto out;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 21571dc4f2df..f69f2ed1dbc3 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3535,12 +3535,14 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
+ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+ {
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+- unsigned int hdr_len;
+
+ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+- hdr_len = tcp_hdrlen(skb);
+- else
+- hdr_len = sizeof(struct udphdr);
+- return hdr_len + shinfo->gso_size;
++ return tcp_hdrlen(skb) + shinfo->gso_size;
++
++ /* UFO sets gso_size to the size of the fragmentation
++ * payload, i.e. the size of the L4 (UDP) header is already
++ * accounted for.
++ */
++ return shinfo->gso_size;
+ }
+ EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
+diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
+index a0e9cf6379de..6a7fae228634 100644
+--- a/net/core/sock_diag.c
++++ b/net/core/sock_diag.c
+@@ -49,7 +49,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
+ }
+ EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
+
+-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
++int sock_diag_put_filterinfo(struct sock *sk,
+ struct sk_buff *skb, int attrtype)
+ {
+ struct nlattr *attr;
+@@ -57,7 +57,7 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
+ unsigned int len;
+ int err = 0;
+
+- if (!ns_capable(user_ns, CAP_NET_ADMIN)) {
++ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+ nla_reserve(skb, attrtype, 0);
+ return 0;
+ }
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index d5dbca5ecf62..9f1014ab86c6 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -819,13 +819,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
+ fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
+ if (fi == NULL)
+ goto failure;
++ fib_info_cnt++;
+ if (cfg->fc_mx) {
+ fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+ if (!fi->fib_metrics)
+ goto failure;
+ } else
+ fi->fib_metrics = (u32 *) dst_default_metrics;
+- fib_info_cnt++;
+
+ fi->fib_net = hold_net(net);
+ fi->fib_protocol = cfg->fc_protocol;
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index 98d7e53d2afd..bd1c5baf69be 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -42,12 +42,12 @@
+ static bool ip_may_fragment(const struct sk_buff *skb)
+ {
+ return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
+- !skb->local_df;
++ skb->local_df;
+ }
+
+ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+ {
+- if (skb->len <= mtu || skb->local_df)
++ if (skb->len <= mtu)
+ return false;
+
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index d30636080a11..b3becd0727db 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -463,6 +463,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
+ static void ipgre_tunnel_setup(struct net_device *dev)
+ {
+ dev->netdev_ops = &ipgre_netdev_ops;
++ dev->type = ARPHRD_IPGRE;
+ ip_tunnel_setup(dev, ipgre_net_id);
+ }
+
+@@ -501,7 +502,6 @@ static int ipgre_tunnel_init(struct net_device *dev)
+ memcpy(dev->dev_addr, &iph->saddr, 4);
+ memcpy(dev->broadcast, &iph->daddr, 4);
+
+- dev->type = ARPHRD_IPGRE;
+ dev->flags = IFF_NOARP;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->addr_len = 4;
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 3bedb26cfb53..d9dbe0f78612 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -433,6 +433,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
+ tunnel->i_seqno = ntohl(tpi->seq) + 1;
+ }
+
++ skb_reset_network_header(skb);
++
+ err = IP_ECN_decapsulate(iph, skb);
+ if (unlikely(err)) {
+ if (log_ecn_error)
+@@ -853,6 +855,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
+ */
+ if (!IS_ERR(itn->fb_tunnel_dev)) {
+ itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
++ itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
+ ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
+ }
+ rtnl_unlock();
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 26847e189c04..33e2bf806249 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -271,6 +271,7 @@ static const struct net_device_ops vti_netdev_ops = {
+ static void vti_tunnel_setup(struct net_device *dev)
+ {
+ dev->netdev_ops = &vti_netdev_ops;
++ dev->type = ARPHRD_TUNNEL;
+ ip_tunnel_setup(dev, vti_net_id);
+ }
+
+@@ -282,7 +283,6 @@ static int vti_tunnel_init(struct net_device *dev)
+ memcpy(dev->dev_addr, &iph->saddr, 4);
+ memcpy(dev->broadcast, &iph->daddr, 4);
+
+- dev->type = ARPHRD_TUNNEL;
+ dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
+ dev->mtu = ETH_DATA_LEN;
+ dev->flags = IFF_NOARP;
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index 85a4f21aac1a..c8abe31961ed 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -1039,8 +1039,10 @@ static int __do_replace(struct net *net, const char *name,
+
+ xt_free_table_info(oldinfo);
+ if (copy_to_user(counters_ptr, counters,
+- sizeof(struct xt_counters) * num_counters) != 0)
+- ret = -EFAULT;
++ sizeof(struct xt_counters) * num_counters) != 0) {
++ /* Silent error, can't fail, new table is already in place */
++ net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
++ }
+ vfree(counters);
+ xt_table_unlock(t);
+ return ret;
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index d23118d95ff9..651c10774d58 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1226,8 +1226,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+
+ xt_free_table_info(oldinfo);
+ if (copy_to_user(counters_ptr, counters,
+- sizeof(struct xt_counters) * num_counters) != 0)
+- ret = -EFAULT;
++ sizeof(struct xt_counters) * num_counters) != 0) {
++ /* Silent error, can't fail, new table is already in place */
++ net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
++ }
+ vfree(counters);
+ xt_table_unlock(t);
+ return ret;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 2557b9a52373..2b681867164d 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1525,7 +1525,7 @@ static int __mkroute_input(struct sk_buff *skb,
+ struct in_device *out_dev;
+ unsigned int flags = 0;
+ bool do_cache;
+- u32 itag;
++ u32 itag = 0;
+
+ /* get a working reference to the output device */
+ out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
+@@ -2358,7 +2358,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
+ }
+ } else
+ #endif
+- if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
++ if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
+ goto nla_put_failure;
+ }
+
+diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
+index b6ae92a51f58..894b7cea5d7b 100644
+--- a/net/ipv4/tcp_cubic.c
++++ b/net/ipv4/tcp_cubic.c
+@@ -408,7 +408,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
+ ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
+ ratio += cnt;
+
+- ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT);
++ ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
+ }
+
+ /* Some calls are for duplicates without timetamps */
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 5bec666aba61..5e30677953d7 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1418,7 +1418,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
+
+ if (w->skip) {
+ w->skip--;
+- continue;
++ goto skip;
+ }
+
+ err = w->func(w);
+@@ -1428,6 +1428,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
+ w->count++;
+ continue;
+ }
++skip:
+ w->state = FWS_U;
+ case FWS_U:
+ if (fn == w->root)
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index bf4a9a084de5..994d73cc2fe0 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1554,6 +1554,15 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
+ return 0;
+ }
+
++static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
++{
++ struct net *net = dev_net(dev);
++ struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
++
++ if (dev != ign->fb_tunnel_dev)
++ unregister_netdevice_queue(dev, head);
++}
++
+ static size_t ip6gre_get_size(const struct net_device *dev)
+ {
+ return
+@@ -1631,6 +1640,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+ .validate = ip6gre_tunnel_validate,
+ .newlink = ip6gre_newlink,
+ .changelink = ip6gre_changelink,
++ .dellink = ip6gre_dellink,
+ .get_size = ip6gre_get_size,
+ .fill_info = ip6gre_fill_info,
+ };
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 516e136f15ca..45010f0d1167 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -323,12 +323,16 @@ static inline int ip6_forward_finish(struct sk_buff *skb)
+
+ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+ {
+- if (skb->len <= mtu || skb->local_df)
++ if (skb->len <= mtu)
+ return false;
+
++ /* ipv6 conntrack defrag sets max_frag_size + local_df */
+ if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
+ return true;
+
++ if (skb->local_df)
++ return false;
++
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+ return false;
+
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index c1e11b5d6ccc..aac89c3c6af4 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1549,7 +1549,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
+ {
+ u8 proto;
+
+- if (!data)
++ if (!data || !data[IFLA_IPTUN_PROTO])
+ return 0;
+
+ proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 44400c216dc6..89a4e4ddd8bb 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1236,8 +1236,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+
+ xt_free_table_info(oldinfo);
+ if (copy_to_user(counters_ptr, counters,
+- sizeof(struct xt_counters) * num_counters) != 0)
+- ret = -EFAULT;
++ sizeof(struct xt_counters) * num_counters) != 0) {
++ /* Silent error, can't fail, new table is already in place */
++ net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
++ }
+ vfree(counters);
+ xt_table_unlock(t);
+ return ret;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 77f81beabbd3..b4bb6a29aa16 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1329,7 +1329,7 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ if (mtu)
+- return mtu;
++ goto out;
+
+ mtu = IPV6_MIN_MTU;
+
+@@ -1339,7 +1339,8 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
+ mtu = idev->cnf.mtu6;
+ rcu_read_unlock();
+
+- return mtu;
++out:
++ return min_t(unsigned int, mtu, IP6_MAX_MTU);
+ }
+
+ static struct dst_entry *icmp6_dst_gc_list;
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 44441c0c5037..9a0e5874e73e 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -754,9 +754,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ session->deref = pppol2tp_session_sock_put;
+
+ /* If PMTU discovery was enabled, use the MTU that was discovered */
+- dst = sk_dst_get(sk);
++ dst = sk_dst_get(tunnel->sock);
+ if (dst != NULL) {
+- u32 pmtu = dst_mtu(__sk_dst_get(sk));
++ u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
+ if (pmtu != 0)
+ session->mtu = session->mru = pmtu -
+ PPPOL2TP_HEADER_OVERHEAD;
+diff --git a/net/packet/diag.c b/net/packet/diag.c
+index a9584a2f6d69..ec8b6e8a80b1 100644
+--- a/net/packet/diag.c
++++ b/net/packet/diag.c
+@@ -171,7 +171,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
+ goto out_nlmsg_trim;
+
+ if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
+- sock_diag_put_filterinfo(user_ns, sk, skb, PACKET_DIAG_FILTER))
++ sock_diag_put_filterinfo(sk, skb, PACKET_DIAG_FILTER))
+ goto out_nlmsg_trim;
+
+ return nlmsg_end(skb, nlh);
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c
+index 8c4fa5dec824..43b871f6cddf 100644
+--- a/net/sctp/auth.c
++++ b/net/sctp/auth.c
+@@ -387,14 +387,13 @@ nomem:
+ */
+ int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
+ {
+- struct net *net = sock_net(asoc->base.sk);
+ struct sctp_auth_bytes *secret;
+ struct sctp_shared_key *ep_key;
+
+ /* If we don't support AUTH, or peer is not capable
+ * we don't need to do anything.
+ */
+- if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
++ if (!asoc->ep->auth_enable || !asoc->peer.auth_capable)
+ return 0;
+
+ /* If the key_id is non-zero and we couldn't find an
+@@ -441,16 +440,16 @@ struct sctp_shared_key *sctp_auth_get_shkey(
+ */
+ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
+ {
+- struct net *net = sock_net(ep->base.sk);
+ struct crypto_hash *tfm = NULL;
+ __u16 id;
+
+- /* if the transforms are already allocted, we are done */
+- if (!net->sctp.auth_enable) {
++ /* If AUTH extension is disabled, we are done */
++ if (!ep->auth_enable) {
+ ep->auth_hmacs = NULL;
+ return 0;
+ }
+
++ /* If the transforms are already allocated, we are done */
+ if (ep->auth_hmacs)
+ return 0;
+
+@@ -671,12 +670,10 @@ static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param)
+ /* Check if peer requested that this chunk is authenticated */
+ int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
+ {
+- struct net *net;
+ if (!asoc)
+ return 0;
+
+- net = sock_net(asoc->base.sk);
+- if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
++ if (!asoc->ep->auth_enable || !asoc->peer.auth_capable)
+ return 0;
+
+ return __sctp_auth_cid(chunk, asoc->peer.peer_chunks);
+@@ -685,12 +682,10 @@ int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
+ /* Check if we requested that peer authenticate this chunk. */
+ int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
+ {
+- struct net *net;
+ if (!asoc)
+ return 0;
+
+- net = sock_net(asoc->base.sk);
+- if (!net->sctp.auth_enable)
++ if (!asoc->ep->auth_enable)
+ return 0;
+
+ return __sctp_auth_cid(chunk,
+diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
+index 09b8daac87c8..477dd23a9864 100644
+--- a/net/sctp/endpointola.c
++++ b/net/sctp/endpointola.c
+@@ -69,7 +69,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
+ if (!ep->digest)
+ return NULL;
+
+- if (net->sctp.auth_enable) {
++ ep->auth_enable = net->sctp.auth_enable;
++ if (ep->auth_enable) {
+ /* Allocate space for HMACS and CHUNKS authentication
+ * variables. There are arrays that we encode directly
+ * into parameters to make the rest of the operations easier.
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 5e17092f4ada..2b216f1f6b23 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -492,8 +492,13 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ continue;
+ if ((laddr->state == SCTP_ADDR_SRC) &&
+ (AF_INET == laddr->a.sa.sa_family)) {
+- fl4->saddr = laddr->a.v4.sin_addr.s_addr;
+ fl4->fl4_sport = laddr->a.v4.sin_port;
++ flowi4_update_output(fl4,
++ asoc->base.sk->sk_bound_dev_if,
++ RT_CONN_FLAGS(asoc->base.sk),
++ daddr->v4.sin_addr.s_addr,
++ laddr->a.v4.sin_addr.s_addr);
++
+ rt = ip_route_output_key(sock_net(sk), fl4);
+ if (!IS_ERR(rt)) {
+ dst = &rt->dst;
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index 26be077b8267..1e06f3b23108 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -218,6 +218,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
+ gfp_t gfp, int vparam_len)
+ {
+ struct net *net = sock_net(asoc->base.sk);
++ struct sctp_endpoint *ep = asoc->ep;
+ sctp_inithdr_t init;
+ union sctp_params addrs;
+ size_t chunksize;
+@@ -277,7 +278,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
+ chunksize += vparam_len;
+
+ /* Account for AUTH related parameters */
+- if (net->sctp.auth_enable) {
++ if (ep->auth_enable) {
+ /* Add random parameter length*/
+ chunksize += sizeof(asoc->c.auth_random);
+
+@@ -362,7 +363,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
+ }
+
+ /* Add SCTP-AUTH chunks to the parameter list */
+- if (net->sctp.auth_enable) {
++ if (ep->auth_enable) {
+ sctp_addto_chunk(retval, sizeof(asoc->c.auth_random),
+ asoc->c.auth_random);
+ if (auth_hmacs)
+@@ -2023,7 +2024,7 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
+ /* if the peer reports AUTH, assume that he
+ * supports AUTH.
+ */
+- if (net->sctp.auth_enable)
++ if (asoc->ep->auth_enable)
+ asoc->peer.auth_capable = 1;
+ break;
+ case SCTP_CID_ASCONF:
+@@ -2115,6 +2116,7 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
+ * SCTP_IERROR_NO_ERROR - continue with the chunk
+ */
+ static sctp_ierror_t sctp_verify_param(struct net *net,
++ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ union sctp_params param,
+ sctp_cid_t cid,
+@@ -2165,7 +2167,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net,
+ goto fallthrough;
+
+ case SCTP_PARAM_RANDOM:
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ goto fallthrough;
+
+ /* SCTP-AUTH: Secion 6.1
+@@ -2182,7 +2184,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net,
+ break;
+
+ case SCTP_PARAM_CHUNKS:
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ goto fallthrough;
+
+ /* SCTP-AUTH: Section 3.2
+@@ -2198,7 +2200,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net,
+ break;
+
+ case SCTP_PARAM_HMAC_ALGO:
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ goto fallthrough;
+
+ hmacs = (struct sctp_hmac_algo_param *)param.p;
+@@ -2233,10 +2235,9 @@ fallthrough:
+ }
+
+ /* Verify the INIT packet before we process it. */
+-int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
+- sctp_cid_t cid,
+- sctp_init_chunk_t *peer_init,
+- struct sctp_chunk *chunk,
++int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
++ const struct sctp_association *asoc, sctp_cid_t cid,
++ sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
+ struct sctp_chunk **errp)
+ {
+ union sctp_params param;
+@@ -2277,8 +2278,8 @@ int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
+
+ /* Verify all the variable length parameters */
+ sctp_walk_params(param, peer_init, init_hdr.params) {
+-
+- result = sctp_verify_param(net, asoc, param, cid, chunk, errp);
++ result = sctp_verify_param(net, ep, asoc, param, cid,
++ chunk, errp);
+ switch (result) {
+ case SCTP_IERROR_ABORT:
+ case SCTP_IERROR_NOMEM:
+@@ -2510,6 +2511,7 @@ static int sctp_process_param(struct sctp_association *asoc,
+ struct sctp_af *af;
+ union sctp_addr_param *addr_param;
+ struct sctp_transport *t;
++ struct sctp_endpoint *ep = asoc->ep;
+
+ /* We maintain all INIT parameters in network byte order all the
+ * time. This allows us to not worry about whether the parameters
+@@ -2649,7 +2651,7 @@ do_addr_param:
+ goto fall_through;
+
+ case SCTP_PARAM_RANDOM:
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ goto fall_through;
+
+ /* Save peer's random parameter */
+@@ -2662,7 +2664,7 @@ do_addr_param:
+ break;
+
+ case SCTP_PARAM_HMAC_ALGO:
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ goto fall_through;
+
+ /* Save peer's HMAC list */
+@@ -2678,7 +2680,7 @@ do_addr_param:
+ break;
+
+ case SCTP_PARAM_CHUNKS:
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ goto fall_through;
+
+ asoc->peer.peer_chunks = kmemdup(param.p,
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 0a5f0508c43a..5dcfe8ca7f69 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -358,7 +358,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net,
+
+ /* Verify the INIT chunk before processing it. */
+ err_chunk = NULL;
+- if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
++ if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
+ (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
+ &err_chunk)) {
+ /* This chunk contains fatal error. It is to be discarded.
+@@ -525,7 +525,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
+
+ /* Verify the INIT chunk before processing it. */
+ err_chunk = NULL;
+- if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
++ if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
+ (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
+ &err_chunk)) {
+
+@@ -1431,7 +1431,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
+
+ /* Verify the INIT chunk before processing it. */
+ err_chunk = NULL;
+- if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
++ if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
+ (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
+ &err_chunk)) {
+ /* This chunk contains fatal error. It is to be discarded.
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 14c801528da8..e00a041129c2 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -3296,10 +3296,10 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+ {
+- struct net *net = sock_net(sk);
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_authchunk val;
+
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ return -EACCES;
+
+ if (optlen != sizeof(struct sctp_authchunk))
+@@ -3316,7 +3316,7 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
+ }
+
+ /* add this chunk id to the endpoint */
+- return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk);
++ return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
+ }
+
+ /*
+@@ -3329,12 +3329,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+ {
+- struct net *net = sock_net(sk);
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_hmacalgo *hmacs;
+ u32 idents;
+ int err;
+
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ return -EACCES;
+
+ if (optlen < sizeof(struct sctp_hmacalgo))
+@@ -3351,7 +3351,7 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
+ goto out;
+ }
+
+- err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs);
++ err = sctp_auth_ep_set_hmacs(ep, hmacs);
+ out:
+ kfree(hmacs);
+ return err;
+@@ -3367,12 +3367,12 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+ {
+- struct net *net = sock_net(sk);
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_authkey *authkey;
+ struct sctp_association *asoc;
+ int ret;
+
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ return -EACCES;
+
+ if (optlen <= sizeof(struct sctp_authkey))
+@@ -3393,7 +3393,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
+ goto out;
+ }
+
+- ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
++ ret = sctp_auth_set_key(ep, asoc, authkey);
+ out:
+ kzfree(authkey);
+ return ret;
+@@ -3409,11 +3409,11 @@ static int sctp_setsockopt_active_key(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+ {
+- struct net *net = sock_net(sk);
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_authkeyid val;
+ struct sctp_association *asoc;
+
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ return -EACCES;
+
+ if (optlen != sizeof(struct sctp_authkeyid))
+@@ -3425,8 +3425,7 @@ static int sctp_setsockopt_active_key(struct sock *sk,
+ if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
+ return -EINVAL;
+
+- return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc,
+- val.scact_keynumber);
++ return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
+ }
+
+ /*
+@@ -3438,11 +3437,11 @@ static int sctp_setsockopt_del_key(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+ {
+- struct net *net = sock_net(sk);
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_authkeyid val;
+ struct sctp_association *asoc;
+
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ return -EACCES;
+
+ if (optlen != sizeof(struct sctp_authkeyid))
+@@ -3454,8 +3453,7 @@ static int sctp_setsockopt_del_key(struct sock *sk,
+ if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
+ return -EINVAL;
+
+- return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc,
+- val.scact_keynumber);
++ return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
+
+ }
+
+@@ -5353,16 +5351,16 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
+ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+ {
+- struct net *net = sock_net(sk);
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_hmacalgo __user *p = (void __user *)optval;
+ struct sctp_hmac_algo_param *hmacs;
+ __u16 data_len = 0;
+ u32 num_idents;
+
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ return -EACCES;
+
+- hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
++ hmacs = ep->auth_hmacs_list;
+ data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
+
+ if (len < sizeof(struct sctp_hmacalgo) + data_len)
+@@ -5383,11 +5381,11 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ static int sctp_getsockopt_active_key(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+ {
+- struct net *net = sock_net(sk);
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_authkeyid val;
+ struct sctp_association *asoc;
+
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ return -EACCES;
+
+ if (len < sizeof(struct sctp_authkeyid))
+@@ -5402,7 +5400,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
+ if (asoc)
+ val.scact_keynumber = asoc->active_key_id;
+ else
+- val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
++ val.scact_keynumber = ep->active_key_id;
+
+ len = sizeof(struct sctp_authkeyid);
+ if (put_user(len, optlen))
+@@ -5416,7 +5414,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
+ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+ {
+- struct net *net = sock_net(sk);
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_authchunks __user *p = (void __user *)optval;
+ struct sctp_authchunks val;
+ struct sctp_association *asoc;
+@@ -5424,7 +5422,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
+ u32 num_chunks = 0;
+ char __user *to;
+
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ return -EACCES;
+
+ if (len < sizeof(struct sctp_authchunks))
+@@ -5460,7 +5458,7 @@ num:
+ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+ {
+- struct net *net = sock_net(sk);
++ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_authchunks __user *p = (void __user *)optval;
+ struct sctp_authchunks val;
+ struct sctp_association *asoc;
+@@ -5468,7 +5466,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
+ u32 num_chunks = 0;
+ char __user *to;
+
+- if (!net->sctp.auth_enable)
++ if (!ep->auth_enable)
+ return -EACCES;
+
+ if (len < sizeof(struct sctp_authchunks))
+@@ -5485,7 +5483,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
+ if (asoc)
+ ch = (struct sctp_chunks_param*)asoc->c.auth_chunks;
+ else
+- ch = sctp_sk(sk)->ep->auth_chunk_list;
++ ch = ep->auth_chunk_list;
+
+ if (!ch)
+ goto num;
+@@ -6564,6 +6562,46 @@ static void __sctp_write_space(struct sctp_association *asoc)
+ }
+ }
+
++static void sctp_wake_up_waiters(struct sock *sk,
++ struct sctp_association *asoc)
++{
++ struct sctp_association *tmp = asoc;
++
++ /* We do accounting for the sndbuf space per association,
++ * so we only need to wake our own association.
++ */
++ if (asoc->ep->sndbuf_policy)
++ return __sctp_write_space(asoc);
++
++ /* If association goes down and is just flushing its
++ * outq, then just normally notify others.
++ */
++ if (asoc->base.dead)
++ return sctp_write_space(sk);
++
++ /* Accounting for the sndbuf space is per socket, so we
++	 * need to wake up the others as well; try to be fair and,
++	 * when there are other associations, let them have a go
++	 * first instead of just calling sctp_write_space().
++ *
++ * Note that we reach sctp_wake_up_waiters() only when
++ * associations free up queued chunks, thus we are under
++ * lock and the list of associations on a socket is
++ * guaranteed not to change.
++ */
++ for (tmp = list_next_entry(tmp, asocs); 1;
++ tmp = list_next_entry(tmp, asocs)) {
++ /* Manually skip the head element. */
++ if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
++ continue;
++ /* Wake up association. */
++ __sctp_write_space(tmp);
++ /* We've reached the end. */
++ if (tmp == asoc)
++ break;
++ }
++}
++
+ /* Do accounting for the sndbuf space.
+ * Decrement the used sndbuf space of the corresponding association by the
+ * data size which was just transmitted(freed).
+@@ -6591,7 +6629,7 @@ static void sctp_wfree(struct sk_buff *skb)
+ sk_mem_uncharge(sk, skb->truesize);
+
+ sock_wfree(skb);
+- __sctp_write_space(asoc);
++ sctp_wake_up_waiters(sk, asoc);
+
+ sctp_association_put(asoc);
+ }
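/* Shape of the walk in sctp_wake_up_waiters() above (illustrative):
 * the asocs list is circular through the endpoint's list head, so
 * starting at the entry after asoc and stopping once asoc comes around
 * again wakes every other association first and the one that just
 * freed sndbuf space last:
 *
 *     asoc -> A -> B -> [ep->asocs head, skipped] -> C -> asoc (wake, stop)
 */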
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index 6b36561a1b3b..3e5ac1948607 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -59,8 +59,11 @@ extern int sysctl_sctp_wmem[3];
+ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
+ int write,
+ void __user *buffer, size_t *lenp,
+-
+ loff_t *ppos);
++static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
++ void __user *buffer, size_t *lenp,
++ loff_t *ppos);
++
+ static struct ctl_table sctp_table[] = {
+ {
+ .procname = "sctp_mem",
+@@ -261,7 +264,7 @@ static struct ctl_table sctp_net_table[] = {
+ .data = &init_net.sctp.auth_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec,
++ .proc_handler = proc_sctp_do_auth,
+ },
+ {
+ .procname = "addr_scope_policy",
+@@ -342,6 +345,37 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
+ return ret;
+ }
+
++static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
++ void __user *buffer, size_t *lenp,
++ loff_t *ppos)
++{
++ struct net *net = current->nsproxy->net_ns;
++ struct ctl_table tbl;
++ int new_value, ret;
++
++ memset(&tbl, 0, sizeof(struct ctl_table));
++ tbl.maxlen = sizeof(unsigned int);
++
++ if (write)
++ tbl.data = &new_value;
++ else
++ tbl.data = &net->sctp.auth_enable;
++
++ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
++
++ if (write) {
++ struct sock *sk = net->sctp.ctl_sock;
++
++ net->sctp.auth_enable = new_value;
++ /* Update the value in the control socket */
++ lock_sock(sk);
++ sctp_sk(sk)->ep->auth_enable = new_value;
++ release_sock(sk);
++ }
++
++ return ret;
++}
++
+ int sctp_sysctl_net_register(struct net *net)
+ {
+ struct ctl_table *table;
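/* Assumed companion change (elsewhere in this series, not in this
 * hunk): new endpoints snapshot the namespace default at creation,
 * e.g. in sctp_endpoint_init():
 *
 *     ep->auth_enable = net->sctp.auth_enable;
 *
 * which is why the proc handler above only has to patch up the one
 * long-lived control socket by hand.
 */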
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 5adfd94c5b85..85d232bed87d 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1925,9 +1925,23 @@ static struct miscdevice vsock_device = {
+ .fops = &vsock_device_ops,
+ };
+
+-static int __vsock_core_init(void)
++int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
+ {
+- int err;
++ int err = mutex_lock_interruptible(&vsock_register_mutex);
++
++ if (err)
++ return err;
++
++ if (transport) {
++ err = -EBUSY;
++ goto err_busy;
++ }
++
++ /* Transport must be the owner of the protocol so that it can't
++ * unload while there are open sockets.
++ */
++ vsock_proto.owner = owner;
++ transport = t;
+
+ vsock_init_tables();
+
+@@ -1951,36 +1965,19 @@ static int __vsock_core_init(void)
+ goto err_unregister_proto;
+ }
+
++ mutex_unlock(&vsock_register_mutex);
+ return 0;
+
+ err_unregister_proto:
+ proto_unregister(&vsock_proto);
+ err_misc_deregister:
+ misc_deregister(&vsock_device);
+- return err;
+-}
+-
+-int vsock_core_init(const struct vsock_transport *t)
+-{
+- int retval = mutex_lock_interruptible(&vsock_register_mutex);
+- if (retval)
+- return retval;
+-
+- if (transport) {
+- retval = -EBUSY;
+- goto out;
+- }
+-
+- transport = t;
+- retval = __vsock_core_init();
+- if (retval)
+- transport = NULL;
+-
+-out:
++ transport = NULL;
++err_busy:
+ mutex_unlock(&vsock_register_mutex);
+- return retval;
++ return err;
+ }
+-EXPORT_SYMBOL_GPL(vsock_core_init);
++EXPORT_SYMBOL_GPL(__vsock_core_init);
+
+ void vsock_core_exit(void)
+ {
+@@ -2000,5 +1997,5 @@ EXPORT_SYMBOL_GPL(vsock_core_exit);
+
+ MODULE_AUTHOR("VMware, Inc.");
+ MODULE_DESCRIPTION("VMware Virtual Socket Family");
+-MODULE_VERSION("1.0.0.0-k");
++MODULE_VERSION("1.0.1.0-k");
+ MODULE_LICENSE("GPL v2");
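/* Callers presumably reach this through a header wrapper that captures
 * the transport module automatically (assumed companion change in
 * include/net/af_vsock.h, not shown here):
 *
 *     #define vsock_core_init(t)  __vsock_core_init(t, THIS_MODULE)
 */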
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 8247979e8f64..78c2169a4f59 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -573,12 +573,16 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
+ if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 ||
+ strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 ||
+ strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 ||
+- strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0)
++ strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0 ||
++ strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
++ strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
+ return 1;
+ if (info->hdr->e_machine == EM_PPC64)
+ /* Special register function linked on all modules during final link of .ko */
+ if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
+- strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0)
++ strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
++ strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
++ strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
+ return 1;
+ /* Do not ignore this symbol */
+ return 0;
diff --git a/1021_linux-3.12.22.patch b/1021_linux-3.12.22.patch
new file mode 100644
index 00000000..de68db30
--- /dev/null
+++ b/1021_linux-3.12.22.patch
@@ -0,0 +1,5096 @@
+diff --git a/Documentation/input/elantech.txt b/Documentation/input/elantech.txt
+index 5602eb71ad5d..e1ae127ed099 100644
+--- a/Documentation/input/elantech.txt
++++ b/Documentation/input/elantech.txt
+@@ -504,9 +504,12 @@ byte 5:
+ * reg_10
+
+ bit 7 6 5 4 3 2 1 0
+- 0 0 0 0 0 0 0 A
++ 0 0 0 0 R F T A
+
+ A: 1 = enable absolute tracking
++ T: 1 = enable two finger mode auto correct
++ F: 1 = disable ABS Position Filter
++ R: 1 = enable real hardware resolution
+
+ 6.2 Native absolute mode 6 byte packet format
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+diff --git a/Documentation/ja_JP/HOWTO b/Documentation/ja_JP/HOWTO
+index 8148a47fc70e..52cf2752a53d 100644
+--- a/Documentation/ja_JP/HOWTO
++++ b/Documentation/ja_JP/HOWTO
+@@ -315,7 +315,7 @@ Andrew Morton が Linux-kernel メーリングリストにカーネルリリー
+ もし、3.x.y カーネルが存在しない場合には、番号が一番大きい 3.x が
+ 最新の安定版カーネルです。
+
+-3.x.y は "stable" チーム <stable@kernel.org> でメンテされており、必
++3.x.y は "stable" チーム <stable@vger.kernel.org> でメンテされており、必
+ 要に応じてリリースされます。通常のリリース期間は 2週間毎ですが、差し迫っ
+ た問題がなければもう少し長くなることもあります。セキュリティ関連の問題
+ の場合はこれに対してだいたいの場合、すぐにリリースがされます。
+diff --git a/Documentation/ja_JP/stable_kernel_rules.txt b/Documentation/ja_JP/stable_kernel_rules.txt
+index 14265837c4ce..9dbda9b5d21e 100644
+--- a/Documentation/ja_JP/stable_kernel_rules.txt
++++ b/Documentation/ja_JP/stable_kernel_rules.txt
+@@ -50,16 +50,16 @@ linux-2.6.29/Documentation/stable_kernel_rules.txt
+
+ -stable ツリーにパッチを送付する手続き-
+
+- - 上記の規則に従っているかを確認した後に、stable@kernel.org にパッチ
++ - 上記の規則に従っているかを確認した後に、stable@vger.kernel.org にパッチ
+ を送る。
+ - 送信者はパッチがキューに受け付けられた際には ACK を、却下された場合
+ には NAK を受け取る。この反応は開発者たちのスケジュールによって、数
+ 日かかる場合がある。
+ - もし受け取られたら、パッチは他の開発者たちと関連するサブシステムの
+ メンテナーによるレビューのために -stable キューに追加される。
+- - パッチに stable@kernel.org のアドレスが付加されているときには、それ
++ - パッチに stable@vger.kernel.org のアドレスが付加されているときには、それ
+ が Linus のツリーに入る時に自動的に stable チームに email される。
+- - セキュリティパッチはこのエイリアス (stable@kernel.org) に送られるべ
++ - セキュリティパッチはこのエイリアス (stable@vger.kernel.org) に送られるべ
+ きではなく、代わりに security@kernel.org のアドレスに送られる。
+
+ レビューサイクル-
+diff --git a/Documentation/zh_CN/HOWTO b/Documentation/zh_CN/HOWTO
+index 7fba5aab9ef9..7599eb38b764 100644
+--- a/Documentation/zh_CN/HOWTO
++++ b/Documentation/zh_CN/HOWTO
+@@ -237,7 +237,7 @@ kernel.org网站的pub/linux/kernel/v2.6/目录下找到它。它的开发遵循
+ 如果没有2.6.x.y版本内核存在,那么最新的2.6.x版本内核就相当于是当前的稳定
+ 版内核。
+
+-2.6.x.y版本由“稳定版”小组(邮件地址<stable@kernel.org>)维护,一般隔周发
++2.6.x.y版本由“稳定版”小组(邮件地址<stable@vger.kernel.org>)维护,一般隔周发
+ 布新版本。
+
+ 内核源码中的Documentation/stable_kernel_rules.txt文件具体描述了可被稳定
+diff --git a/Documentation/zh_CN/stable_kernel_rules.txt b/Documentation/zh_CN/stable_kernel_rules.txt
+index b5b9b0ab02fd..26ea5ed7cd9c 100644
+--- a/Documentation/zh_CN/stable_kernel_rules.txt
++++ b/Documentation/zh_CN/stable_kernel_rules.txt
+@@ -42,7 +42,7 @@ Documentation/stable_kernel_rules.txt 的中文翻译
+
+ 向稳定版代码树提交补丁的过程:
+
+- - 在确认了补丁符合以上的规则后,将补丁发送到stable@kernel.org。
++ - 在确认了补丁符合以上的规则后,将补丁发送到stable@vger.kernel.org。
+ - 如果补丁被接受到队列里,发送者会收到一个ACK回复,如果没有被接受,收
+ 到的是NAK回复。回复需要几天的时间,这取决于开发者的时间安排。
+ - 被接受的补丁会被加到稳定版本队列里,等待其他开发者的审查。
+diff --git a/Makefile b/Makefile
+index e4a8804bb609..ec5e153e2991 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 21
++SUBLEVEL = 22
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
+index bcf6d79a57ec..8c2fe44e4dfe 100644
+--- a/arch/arm/boot/dts/armada-xp-db.dts
++++ b/arch/arm/boot/dts/armada-xp-db.dts
+@@ -40,7 +40,7 @@
+ /* Device Bus parameters are required */
+
+ /* Read parameters */
+- devbus,bus-width = <8>;
++ devbus,bus-width = <16>;
+ devbus,turn-off-ps = <60000>;
+ devbus,badr-skew-ps = <0>;
+ devbus,acc-first-ps = <124000>;
+diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
+index 2298e4a910e2..e325e62909f9 100644
+--- a/arch/arm/boot/dts/armada-xp-gp.dts
++++ b/arch/arm/boot/dts/armada-xp-gp.dts
+@@ -49,7 +49,7 @@
+ /* Device Bus parameters are required */
+
+ /* Read parameters */
+- devbus,bus-width = <8>;
++ devbus,bus-width = <16>;
+ devbus,turn-off-ps = <60000>;
+ devbus,badr-skew-ps = <0>;
+ devbus,acc-first-ps = <124000>;
+diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+index 5695afcc04bf..d6cce8aa8c68 100644
+--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
++++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+@@ -37,7 +37,7 @@
+ /* Device Bus parameters are required */
+
+ /* Read parameters */
+- devbus,bus-width = <8>;
++ devbus,bus-width = <16>;
+ devbus,turn-off-ps = <60000>;
+ devbus,badr-skew-ps = <0>;
+ devbus,acc-first-ps = <124000>;
+diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
+index 4307e80b2d2e..dc72353de0b3 100644
+--- a/arch/arm/boot/dts/imx53.dtsi
++++ b/arch/arm/boot/dts/imx53.dtsi
+@@ -87,7 +87,7 @@
+ ipu: ipu@18000000 {
+ #crtc-cells = <1>;
+ compatible = "fsl,imx53-ipu";
+- reg = <0x18000000 0x080000000>;
++ reg = <0x18000000 0x08000000>;
+ interrupts = <11 10>;
+ clocks = <&clks 59>, <&clks 110>, <&clks 61>;
+ clock-names = "bus", "di0", "di1";
+diff --git a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
+index 6317e1d088b3..e650e35d120c 100644
+--- a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
++++ b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
+@@ -30,6 +30,16 @@
+ bootargs = "console=ttyS0,115200n8 earlyprintk";
+ };
+
++ mbus {
++ pcie-controller {
++ status = "okay";
++
++ pcie@1,0 {
++ status = "okay";
++ };
++ };
++ };
++
+ ocp@f1000000 {
+ pinctrl@10000 {
+ pmx_usb_led: pmx-usb-led {
+@@ -73,14 +83,6 @@
+ ehci@50000 {
+ status = "okay";
+ };
+-
+- pcie-controller {
+- status = "okay";
+-
+- pcie@1,0 {
+- status = "okay";
+- };
+- };
+ };
+
+ gpio-leds {
+diff --git a/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi b/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
+index 06267a91de38..7c3f4bc4b7e4 100644
+--- a/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
++++ b/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
+@@ -4,6 +4,16 @@
+ / {
+ model = "ZyXEL NSA310";
+
++ mbus {
++ pcie-controller {
++ status = "okay";
++
++ pcie@1,0 {
++ status = "okay";
++ };
++ };
++ };
++
+ ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+@@ -69,14 +79,6 @@
+ reg = <0x5040000 0x2fc0000>;
+ };
+ };
+-
+- pcie-controller {
+- status = "okay";
+-
+- pcie@1,0 {
+- status = "okay";
+- };
+- };
+ };
+
+ gpio_poweroff {
+diff --git a/arch/arm/boot/dts/ste-ccu8540.dts b/arch/arm/boot/dts/ste-ccu8540.dts
+index 7f3baf51a3a9..32dd55e5f4e6 100644
+--- a/arch/arm/boot/dts/ste-ccu8540.dts
++++ b/arch/arm/boot/dts/ste-ccu8540.dts
+@@ -18,6 +18,7 @@
+ compatible = "st-ericsson,ccu8540", "st-ericsson,u8540";
+
+ memory@0 {
++ device_type = "memory";
+ reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>;
+ };
+
+diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
+index 90c50d4b43f7..5d1286d51154 100644
+--- a/arch/arm/kernel/crash_dump.c
++++ b/arch/arm/kernel/crash_dump.c
+@@ -39,7 +39,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ if (!csize)
+ return 0;
+
+- vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++ vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
+ if (!vaddr)
+ return -ENOMEM;
+
+diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
+index f565f9944af2..7548db2bfb8a 100644
+--- a/arch/arm/mach-orion5x/common.h
++++ b/arch/arm/mach-orion5x/common.h
+@@ -21,7 +21,7 @@ struct mv_sata_platform_data;
+ #define ORION_MBUS_DEVBUS_BOOT_ATTR 0x0f
+ #define ORION_MBUS_DEVBUS_TARGET(cs) 0x01
+ #define ORION_MBUS_DEVBUS_ATTR(cs) (~(1 << cs))
+-#define ORION_MBUS_SRAM_TARGET 0x00
++#define ORION_MBUS_SRAM_TARGET 0x09
+ #define ORION_MBUS_SRAM_ATTR 0x00
+
+ /*
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 5e9aec358306..31eb959e9aa8 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -51,7 +51,11 @@ int pmd_huge(pmd_t pmd)
+
+ int pud_huge(pud_t pud)
+ {
++#ifndef __PAGETABLE_PMD_FOLDED
+ return !(pud_val(pud) & PUD_TABLE_BIT);
++#else
++ return 0;
++#endif
+ }
+
+ int pmd_huge_support(void)
+diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
+index c90bfc6bf648..e355a4c10968 100644
+--- a/arch/metag/include/asm/barrier.h
++++ b/arch/metag/include/asm/barrier.h
+@@ -15,6 +15,7 @@ static inline void wr_fence(void)
+ volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
+ barrier();
+ *flushptr = 0;
++ barrier();
+ }
+
+ #else /* CONFIG_METAG_META21 */
+@@ -35,6 +36,7 @@ static inline void wr_fence(void)
+ *flushptr = 0;
+ *flushptr = 0;
+ *flushptr = 0;
++ barrier();
+ }
+
+ #endif /* !CONFIG_METAG_META21 */
+@@ -68,6 +70,7 @@ static inline void fence(void)
+ volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
+ barrier();
+ *flushptr = 0;
++ barrier();
+ }
+ #define smp_mb() fence()
+ #define smp_rmb() fence()
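/* Why the trailing barrier() matters (illustrative, not from the
 * hunk): *flushptr is the only access the compiler is told about, so
 * without a compiler barrier it could sink later ordinary stores above
 * the fence trigger:
 *
 *     *flushptr = 0;    // write-fence trigger
 *     barrier();        // keep subsequent accesses after the trigger
 *     shared_flag = 1;  // must not be emitted before the fence write
 */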
+diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
+index f16477d1f571..3be8581af495 100644
+--- a/arch/metag/include/asm/processor.h
++++ b/arch/metag/include/asm/processor.h
+@@ -22,6 +22,8 @@
+ /* Add an extra page of padding at the top of the stack for the guard page. */
+ #define STACK_TOP (TASK_SIZE - PAGE_SIZE)
+ #define STACK_TOP_MAX STACK_TOP
++/* Maximum virtual space for stack */
++#define STACK_SIZE_MAX (1 << 28) /* 256 MB */
+
+ /* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
+index 25fbfae06c1f..ab7dc01ad8c3 100644
+--- a/arch/mips/cavium-octeon/octeon-irq.c
++++ b/arch/mips/cavium-octeon/octeon-irq.c
+@@ -635,7 +635,7 @@ static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
+ cpumask_clear(&new_affinity);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+ }
+- __irq_set_affinity_locked(data, &new_affinity);
++ irq_set_affinity_locked(data, &new_affinity, false);
+ }
+
+ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
+diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts
+index fac1f5b178eb..143b8a37b5e4 100644
+--- a/arch/mips/lantiq/dts/easy50712.dts
++++ b/arch/mips/lantiq/dts/easy50712.dts
+@@ -8,6 +8,7 @@
+ };
+
+ memory@0 {
++ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts
+index 35eb874ab7f1..709f58132f5c 100644
+--- a/arch/mips/ralink/dts/mt7620a_eval.dts
++++ b/arch/mips/ralink/dts/mt7620a_eval.dts
+@@ -7,6 +7,7 @@
+ model = "Ralink MT7620A evaluation board";
+
+ memory@0 {
++ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts
+index 322d7002595b..0a685db093d4 100644
+--- a/arch/mips/ralink/dts/rt2880_eval.dts
++++ b/arch/mips/ralink/dts/rt2880_eval.dts
+@@ -7,6 +7,7 @@
+ model = "Ralink RT2880 evaluation board";
+
+ memory@0 {
++ device_type = "memory";
+ reg = <0x8000000 0x2000000>;
+ };
+
+diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
+index 0ac73ea28198..ec9e9a035541 100644
+--- a/arch/mips/ralink/dts/rt3052_eval.dts
++++ b/arch/mips/ralink/dts/rt3052_eval.dts
+@@ -7,6 +7,7 @@
+ model = "Ralink RT3052 evaluation board";
+
+ memory@0 {
++ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts
+index 2fa6b330bf4f..e8df21a5d10d 100644
+--- a/arch/mips/ralink/dts/rt3883_eval.dts
++++ b/arch/mips/ralink/dts/rt3883_eval.dts
+@@ -7,6 +7,7 @@
+ model = "Ralink RT3883 evaluation board";
+
+ memory@0 {
++ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
+index cc2290a3cace..c6ee86542fec 100644
+--- a/arch/parisc/include/asm/processor.h
++++ b/arch/parisc/include/asm/processor.h
+@@ -53,6 +53,8 @@
+ #define STACK_TOP TASK_SIZE
+ #define STACK_TOP_MAX DEFAULT_TASK_SIZE
+
++#define STACK_SIZE_MAX (1 << 30) /* 1 GB */
++
+ #endif
+
+ #ifndef __ASSEMBLY__
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 51cfb78d4061..994337bb529c 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -114,7 +114,9 @@ endif
+
+ CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
+
+-KBUILD_CPPFLAGS += -Iarch/$(ARCH)
++asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
++
++KBUILD_CPPFLAGS += -Iarch/$(ARCH) $(asinstr)
+ KBUILD_AFLAGS += -Iarch/$(ARCH)
+ KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
+ CPP = $(CC) -E $(KBUILD_CFLAGS)
+diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
+index 0d2d0f03163b..e6d03c7a8031 100644
+--- a/arch/powerpc/include/asm/ppc_asm.h
++++ b/arch/powerpc/include/asm/ppc_asm.h
+@@ -378,11 +378,16 @@ n:
+ * ld rY,ADDROFF(name)(rX)
+ */
+ #ifdef __powerpc64__
++#ifdef HAVE_AS_ATHIGH
++#define __AS_ATHIGH high
++#else
++#define __AS_ATHIGH h
++#endif
+ #define LOAD_REG_IMMEDIATE(reg,expr) \
+ lis reg,(expr)@highest; \
+ ori reg,reg,(expr)@higher; \
+ rldicr reg,reg,32,31; \
+- oris reg,reg,(expr)@h; \
++ oris reg,reg,(expr)@__AS_ATHIGH; \
+ ori reg,reg,(expr)@l;
+
+ #define LOAD_REG_ADDR(reg,name) \
+diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
+index 611acdf30096..263e44503138 100644
+--- a/arch/powerpc/kernel/machine_kexec_64.c
++++ b/arch/powerpc/kernel/machine_kexec_64.c
+@@ -237,7 +237,7 @@ static void wake_offline_cpus(void)
+ if (!cpu_online(cpu)) {
+ printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
+ cpu);
+- cpu_up(cpu);
++ WARN_ON(cpu_up(cpu));
+ }
+ }
+ }
+diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
+index 227c7fe4067f..b91083370bc6 100644
+--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
++++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
+@@ -493,7 +493,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
+ ret = ioda_eeh_phb_reset(hose, option);
+ } else {
+ bus = eeh_pe_bus_get(pe);
+- if (pci_is_root_bus(bus))
++ if (pci_is_root_bus(bus) ||
++ pci_is_root_bus(bus->parent))
+ ret = ioda_eeh_root_reset(hose, option);
+ else
+ ret = ioda_eeh_bridge_reset(hose, bus->self, option);
+diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
+index a8091216963b..68c05398bba9 100644
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -52,6 +52,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+ {
++ ptep_clear_flush(vma, addr, ptep);
+ }
+
+ static inline int huge_pte_none(pte_t pte)
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index af1d14a9ebda..dcbbaa165bde 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -20,6 +20,8 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
+
++int sysctl_ldt16 = 0;
++
+ #ifdef CONFIG_SMP
+ static void flush_ldt(void *current_mm)
+ {
+@@ -234,7 +236,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ * IRET leaking the high bits of the kernel stack address.
+ */
+ #ifdef CONFIG_X86_64
+- if (!ldt_info.seg_32bit) {
++ if (!ldt_info.seg_32bit && !sysctl_ldt16) {
+ error = -EINVAL;
+ goto out_unlock;
+ }
+diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
+index d6bfb876cfb0..f1d633a43f8e 100644
+--- a/arch/x86/vdso/vdso32-setup.c
++++ b/arch/x86/vdso/vdso32-setup.c
+@@ -41,6 +41,7 @@ enum {
+ #ifdef CONFIG_X86_64
+ #define vdso_enabled sysctl_vsyscall32
+ #define arch_setup_additional_pages syscall32_setup_pages
++extern int sysctl_ldt16;
+ #endif
+
+ /*
+@@ -380,6 +381,13 @@ static struct ctl_table abi_table2[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
++ {
++ .procname = "ldt16",
++ .data = &sysctl_ldt16,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
+ {}
+ };
+
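/* Usage sketch (path assumed from the "abi" ctl table above):
 *
 *     echo 1 > /proc/sys/abi/ldt16
 *
 * re-allows 16-bit LDT segments on 64-bit kernels (Wine/DOSEMU) at the
 * cost of the IRET stack-address-leak hardening the ldt.c hunk refers
 * to; the default of 0 keeps the hardened behaviour.
 */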
+diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c
+index adad92a44ba2..2f1b8d12952a 100644
+--- a/crypto/crypto_wq.c
++++ b/crypto/crypto_wq.c
+@@ -33,7 +33,7 @@ static void __exit crypto_wq_exit(void)
+ destroy_workqueue(kcrypto_wq);
+ }
+
+-module_init(crypto_wq_init);
++subsys_initcall(crypto_wq_init);
+ module_exit(crypto_wq_exit);
+
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index f29e06efa479..f99cb6a15c00 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -410,7 +410,6 @@ static int acpi_processor_add(struct acpi_device *device,
+ goto err;
+
+ pr->dev = dev;
+- dev->offline = pr->flags.need_hotplug_init;
+
+ /* Trigger the processor driver's .probe() if present. */
+ if (device_attach(dev) >= 0)
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index f37dec579712..16eb678c0f3d 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -345,6 +345,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+ },
+ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Dell Inspiron 7737",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
++ },
++ },
+
+ /*
+ * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+@@ -405,6 +413,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
+ },
+ },
++ /*
++	 * Without this quirk this EEEpc exports a non-working WMI interface;
++	 * with it, the machine exports the working "good old" eeepc_laptop
++	 * interface, fixing both brightness control and rfkill.
++ */
++ {
++ .callback = dmi_enable_osi_linux,
++ .ident = "Asus EEE PC 1015PX",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index 5708e44376fe..d2e069044a0f 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -836,7 +836,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
+ * or an index). Set the backlight to max_level in this case.
+ */
+ for (i = 2; i < br->count; i++)
+- if (level_old == br->levels[i])
++ if (level == br->levels[i])
+ break;
+ if (i == br->count || !level)
+ level = max_level;
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 97ae08de4b52..d7f00adbc374 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -6329,6 +6329,8 @@ int ata_host_activate(struct ata_host *host, int irq,
+ static void ata_port_detach(struct ata_port *ap)
+ {
+ unsigned long flags;
++ struct ata_link *link;
++ struct ata_device *dev;
+
+ if (!ap->ops->error_handler)
+ goto skip_eh;
+@@ -6348,6 +6350,13 @@ static void ata_port_detach(struct ata_port *ap)
+ cancel_delayed_work_sync(&ap->hotplug_task);
+
+ skip_eh:
++ /* clean up zpodd on port removal */
++ ata_for_each_link(link, ap, HOST_FIRST) {
++ ata_for_each_dev(dev, link, ALL) {
++ if (zpodd_dev_enabled(dev))
++ zpodd_exit(dev);
++ }
++ }
+ if (ap->pmp_link) {
+ int i;
+ for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
+diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
+index d63ee8f41a4f..e3a49df958a3 100644
+--- a/drivers/ata/pata_at91.c
++++ b/drivers/ata/pata_at91.c
+@@ -408,12 +408,13 @@ static int pata_at91_probe(struct platform_device *pdev)
+
+ host->private_data = info;
+
+- return ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
+- gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
+- irq_flags, &pata_at91_sht);
++ ret = ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
++ gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
++ irq_flags, &pata_at91_sht);
++ if (ret)
++ goto err_put;
+
+- if (!ret)
+- return 0;
++ return 0;
+
+ err_put:
+ clk_put(info->mck);
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 06051767393f..8a8d611f2021 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
+ static LIST_HEAD(deferred_probe_pending_list);
+ static LIST_HEAD(deferred_probe_active_list);
+ static struct workqueue_struct *deferred_wq;
++static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+
+ /**
+ * deferred_probe_work_func() - Retry probing devices in the active list.
+@@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
+  * This function moves all devices from the pending list to the active
+ * list and schedules the deferred probe workqueue to process them. It
+ * should be called anytime a driver is successfully bound to a device.
++ *
++ * Note, there is a race condition in multi-threaded probe. In the case where
++ * more than one device is probing at the same time, it is possible for one
++ * probe to complete successfully while another is about to defer. If the second
++ * depends on the first, then it will get put on the pending list after the
++ * trigger event has already occurred and will be stuck there.
++ *
++ * The atomic 'deferred_trigger_count' is used to determine if a successful
++ * trigger has occurred in the midst of probing a driver. If the trigger count
++ * changes in the midst of a probe, then deferred processing should be triggered
++ * again.
+ */
+ static void driver_deferred_probe_trigger(void)
+ {
+@@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
+ * into the active list so they can be retried by the workqueue
+ */
+ mutex_lock(&deferred_probe_mutex);
++ atomic_inc(&deferred_trigger_count);
+ list_splice_tail_init(&deferred_probe_pending_list,
+ &deferred_probe_active_list);
+ mutex_unlock(&deferred_probe_mutex);
+@@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
+ static int really_probe(struct device *dev, struct device_driver *drv)
+ {
+ int ret = 0;
++ int local_trigger_count = atomic_read(&deferred_trigger_count);
+
+ atomic_inc(&probe_count);
+ pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
+@@ -310,6 +324,9 @@ probe_failed:
+ /* Driver requested deferred probing */
+ dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
+ driver_deferred_probe_add(dev);
++ /* Did a trigger occur while probing? Need to re-trigger if yes */
++ if (local_trigger_count != atomic_read(&deferred_trigger_count))
++ driver_deferred_probe_trigger();
+ } else if (ret != -ENODEV && ret != -ENXIO) {
+ /* driver matched but the probe failed */
+ printk(KERN_WARNING
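/* A minimal sketch of the generation-counter idiom above, with
 * illustrative names (the flow below is condensed, not kernel API):
 *
 *     int gen = atomic_read(&deferred_trigger_count);
 *     ret = drv->probe(dev);
 *     if (ret == -EPROBE_DEFER &&
 *         gen != atomic_read(&deferred_trigger_count))
 *             driver_deferred_probe_trigger();  // a bind raced with us
 */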
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 0a327f4154a2..2acabdaecec8 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -82,6 +82,7 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x04CA, 0x3004) },
+ { USB_DEVICE(0x04CA, 0x3005) },
+ { USB_DEVICE(0x04CA, 0x3006) },
++ { USB_DEVICE(0x04CA, 0x3007) },
+ { USB_DEVICE(0x04CA, 0x3008) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
+@@ -124,6 +125,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6e30356d3e42..6e6740b9521b 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -149,6 +149,7 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
+index 2394e9753ef5..b4bd72b6fdc8 100644
+--- a/drivers/bus/mvebu-mbus.c
++++ b/drivers/bus/mvebu-mbus.c
+@@ -222,12 +222,6 @@ static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
+ */
+ if ((u64)base < wend && end > wbase)
+ return 0;
+-
+- /*
+- * Check if target/attribute conflicts
+- */
+- if (target == wtarget && attr == wattr)
+- return 0;
+ }
+
+ return 1;
+diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
+index e53fc24c6af3..e1ddcf938519 100644
+--- a/drivers/char/ipmi/ipmi_kcs_sm.c
++++ b/drivers/char/ipmi/ipmi_kcs_sm.c
+@@ -251,8 +251,9 @@ static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
+ if (!GET_STATUS_OBF(status)) {
+ kcs->obf_timeout -= time;
+ if (kcs->obf_timeout < 0) {
+- start_error_recovery(kcs, "OBF not ready in time");
+- return 1;
++ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
++ start_error_recovery(kcs, "OBF not ready in time");
++ return 1;
+ }
+ return 0;
+ }
+diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
+index 197074a57754..4c1d9bbe3191 100644
+--- a/drivers/clk/tegra/clk-pll.c
++++ b/drivers/clk/tegra/clk-pll.c
+@@ -1587,12 +1587,14 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
+ val_aux = pll_readl(pll_params->aux_reg, pll);
+
+ if (val & PLL_BASE_ENABLE) {
+- if (!(val_aux & PLLE_AUX_PLLRE_SEL))
++ if ((val_aux & PLLE_AUX_PLLRE_SEL) ||
++ (val_aux & PLLE_AUX_PLLP_SEL))
+ WARN(1, "pll_e enabled with unsupported parent %s\n",
+- (val & PLLE_AUX_PLLP_SEL) ? "pllp_out0" : "pll_ref");
++ (val_aux & PLLE_AUX_PLLP_SEL) ? "pllp_out0" :
++ "pll_re_vco");
+ } else {
+- val_aux |= PLLE_AUX_PLLRE_SEL;
+- pll_writel(val, pll_params->aux_reg, pll);
++ val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
++ pll_writel(val_aux, pll_params->aux_reg, pll);
+ }
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
+index 9467da7dee49..406929d4ce2f 100644
+--- a/drivers/clk/tegra/clk-tegra114.c
++++ b/drivers/clk/tegra/clk-tegra114.c
+@@ -673,6 +673,7 @@ static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
+ /* PLLE special case: use cpcon field to store cml divider value */
+ {336000000, 100000000, 100, 21, 16, 11},
+ {312000000, 100000000, 200, 26, 24, 13},
++ {12000000, 100000000, 200, 1, 24, 13},
+ {0, 0, 0, 0, 0, 0},
+ };
+
+@@ -1501,7 +1502,7 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
+ clks[pll_re_out] = clk;
+
+ /* PLLE */
+- clk = tegra_clk_register_plle_tegra114("pll_e_out0", "pll_re_vco",
++ clk = tegra_clk_register_plle_tegra114("pll_e_out0", "pll_ref",
+ clk_base, 0, 100000000, &pll_e_params,
+ pll_e_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_e_out0", NULL);
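/* Sanity check on the new 12 MHz row (arithmetic, not from the patch):
 * fields are {input, output, n, m, p, cpcon}, with the cpcon slot
 * carrying the CML divider per the comment above the table. With
 * pll_ref at 12 MHz: 12 MHz * 200 / 1 = 2400 MHz VCO, and p = 24
 * divides that down to the 100 MHz PLLE output, matching the existing
 * rows (e.g. 336 * 100 / 21 / 16 = 100).
 */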
+diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
+index 2dc8b41a339d..a535c7bf8574 100644
+--- a/drivers/clk/versatile/clk-vexpress-osc.c
++++ b/drivers/clk/versatile/clk-vexpress-osc.c
+@@ -102,7 +102,7 @@ void __init vexpress_osc_of_setup(struct device_node *node)
+
+ osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ if (!osc)
+- goto error;
++ return;
+
+ osc->func = vexpress_config_func_get_by_node(node);
+ if (!osc->func) {
+diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
+index 62b0de6a1837..70f3a597ec57 100644
+--- a/drivers/clocksource/exynos_mct.c
++++ b/drivers/clocksource/exynos_mct.c
+@@ -414,8 +414,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
+ evt->set_mode = exynos4_tick_set_mode;
+ evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ evt->rating = 450;
+- clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
+- 0xf, 0x7fffffff);
+
+ exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
+
+@@ -428,9 +426,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
+ evt->irq);
+ return -EIO;
+ }
++ irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
+ } else {
+ enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
+ }
++ clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
++ 0xf, 0x7fffffff);
+
+ return 0;
+ }
+@@ -448,7 +449,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+ {
+ struct mct_clock_event_device *mevt;
+- unsigned int cpu;
+
+ /*
+ * Grab cpu pointer in each case to avoid spurious
+@@ -459,12 +459,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
+ mevt = this_cpu_ptr(&percpu_mct_tick);
+ exynos4_local_timer_setup(&mevt->evt);
+ break;
+- case CPU_ONLINE:
+- cpu = (unsigned long)hcpu;
+- if (mct_int_type == MCT_INT_SPI)
+- irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
+- cpumask_of(cpu));
+- break;
+ case CPU_DYING:
+ mevt = this_cpu_ptr(&percpu_mct_tick);
+ exynos4_local_timer_stop(&mevt->evt);
+diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
+index 9f25f5296029..0eabd81e1a90 100644
+--- a/drivers/crypto/caam/error.c
++++ b/drivers/crypto/caam/error.c
+@@ -16,9 +16,13 @@
+ char *tmp; \
+ \
+ tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
+- sprintf(tmp, format, param); \
+- strcat(str, tmp); \
+- kfree(tmp); \
++ if (likely(tmp)) { \
++ sprintf(tmp, format, param); \
++ strcat(str, tmp); \
++ kfree(tmp); \
++ } else { \
++ strcat(str, "kmalloc failure in SPRINTFCAT"); \
++ } \
+ }
+
+ static void report_jump_idx(u32 status, char *outstr)
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 89eb89f22284..da87adf85f03 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -1569,11 +1569,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ /* Disable BLOCK interrupts as well */
+ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+
+- err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
+- IRQF_SHARED, "dw_dmac", dw);
+- if (err)
+- return err;
+-
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
+ sizeof(struct dw_desc), 4, 0);
+@@ -1584,6 +1579,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+
+ tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
+
++ err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
++ "dw_dmac", dw);
++ if (err)
++ return err;
++
+ INIT_LIST_HEAD(&dw->dma.channels);
+ for (i = 0; i < nr_channels; i++) {
+ struct dw_dma_chan *dwc = &dw->chan[i];
+@@ -1686,6 +1686,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
+ dw_dma_off(dw);
+ dma_async_device_unregister(&dw->dma);
+
++ free_irq(chip->irq, dw);
+ tasklet_kill(&dw->tasklet);
+
+ list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
+diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
+index 536dcb8ba5fd..dea771435a19 100644
+--- a/drivers/dma/mv_xor.c
++++ b/drivers/dma/mv_xor.c
+@@ -205,12 +205,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
+
+ static void mv_chan_activate(struct mv_xor_chan *chan)
+ {
+- u32 activation;
+-
+ dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
+- activation = readl_relaxed(XOR_ACTIVATION(chan));
+- activation |= 0x1;
+- writel_relaxed(activation, XOR_ACTIVATION(chan));
++
++ /* writel ensures all descriptors are flushed before activation */
++ writel(BIT(0), XOR_ACTIVATION(chan));
+ }
+
+ static char mv_chan_is_busy(struct mv_xor_chan *chan)
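/* Why a plain writel() (illustrative): writel() implies a write
 * barrier ahead of the MMIO store, so the descriptors written to
 * coherent memory are visible to the engine before the activation
 * doorbell lands; writel_relaxed() gave no such ordering. The old
 * read-modify-write is also dropped, since only bit 0 needs setting.
 */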
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index b00b32c992b0..35066a9b535f 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3419,7 +3419,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ {
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- struct i915_vma *vma;
++ struct i915_vma *vma, *next;
+ int ret;
+
+ if (obj->cache_level == cache_level)
+@@ -3430,7 +3430,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ return -EBUSY;
+ }
+
+- list_for_each_entry(vma, &obj->vma_list, vma_link) {
++ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+ if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+ ret = i915_vma_unbind(vma);
+ if (ret)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 5aa836e6e190..837cc6cd7472 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8688,11 +8688,22 @@ intel_pipe_config_compare(struct drm_device *dev,
+ PIPE_CONF_CHECK_I(requested_mode.hdisplay);
+ PIPE_CONF_CHECK_I(requested_mode.vdisplay);
+
+- PIPE_CONF_CHECK_I(gmch_pfit.control);
+- /* pfit ratios are autocomputed by the hw on gen4+ */
+- if (INTEL_INFO(dev)->gen < 4)
+- PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+- PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
++ /*
++ * FIXME: BIOS likes to set up a cloned config with lvds+external
++ * screen. Since we don't yet re-compute the pipe config when moving
++	 * just the lvds port to another pipe, the sw tracking won't match.
++ *
++ * Proper atomic modesets with recomputed global state will fix this.
++ * Until then just don't check gmch state for inherited modes.
++ */
++ if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
++ PIPE_CONF_CHECK_I(gmch_pfit.control);
++ /* pfit ratios are autocomputed by the hw on gen4+ */
++ if (INTEL_INFO(dev)->gen < 4)
++ PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
++ PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
++ }
++
+ PIPE_CONF_CHECK_I(pch_pfit.enabled);
+ if (current_config->pch_pfit.enabled) {
+ PIPE_CONF_CHECK_I(pch_pfit.pos);
+@@ -10097,6 +10108,9 @@ static struct intel_quirk intel_quirks[] = {
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
++ /* Acer Aspire 5336 */
++ { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
++
+ /* Dell XPS13 HD Sandy Bridge */
+ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+@@ -10228,15 +10242,6 @@ void intel_modeset_init(struct drm_device *dev)
+ intel_disable_fbc(dev);
+ }
+
+-static void
+-intel_connector_break_all_links(struct intel_connector *connector)
+-{
+- connector->base.dpms = DRM_MODE_DPMS_OFF;
+- connector->base.encoder = NULL;
+- connector->encoder->connectors_active = false;
+- connector->encoder->base.crtc = NULL;
+-}
+-
+ static void intel_enable_pipe_a(struct drm_device *dev)
+ {
+ struct intel_connector *connector;
+@@ -10318,8 +10323,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
+ if (connector->encoder->base.crtc != &crtc->base)
+ continue;
+
+- intel_connector_break_all_links(connector);
++ connector->base.dpms = DRM_MODE_DPMS_OFF;
++ connector->base.encoder = NULL;
+ }
++ /* multiple connectors may have the same encoder:
++ * handle them and break crtc link separately */
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ base.head)
++ if (connector->encoder->base.crtc == &crtc->base) {
++ connector->encoder->base.crtc = NULL;
++ connector->encoder->connectors_active = false;
++ }
+
+ WARN_ON(crtc->active);
+ crtc->base.enabled = false;
+@@ -10390,6 +10404,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
+ drm_get_encoder_name(&encoder->base));
+ encoder->disable(encoder);
+ }
++ encoder->base.crtc = NULL;
++ encoder->connectors_active = false;
+
+ /* Inconsistent output/port/pipe state happens presumably due to
+ * a bug in one of the get_hw_state functions. Or someplace else
+@@ -10400,8 +10416,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
+ base.head) {
+ if (connector->encoder != encoder)
+ continue;
+-
+- intel_connector_break_all_links(connector);
++ connector->base.dpms = DRM_MODE_DPMS_OFF;
++ connector->base.encoder = NULL;
+ }
+ }
+ /* Enabled encoders without active connectors will be fixed in
+@@ -10443,6 +10459,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
+ base.head) {
+ memset(&crtc->config, 0, sizeof(crtc->config));
+
++ crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
++
+ crtc->active = dev_priv->display.get_pipe_config(crtc,
+ &crtc->config);
+
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 7f2b384ac939..569c0c5ca450 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -204,7 +204,8 @@ struct intel_crtc_config {
+ * tracked with quirk flags so that fastboot and state checker can act
+ * accordingly.
+ */
+-#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
++#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
++#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
+ unsigned long quirks;
+
+ struct drm_display_mode requested_mode;
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 26c2ea3e985c..71a831ae73e9 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -1592,6 +1592,16 @@ static void i9xx_update_wm(struct drm_device *dev)
+
+ DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
++ if (IS_I915GM(dev) && enabled) {
++ struct intel_framebuffer *fb;
++
++ fb = to_intel_framebuffer(enabled->fb);
++
++ /* self-refresh seems busted with untiled */
++ if (fb->obj->tiling_mode == I915_TILING_NONE)
++ enabled = NULL;
++ }
++
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
+index ee7d6491f8cd..a9a015428b8b 100644
+--- a/drivers/gpu/drm/i915/intel_uncore.c
++++ b/drivers/gpu/drm/i915/intel_uncore.c
+@@ -166,6 +166,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+ {
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_DISABLE(0xffff));
++ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
++ _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
+ }
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+index 39f47b950ad1..c14cb093a64e 100644
+--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
++++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+@@ -54,8 +54,10 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
+
+ /* check that we're not already at the target duty cycle */
+ duty = fan->get(therm);
+- if (duty == target)
+- goto done;
++ if (duty == target) {
++ spin_unlock_irqrestore(&fan->lock, flags);
++ return 0;
++ }
+
+ /* smooth out the fanspeed increase/decrease */
+ if (!immediate && duty >= 0) {
+@@ -73,8 +75,15 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
+
+ nv_debug(therm, "FAN update: %d\n", duty);
+ ret = fan->set(therm, duty);
+- if (ret)
+- goto done;
++ if (ret) {
++ spin_unlock_irqrestore(&fan->lock, flags);
++ return ret;
++ }
++
++ /* fan speed updated, drop the fan lock before grabbing the
++ * alarm-scheduling lock and risking a deadlock
++ */
++ spin_unlock_irqrestore(&fan->lock, flags);
+
+ /* schedule next fan update, if not at target speed already */
+ if (list_empty(&fan->alarm.head) && target != duty) {
+@@ -92,8 +101,6 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
+ ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
+ }
+
+-done:
+- spin_unlock_irqrestore(&fan->lock, flags);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+index 200e8564c59d..efdb689024e3 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -419,9 +419,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+ acpi_status status;
+ acpi_handle dhandle, rom_handle;
+
+- if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
+- return false;
+-
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+index 7848590f5568..fb072e6a361d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -586,9 +586,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ }
+
+ ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
+- mutex_unlock(&chan->cli->mutex);
+ if (ret)
+ goto fail_unreserve;
++ mutex_unlock(&chan->cli->mutex);
+
+ /* Update the crtc struct and cleanup */
+ crtc->fb = fb;
+diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
+index 51e947a97edf..79682ff51b63 100644
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -5098,6 +5098,10 @@ int ci_dpm_init(struct radeon_device *rdev)
+ pi->mclk_dpm_key_disabled = 0;
+ pi->pcie_dpm_key_disabled = 0;
+
++ /* mclk dpm is unstable on some R7 260X cards */
++ if (rdev->pdev->device == 0x6658)
++ pi->mclk_dpm_key_disabled = 1;
++
+ pi->caps_sclk_ds = true;
+
+ pi->mclk_strobe_mode_threshold = 40000;
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index bcefa1de3e97..bb7f2ae7683d 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -5956,6 +5956,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
+ WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
++ /* pflip */
++ if (rdev->num_crtc >= 2) {
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
++ }
++ if (rdev->num_crtc >= 4) {
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
++ }
++ if (rdev->num_crtc >= 6) {
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
++ }
+
+ /* dac hotplug */
+ WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
+@@ -6312,6 +6325,25 @@ int cik_irq_set(struct radeon_device *rdev)
+ WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+ }
+
++ if (rdev->num_crtc >= 2) {
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
++ }
++ if (rdev->num_crtc >= 4) {
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
++ }
++ if (rdev->num_crtc >= 6) {
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
++ }
++
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+@@ -6348,6 +6380,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
+ rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+ rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
+
++ rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS +
++ EVERGREEN_CRTC0_REGISTER_OFFSET);
++ rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS +
++ EVERGREEN_CRTC1_REGISTER_OFFSET);
++ if (rdev->num_crtc >= 4) {
++ rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS +
++ EVERGREEN_CRTC2_REGISTER_OFFSET);
++ rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS +
++ EVERGREEN_CRTC3_REGISTER_OFFSET);
++ }
++ if (rdev->num_crtc >= 6) {
++ rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS +
++ EVERGREEN_CRTC4_REGISTER_OFFSET);
++ rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS +
++ EVERGREEN_CRTC5_REGISTER_OFFSET);
++ }
++
++ if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
++ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_CLEAR);
++ if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
++ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
+@@ -6358,6 +6413,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
+ WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+ if (rdev->num_crtc >= 4) {
++ if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
++ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_CLEAR);
++ if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
++ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+@@ -6369,6 +6430,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
+ }
+
+ if (rdev->num_crtc >= 6) {
++ if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
++ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_CLEAR);
++ if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
++ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+@@ -6720,6 +6787,15 @@ restart_ih:
+ break;
+ }
+ break;
++ case 8: /* D1 page flip */
++ case 10: /* D2 page flip */
++ case 12: /* D3 page flip */
++ case 14: /* D4 page flip */
++ case 16: /* D5 page flip */
++ case 18: /* D6 page flip */
++ DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
++ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
++ break;
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
+index d565f4076a23..0c6784f52410 100644
+--- a/drivers/gpu/drm/radeon/cik_sdma.c
++++ b/drivers/gpu/drm/radeon/cik_sdma.c
+@@ -512,7 +512,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+- r = radeon_ring_lock(rdev, ring, 4);
++ r = radeon_ring_lock(rdev, ring, 5);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
+index 203d2a09a1f5..70e88498a1fd 100644
+--- a/drivers/gpu/drm/radeon/cikd.h
++++ b/drivers/gpu/drm/radeon/cikd.h
+@@ -868,6 +868,15 @@
+ # define DC_HPD6_RX_INTERRUPT (1 << 18)
+ #define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780
+
++/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
++#define GRPH_INT_STATUS 0x6858
++# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
++# define GRPH_PFLIP_INT_CLEAR (1 << 8)
++/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
++#define GRPH_INT_CONTROL 0x685c
++# define GRPH_PFLIP_INT_MASK (1 << 0)
++# define GRPH_PFLIP_INT_TYPE (1 << 8)
++
+ #define DAC_AUTODETECT_INT_CONTROL 0x67c8
+
+ #define DC_HPD1_INT_STATUS 0x601c
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index c429bb9b17b6..e1b2470d3443 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4276,7 +4276,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
+ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+ u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+ u32 grbm_int_cntl = 0;
+- u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+ u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+ u32 dma_cntl, dma_cntl1 = 0;
+ u32 thermal_int = 0;
+@@ -4459,15 +4458,21 @@ int evergreen_irq_set(struct radeon_device *rdev)
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+ }
+
+- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
+ if (rdev->num_crtc >= 4) {
+- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
+ }
+ if (rdev->num_crtc >= 6) {
+- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
+ }
+
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+@@ -4856,6 +4861,15 @@ restart_ih:
+ break;
+ }
+ break;
++ case 8: /* D1 page flip */
++ case 10: /* D2 page flip */
++ case 12: /* D3 page flip */
++ case 14: /* D4 page flip */
++ case 16: /* D5 page flip */
++ case 18: /* D6 page flip */
++ DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
++ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
++ break;
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 5af2729f2055..2c2b91f16ecf 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -3371,7 +3371,6 @@ int r600_irq_set(struct radeon_device *rdev)
+ u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ u32 grbm_int_cntl = 0;
+ u32 hdmi0, hdmi1;
+- u32 d1grph = 0, d2grph = 0;
+ u32 dma_cntl;
+ u32 thermal_int = 0;
+
+@@ -3480,8 +3479,8 @@ int r600_irq_set(struct radeon_device *rdev)
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(DMA_CNTL, dma_cntl);
+ WREG32(DxMODE_INT_MASK, mode_int);
+- WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+- WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
++ WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
++ WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
+ WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+ if (ASIC_IS_DCE3(rdev)) {
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+@@ -3784,6 +3783,14 @@ restart_ih:
+ break;
+ }
+ break;
++ case 9: /* D1 pflip */
++ DRM_DEBUG("IH: D1 flip\n");
++ radeon_crtc_handle_flip(rdev, 0);
++ break;
++ case 11: /* D2 pflip */
++ DRM_DEBUG("IH: D2 flip\n");
++ radeon_crtc_handle_flip(rdev, 1);
++ break;
+ case 19: /* HPD/DAC hotplug */
+ switch (src_data) {
+ case 0:
+diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
+index 5513d8f06252..2df683aab9e9 100644
+--- a/drivers/gpu/drm/radeon/r600_dpm.c
++++ b/drivers/gpu/drm/radeon/r600_dpm.c
+@@ -158,16 +158,18 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
+ u32 line_time_us, vblank_lines;
+ u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+
+- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+- radeon_crtc = to_radeon_crtc(crtc);
+- if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+- line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
+- radeon_crtc->hw_mode.clock;
+- vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
+- radeon_crtc->hw_mode.crtc_vdisplay +
+- (radeon_crtc->v_border * 2);
+- vblank_time_us = vblank_lines * line_time_us;
+- break;
++ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ radeon_crtc = to_radeon_crtc(crtc);
++ if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
++ line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
++ radeon_crtc->hw_mode.clock;
++ vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
++ radeon_crtc->hw_mode.crtc_vdisplay +
++ (radeon_crtc->v_border * 2);
++ vblank_time_us = vblank_lines * line_time_us;
++ break;
++ }
+ }
+ }
+
+@@ -181,14 +183,15 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
+ struct radeon_crtc *radeon_crtc;
+ u32 vrefresh = 0;
+
+- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+- radeon_crtc = to_radeon_crtc(crtc);
+- if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+- vrefresh = radeon_crtc->hw_mode.vrefresh;
+- break;
++ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ radeon_crtc = to_radeon_crtc(crtc);
++ if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
++ vrefresh = radeon_crtc->hw_mode.vrefresh;
++ break;
++ }
+ }
+ }
+-
+ return vrefresh;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index b11433f75578..5c903a884bb8 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -715,6 +715,12 @@ struct cik_irq_stat_regs {
+ u32 disp_int_cont4;
+ u32 disp_int_cont5;
+ u32 disp_int_cont6;
++ u32 d1grph_int;
++ u32 d2grph_int;
++ u32 d3grph_int;
++ u32 d4grph_int;
++ u32 d5grph_int;
++ u32 d6grph_int;
+ };
+
+ union radeon_irq_stat_regs {
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index b8db0d7b5089..7c6e3fd70e65 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -525,6 +525,13 @@ static bool radeon_atpx_detect(void)
+ has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+ }
+
++ /* some newer PX laptops mark the dGPU as a non-VGA display device */
++ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
++ vga_count++;
++
++ has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
++ }
++
+ if (has_atpx && vga_count == 2) {
+ acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
+ printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 2f7fd3ff12c0..0254a7596a55 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -281,6 +281,10 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+ u32 update_pending;
+ int vpos, hpos;
+
++ /* can happen during initialization */
++ if (radeon_crtc == NULL)
++ return;
++
+ spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+ work = radeon_crtc->unpin_work;
+ if (work == NULL ||
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index a0ec4bb9d896..10fc97749a81 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -1362,12 +1362,14 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
+
+ rdev->pm.active_crtcs = 0;
+ rdev->pm.active_crtc_count = 0;
+- list_for_each_entry(crtc,
+- &ddev->mode_config.crtc_list, head) {
+- radeon_crtc = to_radeon_crtc(crtc);
+- if (radeon_crtc->enabled) {
+- rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+- rdev->pm.active_crtc_count++;
++ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
++ list_for_each_entry(crtc,
++ &ddev->mode_config.crtc_list, head) {
++ radeon_crtc = to_radeon_crtc(crtc);
++ if (radeon_crtc->enabled) {
++ rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
++ rdev->pm.active_crtc_count++;
++ }
+ }
+ }
+
+@@ -1431,12 +1433,14 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
+ /* update active crtc counts */
+ rdev->pm.dpm.new_active_crtcs = 0;
+ rdev->pm.dpm.new_active_crtc_count = 0;
+- list_for_each_entry(crtc,
+- &ddev->mode_config.crtc_list, head) {
+- radeon_crtc = to_radeon_crtc(crtc);
+- if (crtc->enabled) {
+- rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
+- rdev->pm.dpm.new_active_crtc_count++;
++ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
++ list_for_each_entry(crtc,
++ &ddev->mode_config.crtc_list, head) {
++ radeon_crtc = to_radeon_crtc(crtc);
++ if (crtc->enabled) {
++ rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
++ rdev->pm.dpm.new_active_crtc_count++;
++ }
+ }
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
+index 33858364fe89..7e48c359b04c 100644
+--- a/drivers/gpu/drm/radeon/radeon_ucode.h
++++ b/drivers/gpu/drm/radeon/radeon_ucode.h
+@@ -57,6 +57,9 @@
+ #define BTC_MC_UCODE_SIZE 6024
+ #define CAYMAN_MC_UCODE_SIZE 6037
+ #define SI_MC_UCODE_SIZE 7769
++#define TAHITI_MC_UCODE_SIZE 7808
++#define PITCAIRN_MC_UCODE_SIZE 7775
++#define VERDE_MC_UCODE_SIZE 7875
+ #define OLAND_MC_UCODE_SIZE 7863
+ #define CIK_MC_UCODE_SIZE 7866
+
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index 83936473f8e4..a656b1a7e10a 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -464,6 +464,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
+ cmd = radeon_get_ib_value(p, p->idx) >> 1;
+
+ if (cmd < 0x4) {
++ if (end <= start) {
++ DRM_ERROR("invalid reloc offset %X!\n", offset);
++ return -EINVAL;
++ }
+ if ((end - start) < buf_sizes[cmd]) {
+ DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
+ (unsigned)(end - start), buf_sizes[cmd]);
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 873e0a608948..c9f9c07f888d 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -39,30 +39,35 @@ MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
+ MODULE_FIRMWARE("radeon/TAHITI_me.bin");
+ MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
+ MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
++MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
+ MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
+ MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
+ MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
+ MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
+ MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
+ MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
++MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
+ MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
+ MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
+ MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
+ MODULE_FIRMWARE("radeon/VERDE_me.bin");
+ MODULE_FIRMWARE("radeon/VERDE_ce.bin");
+ MODULE_FIRMWARE("radeon/VERDE_mc.bin");
++MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
+ MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
+ MODULE_FIRMWARE("radeon/VERDE_smc.bin");
+ MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
+ MODULE_FIRMWARE("radeon/OLAND_me.bin");
+ MODULE_FIRMWARE("radeon/OLAND_ce.bin");
+ MODULE_FIRMWARE("radeon/OLAND_mc.bin");
++MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
+ MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
+ MODULE_FIRMWARE("radeon/OLAND_smc.bin");
+ MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
+ MODULE_FIRMWARE("radeon/HAINAN_me.bin");
+ MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
+ MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
++MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
+ MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
+ MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
+
+@@ -1470,36 +1475,33 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
+ const __be32 *fw_data;
+ u32 running, blackout = 0;
+ u32 *io_mc_regs;
+- int i, ucode_size, regs_size;
++ int i, regs_size, ucode_size;
+
+ if (!rdev->mc_fw)
+ return -EINVAL;
+
++ ucode_size = rdev->mc_fw->size / 4;
++
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ io_mc_regs = (u32 *)&tahiti_io_mc_regs;
+- ucode_size = SI_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_PITCAIRN:
+ io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
+- ucode_size = SI_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_VERDE:
+ default:
+ io_mc_regs = (u32 *)&verde_io_mc_regs;
+- ucode_size = SI_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_OLAND:
+ io_mc_regs = (u32 *)&oland_io_mc_regs;
+- ucode_size = OLAND_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_HAINAN:
+ io_mc_regs = (u32 *)&hainan_io_mc_regs;
+- ucode_size = OLAND_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ }
+@@ -1555,7 +1557,7 @@ static int si_init_microcode(struct radeon_device *rdev)
+ const char *chip_name;
+ const char *rlc_chip_name;
+ size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
+- size_t smc_req_size;
++ size_t smc_req_size, mc2_req_size;
+ char fw_name[30];
+ int err;
+
+@@ -1570,6 +1572,7 @@ static int si_init_microcode(struct radeon_device *rdev)
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = SI_MC_UCODE_SIZE * 4;
++ mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
+ smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
+ break;
+ case CHIP_PITCAIRN:
+@@ -1580,6 +1583,7 @@ static int si_init_microcode(struct radeon_device *rdev)
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = SI_MC_UCODE_SIZE * 4;
++ mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
+ smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
+ break;
+ case CHIP_VERDE:
+@@ -1590,6 +1594,7 @@ static int si_init_microcode(struct radeon_device *rdev)
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = SI_MC_UCODE_SIZE * 4;
++ mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
+ smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
+ break;
+ case CHIP_OLAND:
+@@ -1599,7 +1604,7 @@ static int si_init_microcode(struct radeon_device *rdev)
+ me_req_size = SI_PM4_UCODE_SIZE * 4;
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+- mc_req_size = OLAND_MC_UCODE_SIZE * 4;
++ mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
+ smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
+ break;
+ case CHIP_HAINAN:
+@@ -1609,7 +1614,7 @@ static int si_init_microcode(struct radeon_device *rdev)
+ me_req_size = SI_PM4_UCODE_SIZE * 4;
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+- mc_req_size = OLAND_MC_UCODE_SIZE * 4;
++ mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
+ smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
+ break;
+ default: BUG();
+@@ -1662,16 +1667,22 @@ static int si_init_microcode(struct radeon_device *rdev)
+ err = -EINVAL;
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+- if (err)
+- goto out;
+- if (rdev->mc_fw->size != mc_req_size) {
++ if (err) {
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
++ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
++ if (err)
++ goto out;
++ }
++ if ((rdev->mc_fw->size != mc_req_size) &&
++ (rdev->mc_fw->size != mc2_req_size)) {
+ printk(KERN_ERR
+ "si_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
++ DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+@@ -5720,7 +5731,6 @@ int si_irq_set(struct radeon_device *rdev)
+ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+ u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ u32 grbm_int_cntl = 0;
+- u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+ u32 dma_cntl, dma_cntl1;
+ u32 thermal_int = 0;
+
+@@ -5859,16 +5869,22 @@ int si_irq_set(struct radeon_device *rdev)
+ }
+
+ if (rdev->num_crtc >= 2) {
+- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
+ }
+ if (rdev->num_crtc >= 4) {
+- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
+ }
+ if (rdev->num_crtc >= 6) {
+- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ GRPH_PFLIP_INT_MASK);
+ }
+
+ if (!ASIC_IS_NODCE(rdev)) {
+@@ -6232,6 +6248,15 @@ restart_ih:
+ break;
+ }
+ break;
++ case 8: /* D1 page flip */
++ case 10: /* D2 page flip */
++ case 12: /* D3 page flip */
++ case 14: /* D4 page flip */
++ case 16: /* D5 page flip */
++ case 18: /* D6 page flip */
++ DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
++ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
++ break;
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
+index 7266805d9786..f680f5ffbdeb 100644
+--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
++++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
+@@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev)
+ int r;
+
+ /* raise clocks while booting up the VCPU */
+- radeon_set_uvd_clocks(rdev, 53300, 40000);
++ if (rdev->family < CHIP_RV740)
++ radeon_set_uvd_clocks(rdev, 10000, 10000);
++ else
++ radeon_set_uvd_clocks(rdev, 53300, 40000);
+
+ r = uvd_v1_0_start(rdev);
+ if (r)
+@@ -405,7 +408,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+ struct radeon_fence *fence = NULL;
+ int r;
+
+- r = radeon_set_uvd_clocks(rdev, 53300, 40000);
++ if (rdev->family < CHIP_RV740)
++ r = radeon_set_uvd_clocks(rdev, 10000, 10000);
++ else
++ r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+ if (r) {
+ DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
+ return r;
+diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
+index b592eef1efcb..b083509325e4 100644
+--- a/drivers/gpu/host1x/hw/intr_hw.c
++++ b/drivers/gpu/host1x/hw/intr_hw.c
+@@ -48,7 +48,7 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
+ unsigned long reg;
+ int i, id;
+
+- for (i = 0; i <= BIT_WORD(host->info->nb_pts); i++) {
++ for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
+ reg = host1x_sync_readl(host,
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
+ for_each_set_bit(id, &reg, BITS_PER_LONG) {
+@@ -65,7 +65,7 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
+ {
+ u32 i;
+
+- for (i = 0; i <= BIT_WORD(host->info->nb_pts); ++i) {
++ for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
+ host1x_sync_writel(host, 0xffffffffu,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
+ host1x_sync_writel(host, 0xffffffffu,
+diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
+index 142e1cb8dea7..361f50b221bd 100644
+--- a/drivers/hwmon/emc1403.c
++++ b/drivers/hwmon/emc1403.c
+@@ -162,7 +162,7 @@ static ssize_t store_hyst(struct device *dev,
+ if (retval < 0)
+ goto fail;
+
+- hyst = val - retval * 1000;
++ hyst = retval * 1000 - val;
+ hyst = DIV_ROUND_CLOSEST(hyst, 1000);
+ if (hyst < 0 || hyst > 255) {
+ retval = -ERANGE;
+@@ -295,7 +295,7 @@ static int emc1403_detect(struct i2c_client *client,
+ }
+
+ id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
+- if (id != 0x01)
++ if (id < 0x01 || id > 0x04)
+ return -ENODEV;
+
+ return 0;
+diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
+index 5888feef1ac5..a4dd9bdf737b 100644
+--- a/drivers/i2c/busses/i2c-designware-core.c
++++ b/drivers/i2c/busses/i2c-designware-core.c
+@@ -418,6 +418,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
+ */
+ dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
+
++ /* enforce disabled interrupts (due to HW issues) */
++ i2c_dw_disable_int(dev);
++
+ /* Enable the adapter */
+ __i2c_dw_enable(dev, true);
+
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index d2fe11da5e82..c8a42602205b 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -560,6 +560,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
+
+ ret = -EINVAL;
+ for (i = 0; i < num; i++) {
++ /* This HW can't send STOP after address phase */
++ if (msgs[i].len == 0) {
++ ret = -EOPNOTSUPP;
++ break;
++ }
++
+ /*-------------- spin lock -----------------*/
+ spin_lock_irqsave(&priv->lock, flags);
+
+@@ -624,7 +630,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
+
+ static u32 rcar_i2c_func(struct i2c_adapter *adap)
+ {
+- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
++ /* This HW can't do SMBUS_QUICK and NOSTART */
++ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ }
+
+ static const struct i2c_algorithm rcar_i2c_algo = {
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index 3747b9bf67d6..f7d572363f6c 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -1200,10 +1200,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+
+- i2c->suspended = 0;
+ clk_prepare_enable(i2c->clk);
+ s3c24xx_i2c_init(i2c);
+ clk_disable_unprepare(i2c->clk);
++ i2c->suspended = 0;
+
+ return 0;
+ }
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+index df7f1e1157ae..27a91768cc72 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+@@ -661,6 +661,7 @@ static int inv_mpu_probe(struct i2c_client *client,
+ {
+ struct inv_mpu6050_state *st;
+ struct iio_dev *indio_dev;
++ struct inv_mpu6050_platform_data *pdata;
+ int result;
+
+ if (!i2c_check_functionality(client->adapter,
+@@ -673,8 +674,10 @@ static int inv_mpu_probe(struct i2c_client *client,
+
+ st = iio_priv(indio_dev);
+ st->client = client;
+- st->plat_data = *(struct inv_mpu6050_platform_data
+- *)dev_get_platdata(&client->dev);
++ pdata = (struct inv_mpu6050_platform_data
++ *)dev_get_platdata(&client->dev);
++ if (pdata)
++ st->plat_data = *pdata;
+ /* power is turned on inside check chip type*/
+ result = inv_check_and_setup_chip(st, id);
+ if (result)
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 02e4d2efa208..e6737607a088 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -27,6 +27,7 @@
+ #include <target/target_core_base.h>
+ #include <target/target_core_fabric.h>
+ #include <target/iscsi/iscsi_transport.h>
++#include <linux/semaphore.h>
+
+ #include "isert_proto.h"
+ #include "ib_isert.h"
+@@ -572,11 +573,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ goto out_conn_dev;
+
+ mutex_lock(&isert_np->np_accept_mutex);
+- list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node);
++ list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
+ mutex_unlock(&isert_np->np_accept_mutex);
+
+- pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
+- wake_up(&isert_np->np_accept_wq);
++ pr_debug("isert_connect_request() up np_sem np: %p\n", np);
++ up(&isert_np->np_sem);
+ return 0;
+
+ out_conn_dev:
+@@ -2477,7 +2478,7 @@ isert_setup_np(struct iscsi_np *np,
+ pr_err("Unable to allocate struct isert_np\n");
+ return -ENOMEM;
+ }
+- init_waitqueue_head(&isert_np->np_accept_wq);
++ sema_init(&isert_np->np_sem, 0);
+ mutex_init(&isert_np->np_accept_mutex);
+ INIT_LIST_HEAD(&isert_np->np_accept_list);
+ init_completion(&isert_np->np_login_comp);
+@@ -2526,18 +2527,6 @@ out:
+ }
+
+ static int
+-isert_check_accept_queue(struct isert_np *isert_np)
+-{
+- int empty;
+-
+- mutex_lock(&isert_np->np_accept_mutex);
+- empty = list_empty(&isert_np->np_accept_list);
+- mutex_unlock(&isert_np->np_accept_mutex);
+-
+- return empty;
+-}
+-
+-static int
+ isert_rdma_accept(struct isert_conn *isert_conn)
+ {
+ struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
+@@ -2629,16 +2618,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+ int max_accept = 0, ret;
+
+ accept_wait:
+- ret = wait_event_interruptible(isert_np->np_accept_wq,
+- !isert_check_accept_queue(isert_np) ||
+- np->np_thread_state == ISCSI_NP_THREAD_RESET);
++ ret = down_interruptible(&isert_np->np_sem);
+ if (max_accept > 5)
+ return -ENODEV;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ spin_unlock_bh(&np->np_thread_lock);
+- pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
++ pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+ return -ENODEV;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
+index 52f4bf0d1a0f..ba695c33a2df 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -154,7 +154,7 @@ struct isert_device {
+ };
+
+ struct isert_np {
+- wait_queue_head_t np_accept_wq;
++ struct semaphore np_sem;
+ struct rdma_cm_id *np_cm_id;
+ struct mutex np_accept_mutex;
+ struct list_head np_accept_list;
+diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
+index 2626773ff29b..2dd1d0dd4f7d 100644
+--- a/drivers/input/keyboard/atkbd.c
++++ b/drivers/input/keyboard/atkbd.c
+@@ -243,6 +243,12 @@ static void (*atkbd_platform_fixup)(struct atkbd *, const void *data);
+ static void *atkbd_platform_fixup_data;
+ static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int);
+
++/*
++ * Certain keyboards do not like ATKBD_CMD_RESET_DIS and stop responding
++ * to many commands until full reset (ATKBD_CMD_RESET_BAT) is performed.
++ */
++static bool atkbd_skip_deactivate;
++
+ static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
+ ssize_t (*handler)(struct atkbd *, char *));
+ static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
+@@ -768,7 +774,8 @@ static int atkbd_probe(struct atkbd *atkbd)
+ * Make sure nothing is coming from the keyboard and disturbs our
+ * internal state.
+ */
+- atkbd_deactivate(atkbd);
++ if (!atkbd_skip_deactivate)
++ atkbd_deactivate(atkbd);
+
+ return 0;
+ }
+@@ -1638,6 +1645,12 @@ static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id)
+ return 1;
+ }
+
++static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id)
++{
++ atkbd_skip_deactivate = true;
++ return 1;
++}
++
+ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
+ {
+ .matches = {
+@@ -1775,6 +1788,20 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
+ .callback = atkbd_setup_scancode_fixup,
+ .driver_data = atkbd_oqo_01plus_scancode_fixup,
+ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
++ },
++ .callback = atkbd_deactivate_fixup,
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
++ },
++ .callback = atkbd_deactivate_fixup,
++ },
+ { }
+ };
+
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index ef1cf52f8bb9..230cdcf8e6fe 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -11,6 +11,7 @@
+ */
+
+ #include <linux/delay.h>
++#include <linux/dmi.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
+ #include <linux/input.h>
+@@ -831,7 +832,11 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
+ break;
+
+ case 3:
+- etd->reg_10 = 0x0b;
++ if (etd->set_hw_resolution)
++ etd->reg_10 = 0x0b;
++ else
++ etd->reg_10 = 0x03;
++
+ if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
+ rc = -1;
+
+@@ -1331,6 +1336,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
+ }
+
+ /*
++ * Some hw_version 3 models go into error state when we try to set bit 3 of r10
++ */
++static const struct dmi_system_id no_hw_res_dmi_table[] = {
++#if defined(CONFIG_DMI) && defined(CONFIG_X86)
++ {
++ /* Gigabyte U2442 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
++ },
++ },
++#endif
++ { }
++};
++
++/*
+ * determine hardware version and set some properties according to it.
+ */
+ static int elantech_set_properties(struct elantech_data *etd)
+@@ -1389,6 +1410,9 @@ static int elantech_set_properties(struct elantech_data *etd)
+ */
+ etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
+
++	/* Enable real hardware resolution on hw_version 3? */
++ etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
++
+ return 0;
+ }
+
+diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
+index 036a04abaef7..9e0e2a1f340d 100644
+--- a/drivers/input/mouse/elantech.h
++++ b/drivers/input/mouse/elantech.h
+@@ -130,6 +130,7 @@ struct elantech_data {
+ bool jumpy_cursor;
+ bool reports_pressure;
+ bool crc_enabled;
++ bool set_hw_resolution;
+ unsigned char hw_version;
+ unsigned int fw_version;
+ unsigned int single_finger_reports;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 3c511c4adaca..f6fbba53f5d5 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1552,7 +1552,7 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
+ },
+- .driver_data = (int []){1024, 5056, 2058, 4832},
++ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
+ /* Lenovo ThinkPad L540 */
+@@ -1563,6 +1563,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
++ /* Lenovo ThinkPad W540 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W540"),
++ },
++ .driver_data = (int []){1024, 5112, 2024, 4832},
++ },
++ {
+ /* Lenovo Yoga S1 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 5d2edb4b60aa..22f656e125dd 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -3999,7 +3999,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
+ iommu_flush_dte(iommu, devid);
+ if (devid != alias) {
+ irq_lookup_table[alias] = table;
+- set_dte_irq_entry(devid, table);
++ set_dte_irq_entry(alias, table);
+ iommu_flush_dte(iommu, alias);
+ }
+
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index d0e948084eaf..86df97f6fd27 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -246,10 +246,14 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+ {
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+- unsigned int shift = (gic_irq(d) % 4) * 8;
+- unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
++ unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
+ u32 val, mask, bit;
+
++ if (!force)
++ cpu = cpumask_any_and(mask_val, cpu_online_mask);
++ else
++ cpu = cpumask_first(mask_val);
++
+ if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
+index bb6f94898541..305e88a6f625 100644
+--- a/drivers/leds/leds-pwm.c
++++ b/drivers/leds/leds-pwm.c
+@@ -82,6 +82,15 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
+ (sizeof(struct led_pwm_data) * num_leds);
+ }
+
++static void led_pwm_cleanup(struct led_pwm_priv *priv)
++{
++ while (priv->num_leds--) {
++ led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
++ if (priv->leds[priv->num_leds].can_sleep)
++ cancel_work_sync(&priv->leds[priv->num_leds].work);
++ }
++}
++
+ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
+ {
+ struct device_node *node = pdev->dev.of_node;
+@@ -139,8 +148,7 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
+
+ return priv;
+ err:
+- while (priv->num_leds--)
+- led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
++ led_pwm_cleanup(priv);
+
+ return NULL;
+ }
+@@ -200,8 +208,8 @@ static int led_pwm_probe(struct platform_device *pdev)
+ return 0;
+
+ err:
+- while (i--)
+- led_classdev_unregister(&priv->leds[i].cdev);
++ priv->num_leds = i;
++ led_pwm_cleanup(priv);
+
+ return ret;
+ }
+@@ -209,13 +217,8 @@ err:
+ static int led_pwm_remove(struct platform_device *pdev)
+ {
+ struct led_pwm_priv *priv = platform_get_drvdata(pdev);
+- int i;
+
+- for (i = 0; i < priv->num_leds; i++) {
+- led_classdev_unregister(&priv->leds[i].cdev);
+- if (priv->leds[i].can_sleep)
+- cancel_work_sync(&priv->leds[i].work);
+- }
++ led_pwm_cleanup(priv);
+
+ return 0;
+ }
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 0fce0bc1a957..c513e5e4cde6 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -18,7 +18,6 @@
+ #include <linux/crypto.h>
+ #include <linux/workqueue.h>
+ #include <linux/backing-dev.h>
+-#include <linux/percpu.h>
+ #include <linux/atomic.h>
+ #include <linux/scatterlist.h>
+ #include <asm/page.h>
+@@ -44,6 +43,7 @@ struct convert_context {
+ unsigned int idx_out;
+ sector_t cc_sector;
+ atomic_t cc_pending;
++ struct ablkcipher_request *req;
+ };
+
+ /*
+@@ -105,15 +105,7 @@ struct iv_lmk_private {
+ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
+
+ /*
+- * Duplicated per-CPU state for cipher.
+- */
+-struct crypt_cpu {
+- struct ablkcipher_request *req;
+-};
+-
+-/*
+- * The fields in here must be read only after initialization,
+- * changing state should be in crypt_cpu.
++ * The fields in here must be read only after initialization.
+ */
+ struct crypt_config {
+ struct dm_dev *dev;
+@@ -143,12 +135,6 @@ struct crypt_config {
+ sector_t iv_offset;
+ unsigned int iv_size;
+
+- /*
+- * Duplicated per cpu state. Access through
+- * per_cpu_ptr() only.
+- */
+- struct crypt_cpu __percpu *cpu;
+-
+ /* ESSIV: struct crypto_cipher *essiv_tfm */
+ void *iv_private;
+ struct crypto_ablkcipher **tfms;
+@@ -184,11 +170,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
+ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
+ static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
+
+-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
+-{
+- return this_cpu_ptr(cc->cpu);
+-}
+-
+ /*
+ * Use this to access cipher attributes that are the same for each CPU.
+ */
+@@ -738,16 +719,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ static void crypt_alloc_req(struct crypt_config *cc,
+ struct convert_context *ctx)
+ {
+- struct crypt_cpu *this_cc = this_crypt_config(cc);
+ unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
+
+- if (!this_cc->req)
+- this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
++ if (!ctx->req)
++ ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+
+- ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
+- ablkcipher_request_set_callback(this_cc->req,
++ ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
++ ablkcipher_request_set_callback(ctx->req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+- kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
++ kcryptd_async_done, dmreq_of_req(cc, ctx->req));
+ }
+
+ /*
+@@ -756,7 +736,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
+ static int crypt_convert(struct crypt_config *cc,
+ struct convert_context *ctx)
+ {
+- struct crypt_cpu *this_cc = this_crypt_config(cc);
+ int r;
+
+ atomic_set(&ctx->cc_pending, 1);
+@@ -768,7 +747,7 @@ static int crypt_convert(struct crypt_config *cc,
+
+ atomic_inc(&ctx->cc_pending);
+
+- r = crypt_convert_block(cc, ctx, this_cc->req);
++ r = crypt_convert_block(cc, ctx, ctx->req);
+
+ switch (r) {
+ /* async */
+@@ -777,7 +756,7 @@ static int crypt_convert(struct crypt_config *cc,
+ INIT_COMPLETION(ctx->restart);
+ /* fall through*/
+ case -EINPROGRESS:
+- this_cc->req = NULL;
++ ctx->req = NULL;
+ ctx->cc_sector++;
+ continue;
+
+@@ -876,6 +855,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
+ io->sector = sector;
+ io->error = 0;
+ io->base_io = NULL;
++ io->ctx.req = NULL;
+ atomic_set(&io->io_pending, 0);
+
+ return io;
+@@ -901,6 +881,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
+ if (!atomic_dec_and_test(&io->io_pending))
+ return;
+
++ if (io->ctx.req)
++ mempool_free(io->ctx.req, cc->req_pool);
+ mempool_free(io, cc->io_pool);
+
+ if (likely(!base_io))
+@@ -1326,8 +1308,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
+ static void crypt_dtr(struct dm_target *ti)
+ {
+ struct crypt_config *cc = ti->private;
+- struct crypt_cpu *cpu_cc;
+- int cpu;
+
+ ti->private = NULL;
+
+@@ -1339,13 +1319,6 @@ static void crypt_dtr(struct dm_target *ti)
+ if (cc->crypt_queue)
+ destroy_workqueue(cc->crypt_queue);
+
+- if (cc->cpu)
+- for_each_possible_cpu(cpu) {
+- cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+- if (cpu_cc->req)
+- mempool_free(cpu_cc->req, cc->req_pool);
+- }
+-
+ crypt_free_tfms(cc);
+
+ if (cc->bs)
+@@ -1364,9 +1337,6 @@ static void crypt_dtr(struct dm_target *ti)
+ if (cc->dev)
+ dm_put_device(ti, cc->dev);
+
+- if (cc->cpu)
+- free_percpu(cc->cpu);
+-
+ kzfree(cc->cipher);
+ kzfree(cc->cipher_string);
+
+@@ -1421,13 +1391,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
+ if (tmp)
+ DMWARN("Ignoring unexpected additional cipher options");
+
+- cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
+- __alignof__(struct crypt_cpu));
+- if (!cc->cpu) {
+- ti->error = "Cannot allocate per cpu state";
+- goto bad_mem;
+- }
+-
+ /*
+ * For compatibility with the original dm-crypt mapping format, if
+ * only the cipher name is supplied, use cbc-plain.
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 015bc455cf1c..0ed6daf3b1e4 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8520,7 +8520,8 @@ static int md_notify_reboot(struct notifier_block *this,
+ if (mddev_trylock(mddev)) {
+ if (mddev->pers)
+ __md_stop_writes(mddev);
+- mddev->safemode = 2;
++ if (mddev->persistent)
++ mddev->safemode = 2;
+ mddev_unlock(mddev);
+ }
+ need_delay = 1;
+diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
+index e8a1ce204036..cdd7c1b7259b 100644
+--- a/drivers/media/i2c/ov7670.c
++++ b/drivers/media/i2c/ov7670.c
+@@ -1109,7 +1109,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
+ * windows that fall outside that.
+ */
+ for (i = 0; i < n_win_sizes; i++) {
+- struct ov7670_win_size *win = &info->devtype->win_sizes[index];
++ struct ov7670_win_size *win = &info->devtype->win_sizes[i];
+ if (info->min_width && win->width < info->min_width)
+ continue;
+ if (info->min_height && win->height < info->min_height)
+diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
+index d5a7a135f75d..703560fa5e73 100644
+--- a/drivers/media/media-device.c
++++ b/drivers/media/media-device.c
+@@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev,
+ struct media_entity *ent;
+ struct media_entity_desc u_ent;
+
++ memset(&u_ent, 0, sizeof(u_ent));
+ if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
+ return -EFAULT;
+
+diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
+index 3aecaf465094..f0c9c42867de 100644
+--- a/drivers/media/tuners/fc2580.c
++++ b/drivers/media/tuners/fc2580.c
+@@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe)
+
+ f_ref = 2UL * priv->cfg->clock / r_val;
+ n_val = div_u64_rem(f_vco, f_ref, &k_val);
+- k_val_reg = 1UL * k_val * (1 << 20) / f_ref;
++ k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref);
+
+ ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff));
+ if (ret < 0)
+@@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe)
+ if (ret < 0)
+ goto err;
+
+- ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \
+- fc2580_if_filter_lut[i].mul / 1000000000);
++ ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock *
++ fc2580_if_filter_lut[i].mul, 1000000000));
+ if (ret < 0)
+ goto err;
+
+diff --git a/drivers/media/tuners/fc2580_priv.h b/drivers/media/tuners/fc2580_priv.h
+index be38a9e637e0..646c99452136 100644
+--- a/drivers/media/tuners/fc2580_priv.h
++++ b/drivers/media/tuners/fc2580_priv.h
+@@ -22,6 +22,7 @@
+ #define FC2580_PRIV_H
+
+ #include "fc2580.h"
++#include <linux/math64.h>
+
+ struct fc2580_reg_val {
+ u8 reg;
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index b63a5e584aa0..fca336b65351 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
+
+ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+ {
++ if (get_user(kp->type, &up->type))
++ return -EFAULT;
++
+ switch (kp->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+@@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
+
+ static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+ {
+- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
+- get_user(kp->type, &up->type))
+- return -EFAULT;
++ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
++ return -EFAULT;
+ return __get_v4l2_format32(kp, up);
+ }
+
+ static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+ {
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
+- copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
+- return -EFAULT;
++ copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
++ return -EFAULT;
+ return __get_v4l2_format32(&kp->format, &up->format);
+ }
+
+diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
+index 110c03627051..b59a17fb7c3e 100644
+--- a/drivers/memory/mvebu-devbus.c
++++ b/drivers/memory/mvebu-devbus.c
+@@ -108,8 +108,19 @@ static int devbus_set_timing_params(struct devbus *devbus,
+ node->full_name);
+ return err;
+ }
+- /* Convert bit width to byte width */
+- r.bus_width /= 8;
++
++ /*
++ * The bus width is encoded into the register as 0 for 8 bits,
++ * and 1 for 16 bits, so we do the necessary conversion here.
++ */
++ if (r.bus_width == 8)
++ r.bus_width = 0;
++ else if (r.bus_width == 16)
++ r.bus_width = 1;
++ else {
++ dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width);
++ return -EINVAL;
++ }
+
+ err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
+ &r.badr_skew);
+diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
+index f8cff1f0b6b7..2b724fc4e306 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -623,20 +623,18 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
+ bss_conf->bssid);
+
+ /*
+- * Update the beacon. This is only required on USB devices. PCI
+- * devices fetch beacons periodically.
+- */
+- if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
+- rt2x00queue_update_beacon(rt2x00dev, vif);
+-
+- /*
+ * Start/stop beaconing.
+ */
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (!bss_conf->enable_beacon && intf->enable_beacon) {
+- rt2x00queue_clear_beacon(rt2x00dev, vif);
+ rt2x00dev->intf_beaconing--;
+ intf->enable_beacon = false;
++ /*
++ * Clear beacon in the H/W for this vif. This is needed
++ * to disable beaconing on this particular interface
++ * and keep it running on other interfaces.
++ */
++ rt2x00queue_clear_beacon(rt2x00dev, vif);
+
+ if (rt2x00dev->intf_beaconing == 0) {
+ /*
+@@ -647,11 +645,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
+ rt2x00queue_stop_queue(rt2x00dev->bcn);
+ mutex_unlock(&intf->beacon_skb_mutex);
+ }
+-
+-
+ } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
+ rt2x00dev->intf_beaconing++;
+ intf->enable_beacon = true;
++ /*
++ * Upload beacon to the H/W. This is only required on
++ * USB devices. PCI devices fetch beacons periodically.
++ */
++ if (rt2x00_is_usb(rt2x00dev))
++ rt2x00queue_update_beacon(rt2x00dev, vif);
+
+ if (rt2x00dev->intf_beaconing == 1) {
+ /*
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+index 324aa581938e..c3f2b55501ae 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+@@ -1001,7 +1001,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
+ err = _rtl92cu_init_mac(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
+- return err;
++ goto exit;
+ }
+ err = rtl92c_download_fw(hw);
+ if (err) {
+diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
+index b4214cba58b7..fdd81f24a9cf 100644
+--- a/drivers/pci/host/pci-mvebu.c
++++ b/drivers/pci/host/pci-mvebu.c
+@@ -297,7 +297,7 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+ port->iowin_base = port->pcie->io.start + iobase;
+ port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
+ (port->bridge.iolimitupper << 16)) -
+- iobase);
++ iobase) + 1;
+
+ mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr,
+ port->iowin_base, port->iowin_size,
+@@ -331,7 +331,7 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+ port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
+ port->memwin_size =
+ (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+- port->memwin_base;
++ port->memwin_base + 1;
+
+ mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr,
+ port->memwin_base, port->memwin_size);
+diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
+index 58499277903a..6efc2ec5e4db 100644
+--- a/drivers/pci/hotplug/shpchp_ctrl.c
++++ b/drivers/pci/hotplug/shpchp_ctrl.c
+@@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot)
+ return WRONG_BUS_FREQUENCY;
+ }
+
+- bsp = ctrl->pci_dev->bus->cur_bus_speed;
+- msp = ctrl->pci_dev->bus->max_bus_speed;
++ bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
++ msp = ctrl->pci_dev->subordinate->max_bus_speed;
+
+ /* Check if there are other slots or devices on the same bus */
+ if (!list_empty(&ctrl->pci_dev->subordinate->devices))
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 9e039c60c068..d254477372b9 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1370,7 +1370,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
+ */
+ int spi_setup(struct spi_device *spi)
+ {
+- unsigned bad_bits;
++ unsigned bad_bits, ugly_bits;
+ int status = 0;
+
+ /* check mode to prevent that DUAL and QUAD set at the same time
+@@ -1390,6 +1390,15 @@ int spi_setup(struct spi_device *spi)
+ * that aren't supported with their current master
+ */
+ bad_bits = spi->mode & ~spi->master->mode_bits;
++ ugly_bits = bad_bits &
++ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
++ if (ugly_bits) {
++ dev_warn(&spi->dev,
++ "setup: ignoring unsupported mode bits %x\n",
++ ugly_bits);
++ spi->mode &= ~ugly_bits;
++ bad_bits &= ~ugly_bits;
++ }
+ if (bad_bits) {
+ dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
+ bad_bits);
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index b47c2be1c427..004e484a71cd 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1579,7 +1579,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ * Initiator is expecting a NopIN ping reply..
+ */
+ if (hdr->itt != RESERVED_ITT) {
+- BUG_ON(!cmd);
++ if (!cmd)
++ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
++ (unsigned char *)hdr);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index e5e39658034c..e31ec5cf0c36 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -799,10 +799,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
+ pr_err("emulate_write_cache not supported for pSCSI\n");
+ return -EINVAL;
+ }
+- if (dev->transport->get_write_cache) {
+- pr_warn("emulate_write_cache cannot be changed when underlying"
+- " HW reports WriteCacheEnabled, ignoring request\n");
+- return 0;
++ if (flag &&
++ dev->transport->get_write_cache) {
++ pr_err("emulate_write_cache not supported for this device\n");
++ return -EINVAL;
+ }
+
+ dev->dev_attrib.emulate_write_cache = flag;
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 0b0009b5d4db..179141e03cb3 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1103,6 +1103,7 @@ void transport_init_se_cmd(
+ init_completion(&cmd->cmd_wait_comp);
+ init_completion(&cmd->task_stop_comp);
+ spin_lock_init(&cmd->t_state_lock);
++ kref_init(&cmd->cmd_kref);
+ cmd->transport_state = CMD_T_DEV_ACTIVE;
+
+ cmd->se_tfo = tfo;
+@@ -2293,7 +2294,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+ unsigned long flags;
+ int ret = 0;
+
+- kref_init(&se_cmd->cmd_kref);
+ /*
+ * Add a second kref if the fabric caller is expecting to handle
+ * fabric acknowledgement that requires two target_put_sess_cmd()
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index aa6db8f4ee18..bf9d2ac9c9ed 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -1520,7 +1520,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ status = serial8250_rx_chars(up, status);
+ }
+ serial8250_modem_status(up);
+- if (status & UART_LSR_THRE)
++ if (!up->dma && (status & UART_LSR_THRE))
+ serial8250_tx_chars(up);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
+index 7046769608d4..ab9096dc3849 100644
+--- a/drivers/tty/serial/8250/8250_dma.c
++++ b/drivers/tty/serial/8250/8250_dma.c
+@@ -20,12 +20,15 @@ static void __dma_tx_complete(void *param)
+ struct uart_8250_port *p = param;
+ struct uart_8250_dma *dma = p->dma;
+ struct circ_buf *xmit = &p->port.state->xmit;
+-
+- dma->tx_running = 0;
++ unsigned long flags;
+
+ dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
++ spin_lock_irqsave(&p->port.lock, flags);
++
++ dma->tx_running = 0;
++
+ xmit->tail += dma->tx_size;
+ xmit->tail &= UART_XMIT_SIZE - 1;
+ p->port.icount.tx += dma->tx_size;
+@@ -35,6 +38,8 @@ static void __dma_tx_complete(void *param)
+
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port))
+ serial8250_tx_dma(p);
++
++ spin_unlock_irqrestore(&p->port.lock, flags);
+ }
+
+ static void __dma_rx_complete(void *param)
+diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
+index 4cc4fd6d1473..dfd29438a11e 100644
+--- a/drivers/usb/gadget/at91_udc.c
++++ b/drivers/usb/gadget/at91_udc.c
+@@ -1710,16 +1710,6 @@ static int at91udc_probe(struct platform_device *pdev)
+ return -ENODEV;
+ }
+
+- if (pdev->num_resources != 2) {
+- DBG("invalid num_resources\n");
+- return -ENODEV;
+- }
+- if ((pdev->resource[0].flags != IORESOURCE_MEM)
+- || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
+- DBG("invalid resource type\n");
+- return -ENODEV;
+- }
+-
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
+index f2407b2e8a99..ce65c4ec6550 100644
+--- a/drivers/usb/host/ehci-fsl.c
++++ b/drivers/usb/host/ehci-fsl.c
+@@ -261,7 +261,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
+ break;
+ }
+
+- if (pdata->have_sysif_regs && pdata->controller_ver &&
++ if (pdata->have_sysif_regs &&
++ pdata->controller_ver > FSL_USB_VER_1_6 &&
+ (phy_mode == FSL_USB2_PHY_ULPI)) {
+ /* check PHY_CLK_VALID to get phy clk valid */
+ if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
+diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
+index 2347ab83f046..dcf570862502 100644
+--- a/drivers/usb/host/ohci-hub.c
++++ b/drivers/usb/host/ohci-hub.c
+@@ -90,6 +90,24 @@ __acquires(ohci->lock)
+ dl_done_list (ohci);
+ finish_unlinks (ohci, ohci_frame_no(ohci));
+
++ /*
++ * Some controllers don't handle "global" suspend properly if
++ * there are unsuspended ports. For these controllers, put all
++ * the enabled ports into suspend before suspending the root hub.
++ */
++ if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) {
++ __hc32 __iomem *portstat = ohci->regs->roothub.portstatus;
++ int i;
++ unsigned temp;
++
++ for (i = 0; i < ohci->num_ports; (++i, ++portstat)) {
++ temp = ohci_readl(ohci, portstat);
++ if ((temp & (RH_PS_PES | RH_PS_PSS)) ==
++ RH_PS_PES)
++ ohci_writel(ohci, RH_PS_PSS, portstat);
++ }
++ }
++
+ /* maybe resume can wake root hub */
+ if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) {
+ ohci->hc_control |= OHCI_CTRL_RWE;
+diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
+index 659cde1ed1ea..fd9f77bcd499 100644
+--- a/drivers/usb/host/ohci-pci.c
++++ b/drivers/usb/host/ohci-pci.c
+@@ -160,6 +160,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
+ ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
+ }
+
++ ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND;
+ return 0;
+ }
+
+diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
+index e2e5faa5a402..0b2e58c2dfef 100644
+--- a/drivers/usb/host/ohci.h
++++ b/drivers/usb/host/ohci.h
+@@ -405,6 +405,8 @@ struct ohci_hcd {
+ #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
+ #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
+ #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
++#define OHCI_QUIRK_GLOBAL_SUSPEND 0x800 /* must suspend ports */
++
+ // there are also chip quirks/bugs in init logic
+
+ struct work_struct nec_work; /* Worker for NEC quirk */
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 7ed681a714a5..6c0a542e8ec1 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -151,6 +151,21 @@ static const struct usb_device_id id_table[] = {
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 0)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Device Management */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 2)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card NMEA */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Modem */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 0)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Device Management */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 2)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card NMEA */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 3)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Modem */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 0)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 2)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 3)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 0)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 2)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
++ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 3)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Modem */
+
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
+index 4ef2a80728f7..008d805c3d21 100644
+--- a/drivers/usb/storage/shuttle_usbat.c
++++ b/drivers/usb/storage/shuttle_usbat.c
+@@ -1851,7 +1851,7 @@ static int usbat_probe(struct usb_interface *intf,
+ us->transport_name = "Shuttle USBAT";
+ us->transport = usbat_flash_transport;
+ us->transport_reset = usb_stor_CB_reset;
+- us->max_lun = 1;
++ us->max_lun = 0;
+
+ result = usb_stor_probe2(us);
+ return result;
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index adbeb255616a..042c83b01046 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -234,6 +234,20 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
++/* Reported by Daniele Forsi <dforsi@gmail.com> */
++UNUSUAL_DEV( 0x0421, 0x04b9, 0x0350, 0x0350,
++ "Nokia",
++ "5300",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64 ),
++
++/* Patch submitted by Victor A. Santos <victoraur.santos@gmail.com> */
++UNUSUAL_DEV( 0x0421, 0x05af, 0x0742, 0x0742,
++ "Nokia",
++ "305",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64),
++
+ /* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
+ UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
+ "Nokia",
+diff --git a/fs/aio.c b/fs/aio.c
+index 12a3de0ee6da..04cd7686555d 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1299,10 +1299,8 @@ rw_common:
+ &iovec, compat)
+ : aio_setup_single_vector(req, rw, buf, &nr_segs,
+ iovec);
+- if (ret)
+- return ret;
+-
+- ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
++ if (!ret)
++ ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+ if (ret < 0) {
+ if (iovec != &inline_vec)
+ kfree(iovec);
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index 92ef341ba0cf..2a69bde8c61d 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -179,7 +179,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
+ spin_lock(&active->d_lock);
+
+ /* Already gone? */
+- if (!d_count(active))
++ if ((int) d_count(active) <= 0)
+ goto next;
+
+ qstr = &active->d_name;
+@@ -230,7 +230,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
+
+ spin_lock(&expiring->d_lock);
+
+- /* Bad luck, we've already been dentry_iput */
++ /* We've already been dentry_iput or unlinked */
+ if (!expiring->d_inode)
+ goto next;
+
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 9bdeca12ae0e..02db009d1531 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -74,10 +74,15 @@ static int expand_corename(struct core_name *cn, int size)
+ static int cn_vprintf(struct core_name *cn, const char *fmt, va_list arg)
+ {
+ int free, need;
++ va_list arg_copy;
+
+ again:
+ free = cn->size - cn->used;
+- need = vsnprintf(cn->corename + cn->used, free, fmt, arg);
++
++ va_copy(arg_copy, arg);
++ need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
++ va_end(arg_copy);
++
+ if (need < free) {
+ cn->used += need;
+ return 0;
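The cn_vprintf() change works because vsnprintf() consumes its va_list: when the corename buffer is too small and the function loops back to again, the retry must not reuse the exhausted list, so each attempt operates on a fresh va_copy(). The same retry idiom in plain, runnable C (the growable buffer and helper names below are illustrative, not the kernel's expand_corename() machinery):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Append formatted text to a growable buffer, retrying after growth.
 * va_copy() hands each vsnprintf() call a fresh va_list, exactly the
 * property the cn_vprintf() fix above relies on. */
static int vappend(char **buf, size_t *size, size_t *used,
                   const char *fmt, va_list arg)
{
    for (;;) {
        va_list arg_copy;
        size_t free_space = *size - *used;
        int need;
        char *tmp;

        va_copy(arg_copy, arg);
        need = vsnprintf(*buf + *used, free_space, fmt, arg_copy);
        va_end(arg_copy);

        if (need < 0)
            return -1;
        if ((size_t)need < free_space) {
            *used += (size_t)need;
            return 0;
        }
        *size += (size_t)need + 1;          /* grow, then retry */
        tmp = realloc(*buf, *size);
        if (!tmp)
            return -1;
        *buf = tmp;
    }
}

static int append(char **buf, size_t *size, size_t *used, const char *fmt, ...)
{
    va_list arg;
    int ret;

    va_start(arg, fmt);
    ret = vappend(buf, size, used, fmt, arg);
    va_end(arg);
    return ret;
}

int main(void)
{
    size_t size = 8, used = 0;
    char *buf = malloc(size);

    if (!buf || append(&buf, &size, &used, "core.%d.%s", 1234, "long-name"))
        return 1;
    puts(buf);                              /* core.1234.long-name */
    free(buf);
    return 0;
}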
+diff --git a/fs/exec.c b/fs/exec.c
+index bb8afc1d1df4..95eef54de2b6 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -657,10 +657,10 @@ int setup_arg_pages(struct linux_binprm *bprm,
+ unsigned long rlim_stack;
+
+ #ifdef CONFIG_STACK_GROWSUP
+- /* Limit stack size to 1GB */
++ /* Limit stack size */
+ stack_base = rlimit_max(RLIMIT_STACK);
+- if (stack_base > (1 << 30))
+- stack_base = 1 << 30;
++ if (stack_base > STACK_SIZE_MAX)
++ stack_base = STACK_SIZE_MAX;
+
+ /* Make sure we didn't let the argument array grow too large. */
+ if (vma->vm_end - vma->vm_start > stack_base)
+diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
+index 8a50b3c18093..e15bcbd5043c 100644
+--- a/fs/nfsd/nfs4acl.c
++++ b/fs/nfsd/nfs4acl.c
+@@ -385,8 +385,10 @@ sort_pacl(struct posix_acl *pacl)
+ * by uid/gid. */
+ int i, j;
+
+- if (pacl->a_count <= 4)
+- return; /* no users or groups */
++ /* no users or groups */
++ if (!pacl || pacl->a_count <= 4)
++ return;
++
+ i = 1;
+ while (pacl->a_entries[i].e_tag == ACL_USER)
+ i++;
+@@ -513,13 +515,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
+
+ /*
+ * ACLs with no ACEs are treated differently in the inheritable
+- * and effective cases: when there are no inheritable ACEs, we
+- * set a zero-length default posix acl:
+ * and effective cases: when there are no inheritable ACEs, we
+ * call ->set_acl with a NULL ACL structure.
+ */
+- if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
+- pacl = posix_acl_alloc(0, GFP_KERNEL);
+- return pacl ? pacl : ERR_PTR(-ENOMEM);
+- }
++ if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
++ return NULL;
++
+ /*
+ * When there are no effective ACEs, the following will end
+ * up setting a 3-element effective posix ACL with all
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 5cbdf38ffc66..ded7af3c45e1 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1071,6 +1071,18 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
+ return NULL;
+ }
+ clp->cl_name.len = name.len;
++ INIT_LIST_HEAD(&clp->cl_sessions);
++ idr_init(&clp->cl_stateids);
++ atomic_set(&clp->cl_refcount, 0);
++ clp->cl_cb_state = NFSD4_CB_UNKNOWN;
++ INIT_LIST_HEAD(&clp->cl_idhash);
++ INIT_LIST_HEAD(&clp->cl_openowners);
++ INIT_LIST_HEAD(&clp->cl_delegations);
++ INIT_LIST_HEAD(&clp->cl_lru);
++ INIT_LIST_HEAD(&clp->cl_callbacks);
++ INIT_LIST_HEAD(&clp->cl_revoked);
++ spin_lock_init(&clp->cl_lock);
++ rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
+ return clp;
+ }
+
+@@ -1088,6 +1100,7 @@ free_client(struct nfs4_client *clp)
+ WARN_ON_ONCE(atomic_read(&ses->se_ref));
+ free_session(ses);
+ }
++ rpc_destroy_wait_queue(&clp->cl_cb_waitq);
+ free_svc_cred(&clp->cl_cred);
+ kfree(clp->cl_name.data);
+ idr_destroy(&clp->cl_stateids);
+@@ -1335,7 +1348,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
+ if (clp == NULL)
+ return NULL;
+
+- INIT_LIST_HEAD(&clp->cl_sessions);
+ ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
+ if (ret) {
+ spin_lock(&nn->client_lock);
+@@ -1343,20 +1355,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
+ spin_unlock(&nn->client_lock);
+ return NULL;
+ }
+- idr_init(&clp->cl_stateids);
+- atomic_set(&clp->cl_refcount, 0);
+- clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+- INIT_LIST_HEAD(&clp->cl_idhash);
+- INIT_LIST_HEAD(&clp->cl_openowners);
+- INIT_LIST_HEAD(&clp->cl_delegations);
+- INIT_LIST_HEAD(&clp->cl_lru);
+- INIT_LIST_HEAD(&clp->cl_callbacks);
+- INIT_LIST_HEAD(&clp->cl_revoked);
+- spin_lock_init(&clp->cl_lock);
+ nfsd4_init_callback(&clp->cl_cb_null);
+ clp->cl_time = get_seconds();
+ clear_bit(0, &clp->cl_cb_slot_busy);
+- rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
+ copy_verf(clp, verf);
+ rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
+ gen_confirm(clp);
+@@ -3695,9 +3696,16 @@ out:
+ static __be32
+ nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
+ {
+- if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
++ struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
++
++ if (check_for_locks(stp->st_file, lo))
+ return nfserr_locks_held;
+- release_lock_stateid(stp);
++ /*
++ * Currently there's a 1-1 lock stateid<->lockowner
++ * correspondance, and we have to delete the lockowner when we
++ * delete the lock stateid:
++ */
++ unhash_lockowner(lo);
+ return nfs_ok;
+ }
+
+@@ -4141,6 +4149,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
+
+ if (!same_owner_str(&lo->lo_owner, owner, clid))
+ return false;
++ if (list_empty(&lo->lo_owner.so_stateids)) {
++ WARN_ON_ONCE(1);
++ return false;
++ }
+ lst = list_first_entry(&lo->lo_owner.so_stateids,
+ struct nfs4_ol_stateid, st_perstateowner);
+ return lst->st_file->fi_inode == inode;
+diff --git a/fs/posix_acl.c b/fs/posix_acl.c
+index 8bd2135b7f82..3542f1f814e2 100644
+--- a/fs/posix_acl.c
++++ b/fs/posix_acl.c
+@@ -158,6 +158,12 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
+ umode_t mode = 0;
+ int not_equiv = 0;
+
++ /*
++ * A null ACL can always be presented as mode bits.
++ */
++ if (!acl)
++ return 0;
++
+ FOREACH_ACL_ENTRY(pa, acl, pe) {
+ switch (pa->e_tag) {
+ case ACL_USER_OBJ:
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 9f15c0064c50..e68db4d534cb 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -524,6 +524,7 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
+ extern int ftrace_arch_read_dyn_info(char *buf, int size);
+
+ extern int skip_trace(unsigned long ip);
++extern void ftrace_module_init(struct module *mod);
+
+ extern void ftrace_disable_daemon(void);
+ extern void ftrace_enable_daemon(void);
+@@ -533,6 +534,7 @@ static inline int ftrace_force_update(void) { return 0; }
+ static inline void ftrace_disable_daemon(void) { }
+ static inline void ftrace_enable_daemon(void) { }
+ static inline void ftrace_release_mod(struct module *mod) {}
++static inline void ftrace_module_init(struct module *mod) {}
+ static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+ {
+ return -EINVAL;
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index 5cd0f0949927..998f4dfedecf 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -40,6 +40,7 @@ extern struct fs_struct init_fs;
+
+ #define INIT_SIGNALS(sig) { \
+ .nr_threads = 1, \
++ .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
+ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
+ .shared_pending = { \
+ .list = LIST_HEAD_INIT(sig.shared_pending.list), \
+@@ -213,6 +214,7 @@ extern struct task_group root_task_group;
+ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
+ }, \
+ .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
++ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
+ INIT_IDS \
+ INIT_PERF_EVENTS(tsk) \
+ INIT_TRACE_IRQFLAGS \
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 5e865b554940..623ab2d787d9 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -198,7 +198,40 @@ static inline int check_wakeup_irqs(void) { return 0; }
+
+ extern cpumask_var_t irq_default_affinity;
+
+-extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
++/* Internal implementation. Use the helpers below */
++extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
++ bool force);
++
++/**
++ * irq_set_affinity - Set the irq affinity of a given irq
++ * @irq: Interrupt to set affinity
+ * @cpumask: cpumask
++ *
++ * Fails if cpumask does not contain an online CPU
++ */
++static inline int
++irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
++{
++ return __irq_set_affinity(irq, cpumask, false);
++}
++
++/**
++ * irq_force_affinity - Force the irq affinity of a given irq
++ * @irq: Interrupt to set affinity
+ * @cpumask: cpumask
++ *
++ * Same as irq_set_affinity, but without checking the mask against
++ * online cpus.
++ *
+ * Solely for low-level cpu hotplug code, where we need to make
+ * per-cpu interrupts affine before the cpu becomes online.
++ */
++static inline int
++irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
++{
++ return __irq_set_affinity(irq, cpumask, true);
++}
++
+ extern int irq_can_set_affinity(unsigned int irq);
+ extern int irq_select_affinity(unsigned int irq);
+
+@@ -234,6 +267,11 @@ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
+ return -EINVAL;
+ }
+
++static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
++{
++ return 0;
++}
++
+ static inline int irq_can_set_affinity(unsigned int irq)
+ {
+ return 0;
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index 56bb0dc8b7d4..896824eeacf1 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -380,7 +380,8 @@ extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
+
+ extern void irq_cpu_online(void);
+ extern void irq_cpu_offline(void);
+-extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
++extern int irq_set_affinity_locked(struct irq_data *data,
++ const struct cpumask *cpumask, bool force);
+
+ #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
+ void irq_move_irq(struct irq_data *data);
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 8594b065d3a8..0827bec7d82f 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -470,6 +470,7 @@ struct signal_struct {
+ atomic_t sigcnt;
+ atomic_t live;
+ int nr_threads;
++ struct list_head thread_head;
+
+ wait_queue_head_t wait_chldexit; /* for wait4() */
+
+@@ -1148,6 +1149,7 @@ struct task_struct {
+ /* PID/PID hash table linkage. */
+ struct pid_link pids[PIDTYPE_MAX];
+ struct list_head thread_group;
++ struct list_head thread_node;
+
+ struct completion *vfork_done; /* for vfork() */
+ int __user *set_child_tid; /* CLONE_CHILD_SETTID */
+@@ -2181,6 +2183,16 @@ extern bool current_is_single_threaded(void);
+ #define while_each_thread(g, t) \
+ while ((t = next_thread(t)) != g)
+
++#define __for_each_thread(signal, t) \
++ list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
++
++#define for_each_thread(p, t) \
++ __for_each_thread((p)->signal, t)
++
++/* Careful: this is a double loop, 'break' won't work as expected. */
++#define for_each_process_thread(p, t) \
++ for_each_process(p) for_each_thread(p, t)
++
+ static inline int get_nr_threads(struct task_struct *tsk)
+ {
+ return tsk->signal->nr_threads;
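The new for_each_process_thread() is explicitly flagged as a double loop in which break only terminates the inner for_each_thread(). A runnable miniature of the same macro shape shows the pitfall (toy integer loops, not the real task lists):

#include <stdio.h>

#define for_each_group(g, n)   for ((g) = 0; (g) < (n); (g)++)
#define for_each_member(m, k)  for ((m) = 0; (m) < (k); (m)++)
/* Two nested for statements, like for_each_process_thread(): */
#define for_each_group_member(g, m, n, k) \
    for_each_group(g, n) for_each_member(m, k)

int main(void)
{
    int g, m;

    for_each_group_member(g, m, 3, 2) {
        if (g == 1)
            break;              /* leaves only group 1's member loop */
        printf("group %d member %d\n", g, m);
    }
    return 0;
}

Running it prints the members of groups 0 and 2: the break merely skips group 1's inner walk rather than ending the whole traversal.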
+diff --git a/include/trace/events/module.h b/include/trace/events/module.h
+index 161932737416..ca298c7157ae 100644
+--- a/include/trace/events/module.h
++++ b/include/trace/events/module.h
+@@ -78,7 +78,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
+
+ TP_fast_assign(
+ __entry->ip = ip;
+- __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
++ __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
+ __assign_str(name, mod->name);
+ ),
+
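The one-character trace fix reports the module's live reference count as increments minus decrements; the old sum counted every get/put pair twice instead of cancelling it. The arithmetic, checked in a runnable snippet with flat counters standing in for the per-CPU incs/decs fields:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned long incs = 0, decs = 0;

    incs++;                         /* try_module_get() */
    incs++;                         /* try_module_get() */
    decs++;                         /* module_put()     */

    /* One reference is still held: */
    assert(incs - decs == 1);       /* fixed formula */
    assert(incs + decs != 1);       /* old formula over-counts */
    printf("refcnt = %lu\n", incs - decs);
    return 0;
}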
+diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
+index 73bde4eaf16c..da106879b021 100644
+--- a/include/uapi/drm/tegra_drm.h
++++ b/include/uapi/drm/tegra_drm.h
+@@ -105,7 +105,6 @@ struct drm_tegra_submit {
+ __u32 num_waitchks;
+ __u32 waitchk_mask;
+ __u32 timeout;
+- __u32 pad;
+ __u64 syncpts;
+ __u64 cmdbufs;
+ __u64 relocs;
+diff --git a/kernel/exit.c b/kernel/exit.c
+index dcde2c4b61d0..81b3d6789ee8 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -74,6 +74,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
+ __this_cpu_dec(process_counts);
+ }
+ list_del_rcu(&p->thread_group);
++ list_del_rcu(&p->thread_node);
+ }
+
+ /*
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 458953ca4d50..11a23afc6ee5 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1038,6 +1038,11 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
+ sig->nr_threads = 1;
+ atomic_set(&sig->live, 1);
+ atomic_set(&sig->sigcnt, 1);
++
++ /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
++ sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
++ tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
++
+ init_waitqueue_head(&sig->wait_chldexit);
+ sig->curr_target = tsk;
+ init_sigpending(&sig->shared_pending);
+@@ -1476,6 +1481,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ atomic_inc(&current->signal->sigcnt);
+ list_add_tail_rcu(&p->thread_group,
+ &p->group_leader->thread_group);
++ list_add_tail_rcu(&p->thread_node,
++ &p->signal->thread_head);
+ }
+ attach_pid(p, PIDTYPE_PID);
+ nr_threads++;
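The copy_signal() hunk builds the initial two-element thread list with cross-initialized compound literals instead of INIT_LIST_HEAD() plus list_add(): each head is set to point at the other. That this yields the same circular list can be verified in userspace with a minimal list_head, assuming the kernel's next/prev layout:

#include <assert.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name) { &(name), &(name) }

int main(void)
{
    struct list_head thread_head, thread_node;

    /* list_add(thread_node, thread_head) without INIT_LIST_HEAD(),
     * exactly as in the copy_signal() hunk above: */
    thread_head = (struct list_head)LIST_HEAD_INIT(thread_node);
    thread_node = (struct list_head)LIST_HEAD_INIT(thread_head);

    /* Result: a two-element circle, head <-> node. */
    assert(thread_head.next == &thread_node && thread_head.prev == &thread_node);
    assert(thread_node.next == &thread_head && thread_node.prev == &thread_head);
    return 0;
}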
+diff --git a/kernel/futex.c b/kernel/futex.c
+index d8347b7a064f..f94695c9d38b 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -594,6 +594,55 @@ void exit_pi_state_list(struct task_struct *curr)
+ raw_spin_unlock_irq(&curr->pi_lock);
+ }
+
++/*
++ * We need to check the following states:
++ *
++ * Waiter | pi_state | pi->owner | uTID | uODIED | ?
++ *
++ * [1] NULL | --- | --- | 0 | 0/1 | Valid
++ * [2] NULL | --- | --- | >0 | 0/1 | Valid
++ *
++ * [3] Found | NULL | -- | Any | 0/1 | Invalid
++ *
++ * [4] Found | Found | NULL | 0 | 1 | Valid
++ * [5] Found | Found | NULL | >0 | 1 | Invalid
++ *
++ * [6] Found | Found | task | 0 | 1 | Valid
++ *
++ * [7] Found | Found | NULL | Any | 0 | Invalid
++ *
++ * [8] Found | Found | task | ==taskTID | 0/1 | Valid
++ * [9] Found | Found | task | 0 | 0 | Invalid
++ * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
++ *
++ * [1] Indicates that the kernel can acquire the futex atomically. We
+ * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
++ *
++ * [2] Valid, if TID does not belong to a kernel thread. If no matching
++ * thread is found then it indicates that the owner TID has died.
++ *
+ * [3] Invalid. The waiter is queued on a non-PI futex
++ *
++ * [4] Valid state after exit_robust_list(), which sets the user space
++ * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
++ *
++ * [5] The user space value got manipulated between exit_robust_list()
++ * and exit_pi_state_list()
++ *
++ * [6] Valid state after exit_pi_state_list() which sets the new owner in
++ * the pi_state but cannot access the user space value.
++ *
++ * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
++ *
++ * [8] Owner and user space value match
++ *
++ * [9] There is no transient state which sets the user space TID to 0
++ * except exit_robust_list(), but this is indicated by the
++ * FUTEX_OWNER_DIED bit. See [4]
++ *
++ * [10] There is no transient state which leaves owner and user space
++ * TID out of sync.
++ */
+ static int
+ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ union futex_key *key, struct futex_pi_state **ps)
+@@ -609,12 +658,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ plist_for_each_entry_safe(this, next, head, list) {
+ if (match_futex(&this->key, key)) {
+ /*
+- * Another waiter already exists - bump up
+- * the refcount and return its pi_state:
++ * Sanity check the waiter before increasing
++ * the refcount and attaching to it.
+ */
+ pi_state = this->pi_state;
+ /*
+- * Userspace might have messed up non-PI and PI futexes
++ * Userspace might have messed up non-PI and
++ * PI futexes [3]
+ */
+ if (unlikely(!pi_state))
+ return -EINVAL;
+@@ -622,34 +672,70 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ WARN_ON(!atomic_read(&pi_state->refcount));
+
+ /*
+- * When pi_state->owner is NULL then the owner died
+- * and another waiter is on the fly. pi_state->owner
+- * is fixed up by the task which acquires
+- * pi_state->rt_mutex.
+- *
+- * We do not check for pid == 0 which can happen when
+- * the owner died and robust_list_exit() cleared the
+- * TID.
++ * Handle the owner died case:
+ */
+- if (pid && pi_state->owner) {
++ if (uval & FUTEX_OWNER_DIED) {
++ /*
++ * exit_pi_state_list sets owner to NULL and
++ * wakes the topmost waiter. The task which
++ * acquires the pi_state->rt_mutex will fixup
++ * owner.
++ */
++ if (!pi_state->owner) {
++ /*
++ * No pi state owner, but the user
++ * space TID is not 0. Inconsistent
++ * state. [5]
++ */
++ if (pid)
++ return -EINVAL;
++ /*
++ * Take a ref on the state and
++ * return. [4]
++ */
++ goto out_state;
++ }
++
+ /*
+- * Bail out if user space manipulated the
+- * futex value.
++ * If TID is 0, then either the dying owner
++ * has not yet executed exit_pi_state_list()
++ * or some waiter acquired the rtmutex in the
++ * pi state, but did not yet fixup the TID in
++ * user space.
++ *
++ * Take a ref on the state and return. [6]
+ */
+- if (pid != task_pid_vnr(pi_state->owner))
++ if (!pid)
++ goto out_state;
++ } else {
++ /*
++ * If the owner died bit is not set,
++ * then the pi_state must have an
++ * owner. [7]
++ */
++ if (!pi_state->owner)
+ return -EINVAL;
+ }
+
++ /*
++ * Bail out if user space manipulated the
++ * futex value. If pi state exists then the
++ * owner TID must be the same as the user
++ * space TID. [9/10]
++ */
++ if (pid != task_pid_vnr(pi_state->owner))
++ return -EINVAL;
++
++ out_state:
+ atomic_inc(&pi_state->refcount);
+ *ps = pi_state;
+-
+ return 0;
+ }
+ }
+
+ /*
+ * We are the first waiter - try to look up the real owner and attach
+- * the new pi_state to it, but bail out when TID = 0
++ * the new pi_state to it, but bail out when TID = 0 [1]
+ */
+ if (!pid)
+ return -ESRCH;
+@@ -657,6 +743,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ if (!p)
+ return -ESRCH;
+
++ if (!p->mm) {
++ put_task_struct(p);
++ return -EPERM;
++ }
++
+ /*
+ * We need to look at the task state flags to figure out,
+ * whether the task is exiting. To protect against the do_exit
+@@ -677,6 +768,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ return ret;
+ }
+
++ /*
++ * No existing pi state. First waiter. [2]
++ */
+ pi_state = alloc_pi_state();
+
+ /*
+@@ -748,10 +842,18 @@ retry:
+ return -EDEADLK;
+
+ /*
+- * Surprise - we got the lock. Just return to userspace:
++ * Surprise - we got the lock, but we do not trust user space at all.
+ */
+- if (unlikely(!curval))
+- return 1;
++ if (unlikely(!curval)) {
++ /*
++ * We verify whether there is kernel state for this
++ * futex. If not, we can safely assume, that the 0 ->
++ * TID transition is correct. If state exists, we do
++ * not bother to fixup the user space state as it was
++ * corrupted already.
++ */
++ return futex_top_waiter(hb, key) ? -EINVAL : 1;
++ }
+
+ uval = curval;
+
+@@ -881,6 +983,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ struct task_struct *new_owner;
+ struct futex_pi_state *pi_state = this->pi_state;
+ u32 uninitialized_var(curval), newval;
++ int ret = 0;
+
+ if (!pi_state)
+ return -EINVAL;
+@@ -904,23 +1007,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ new_owner = this->task;
+
+ /*
+- * We pass it to the next owner. (The WAITERS bit is always
+- * kept enabled while there is PI state around. We must also
+- * preserve the owner died bit.)
++ * We pass it to the next owner. The WAITERS bit is always
++ * kept enabled while there is PI state around. We cleanup the
++ * owner died bit, because we are the owner.
+ */
+- if (!(uval & FUTEX_OWNER_DIED)) {
+- int ret = 0;
++ newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+
+- newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+-
+- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+- ret = -EFAULT;
+- else if (curval != uval)
+- ret = -EINVAL;
+- if (ret) {
+- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+- return ret;
+- }
++ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
++ ret = -EFAULT;
++ else if (curval != uval)
++ ret = -EINVAL;
++ if (ret) {
++ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
++ return ret;
+ }
+
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
+@@ -1199,7 +1298,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
+ *
+ * Return:
+ * 0 - failed to acquire the lock atomically;
+- * 1 - acquired the lock;
++ * >0 - acquired the lock, return value is vpid of the top_waiter
+ * <0 - error
+ */
+ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+@@ -1210,7 +1309,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+ {
+ struct futex_q *top_waiter = NULL;
+ u32 curval;
+- int ret;
++ int ret, vpid;
+
+ if (get_futex_value_locked(&curval, pifutex))
+ return -EFAULT;
+@@ -1238,11 +1337,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+ * the contended case or if set_waiters is 1. The pi_state is returned
+ * in ps in contended cases.
+ */
++ vpid = task_pid_vnr(top_waiter->task);
+ ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
+ set_waiters);
+- if (ret == 1)
++ if (ret == 1) {
+ requeue_pi_wake_futex(top_waiter, key2, hb2);
+-
++ return vpid;
++ }
+ return ret;
+ }
+
+@@ -1274,10 +1375,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ struct futex_hash_bucket *hb1, *hb2;
+ struct plist_head *head1;
+ struct futex_q *this, *next;
+- u32 curval2;
+
+ if (requeue_pi) {
+ /*
++ * Requeue PI only works on two distinct uaddrs. This
++ * check is only valid for private futexes. See below.
++ */
++ if (uaddr1 == uaddr2)
++ return -EINVAL;
++
++ /*
+ * requeue_pi requires a pi_state, try to allocate it now
+ * without any locks in case it fails.
+ */
+@@ -1315,6 +1422,15 @@ retry:
+ if (unlikely(ret != 0))
+ goto out_put_key1;
+
++ /*
++ * The check above which compares uaddrs is not sufficient for
++ * shared futexes. We need to compare the keys:
++ */
++ if (requeue_pi && match_futex(&key1, &key2)) {
++ ret = -EINVAL;
++ goto out_put_keys;
++ }
++
+ hb1 = hash_futex(&key1);
+ hb2 = hash_futex(&key2);
+
+@@ -1360,16 +1476,25 @@ retry_private:
+ * At this point the top_waiter has either taken uaddr2 or is
+ * waiting on it. If the former, then the pi_state will not
+ * exist yet, look it up one more time to ensure we have a
+- * reference to it.
++ * reference to it. If the lock was taken, ret contains the
++ * vpid of the top waiter task.
+ */
+- if (ret == 1) {
++ if (ret > 0) {
+ WARN_ON(pi_state);
+ drop_count++;
+ task_count++;
+- ret = get_futex_value_locked(&curval2, uaddr2);
+- if (!ret)
+- ret = lookup_pi_state(curval2, hb2, &key2,
+- &pi_state);
++ /*
++ * If we acquired the lock, then the user
++ * space value of uaddr2 should be vpid. It
++ * cannot be changed by the top waiter as it
++ * is blocked on hb2 lock if it tries to do
++ * so. If something fiddled with it behind our
++ * back the pi state lookup might unearth
+ * it. So we'd rather use the known value than
+ * reread it and hand potential crap to
++ * lookup_pi_state.
++ */
++ ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
+ }
+
+ switch (ret) {
+@@ -2139,9 +2264,10 @@ retry:
+ /*
+ * To avoid races, try to do the TID -> 0 atomic transition
+ * again. If it succeeds then we can return without waking
+- * anyone else up:
++ * anyone else up. We only try this if neither the waiters nor
++ * the owner died bit are set.
+ */
+- if (!(uval & FUTEX_OWNER_DIED) &&
++ if (!(uval & ~FUTEX_TID_MASK) &&
+ cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
+ goto pi_faulted;
+ /*
+@@ -2173,11 +2299,9 @@ retry:
+ /*
+ * No waiters - kernel unlocks the futex:
+ */
+- if (!(uval & FUTEX_OWNER_DIED)) {
+- ret = unlock_futex_pi(uaddr, uval);
+- if (ret == -EFAULT)
+- goto pi_faulted;
+- }
++ ret = unlock_futex_pi(uaddr, uval);
++ if (ret == -EFAULT)
++ goto pi_faulted;
+
+ out_unlock:
+ spin_unlock(&hb->lock);
+@@ -2336,6 +2460,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (ret)
+ goto out_key2;
+
++ /*
++ * The check above which compares uaddrs is not sufficient for
++ * shared futexes. We need to compare the keys:
++ */
++ if (match_futex(&q.key, &key2)) {
++ ret = -EINVAL;
++ goto out_put_keys;
++ }
++
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
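The state table added above is what lookup_pi_state() enforces once it finds an existing waiter. Rows [3] through [10] condense into a small predicate; the sketch below models that logic in userspace (owner_tid == 0 stands for pi_state->owner == NULL, and this is an illustration of the table, not the kernel function itself):

#include <assert.h>
#include <stdbool.h>

/* Returns 0 for a valid state, -1 for an invalid one, following
 * rows [3]..[10] of the lookup_pi_state() table above. */
static int check_attach(bool have_pi_state, unsigned int owner_tid,
                        unsigned int utid, bool owner_died)
{
    if (!have_pi_state)
        return -1;                          /* [3] waiter on a non-PI futex */
    if (owner_died) {
        if (!owner_tid)
            return utid ? -1 : 0;           /* [5] invalid, [4] valid */
        if (!utid)
            return 0;                       /* [6] fixup not done yet */
    } else if (!owner_tid) {
        return -1;                          /* [7] NULL owner needs OWNER_DIED */
    }
    return utid == owner_tid ? 0 : -1;      /* [8] valid, [9]/[10] invalid */
}

int main(void)
{
    assert(check_attach(false, 0, 7, false) == -1);  /* [3]  */
    assert(check_attach(true,  0, 0, true)  ==  0);  /* [4]  */
    assert(check_attach(true,  0, 7, true)  == -1);  /* [5]  */
    assert(check_attach(true,  9, 0, true)  ==  0);  /* [6]  */
    assert(check_attach(true,  0, 7, false) == -1);  /* [7]  */
    assert(check_attach(true,  9, 9, false) ==  0);  /* [8]  */
    assert(check_attach(true,  9, 0, false) == -1);  /* [9]  */
    assert(check_attach(true,  9, 8, false) == -1);  /* [10] */
    return 0;
}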
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 383319bae3f7..aa149222cd8e 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -246,6 +246,11 @@ again:
+ goto again;
+ }
+ timer->base = new_base;
++ } else {
++ if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
++ cpu = this_cpu;
++ goto again;
++ }
+ }
+ return new_base;
+ }
+@@ -581,6 +586,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
+
+ cpu_base->expires_next.tv64 = expires_next.tv64;
+
++ /*
++ * If a hang was detected in the last timer interrupt then we
++ * leave the hang delay active in the hardware. We want the
++ * system to make progress. That also prevents the following
++ * scenario:
++ * T1 expires 50ms from now
++ * T2 expires 5s from now
++ *
++ * T1 is removed, so this code is called and would reprogram
++ * the hardware to 5s from now. Any hrtimer_start after that
++ * will not reprogram the hardware due to hang_detected being
+ * set. So we'd effectively block all timers until the T2 event
++ * fires.
++ */
++ if (cpu_base->hang_detected)
++ return;
++
+ if (cpu_base->expires_next.tv64 != KTIME_MAX)
+ tick_program_event(cpu_base->expires_next, 1);
+ }
+@@ -980,11 +1002,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ /* Remove an active timer from the queue: */
+ ret = remove_hrtimer(timer, base);
+
+- /* Switch the timer base, if necessary: */
+- new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+-
+ if (mode & HRTIMER_MODE_REL) {
+- tim = ktime_add_safe(tim, new_base->get_time());
++ tim = ktime_add_safe(tim, base->get_time());
+ /*
+ * CONFIG_TIME_LOW_RES is a temporary way for architectures
+ * to signal that they simply return xtime in
+@@ -999,6 +1018,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+
+ hrtimer_set_expires_range_ns(timer, tim, delta_ns);
+
++ /* Switch the timer base, if necessary: */
++ new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
++
+ timer_stats_hrtimer_set_start_info(timer);
+
+ leftmost = enqueue_hrtimer(timer, new_base);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 4c84746a840b..9e31fa71908d 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -150,7 +150,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ int ret;
+
+- ret = chip->irq_set_affinity(data, mask, false);
++ ret = chip->irq_set_affinity(data, mask, force);
+ switch (ret) {
+ case IRQ_SET_MASK_OK:
+ cpumask_copy(data->affinity, mask);
+@@ -162,7 +162,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ return ret;
+ }
+
+-int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
++int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
++ bool force)
+ {
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ struct irq_desc *desc = irq_data_to_desc(data);
+@@ -172,7 +173,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+ return -EINVAL;
+
+ if (irq_can_move_pcntxt(data)) {
+- ret = irq_do_set_affinity(data, mask, false);
++ ret = irq_do_set_affinity(data, mask, force);
+ } else {
+ irqd_set_move_pending(data);
+ irq_copy_pending(desc, mask);
+@@ -187,13 +188,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+ return ret;
+ }
+
+-/**
+- * irq_set_affinity - Set the irq affinity of a given irq
+- * @irq: Interrupt to set affinity
+- * @mask: cpumask
+- *
+- */
+-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
++int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
+ {
+ struct irq_desc *desc = irq_to_desc(irq);
+ unsigned long flags;
+@@ -203,7 +198,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+- ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
++ ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ return ret;
+ }
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 355e13af62c5..4c9dcffd1750 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -1681,6 +1681,14 @@ int kernel_kexec(void)
+ kexec_in_progress = true;
+ kernel_restart_prepare(NULL);
+ migrate_to_reboot_cpu();
++
++ /*
++ * migrate_to_reboot_cpu() disables CPU hotplug assuming that
++ * no further code needs to use CPU hotplug (which is true in
++ * the reboot case). However, the kexec path depends on using
++ * CPU hotplug again; so re-enable it here.
++ */
++ cpu_hotplug_enable();
+ printk(KERN_EMERG "Starting new kernel\n");
+ machine_shutdown();
+ }
+diff --git a/kernel/module.c b/kernel/module.c
+index dc582749fa13..7b15ff67c5aa 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3296,6 +3296,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
+
+ dynamic_debug_setup(info->debug, info->num_debug);
+
++ /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
++ ftrace_module_init(mod);
++
+ /* Finally it's fully formed, ready to start executing. */
+ err = complete_formation(mod, info);
+ if (err)
+diff --git a/kernel/timer.c b/kernel/timer.c
+index 4296d13db3d1..4addfa27f67d 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -822,7 +822,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
+
+ bit = find_last_bit(&mask, BITS_PER_LONG);
+
+- mask = (1 << bit) - 1;
++ mask = (1UL << bit) - 1;
+
+ expires_limit = expires_limit & ~(mask);
+
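The apply_slack() fix is plain C integer promotion: the literal 1 has type int, so (1 << bit) - 1 misbehaves whenever find_last_bit() returns 31 or more on a 64-bit kernel, and the slack mask loses its high bits. Since evaluating the int shift for such bit values is undefined behaviour, the runnable demo below models the 32-bit arithmetic explicitly instead of invoking it:

#include <stdio.h>

int main(void)
{
    int bit = 40;                     /* find_last_bit() can return >= 32 */

    /* What the fixed code computes: */
    unsigned long mask64 = (1UL << bit) - 1;

    /* 1 << bit has type int; evaluating it for bit >= 31 is undefined,
     * so model the 32-bit arithmetic explicitly instead: */
    unsigned long mask32 = (unsigned long)(unsigned int)mask64;

    printf("(1UL << %d) - 1   = %#lx\n", bit, mask64);  /* 0xffffffffff */
    printf("same math, 32 bit = %#lx\n", mask32);       /* 0xffffffff   */
    return 0;
}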
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index e66411fb55b3..a8642bac843e 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -4252,16 +4252,11 @@ static void ftrace_init_module(struct module *mod,
+ ftrace_process_locs(mod, start, end);
+ }
+
+-static int ftrace_module_notify_enter(struct notifier_block *self,
+- unsigned long val, void *data)
++void ftrace_module_init(struct module *mod)
+ {
+- struct module *mod = data;
+-
+- if (val == MODULE_STATE_COMING)
+- ftrace_init_module(mod, mod->ftrace_callsites,
+- mod->ftrace_callsites +
+- mod->num_ftrace_callsites);
+- return 0;
++ ftrace_init_module(mod, mod->ftrace_callsites,
++ mod->ftrace_callsites +
++ mod->num_ftrace_callsites);
+ }
+
+ static int ftrace_module_notify_exit(struct notifier_block *self,
+@@ -4275,11 +4270,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
+ return 0;
+ }
+ #else
+-static int ftrace_module_notify_enter(struct notifier_block *self,
+- unsigned long val, void *data)
+-{
+- return 0;
+-}
+ static int ftrace_module_notify_exit(struct notifier_block *self,
+ unsigned long val, void *data)
+ {
+@@ -4287,11 +4277,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
+ }
+ #endif /* CONFIG_MODULES */
+
+-struct notifier_block ftrace_module_enter_nb = {
+- .notifier_call = ftrace_module_notify_enter,
+- .priority = INT_MAX, /* Run before anything that can use kprobes */
+-};
+-
+ struct notifier_block ftrace_module_exit_nb = {
+ .notifier_call = ftrace_module_notify_exit,
+ .priority = INT_MIN, /* Run after anything that can remove kprobes */
+@@ -4328,10 +4313,6 @@ void __init ftrace_init(void)
+ __start_mcount_loc,
+ __stop_mcount_loc);
+
+- ret = register_module_notifier(&ftrace_module_enter_nb);
+- if (ret)
+- pr_warning("Failed to register trace ftrace module enter notifier\n");
+-
+ ret = register_module_notifier(&ftrace_module_exit_nb);
+ if (ret)
+ pr_warning("Failed to register trace ftrace module exit notifier\n");
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 60fee69c37be..cea58300f650 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1901,6 +1901,12 @@ static void send_mayday(struct work_struct *work)
+
+ /* mayday mayday mayday */
+ if (list_empty(&pwq->mayday_node)) {
++ /*
++ * If @pwq is for an unbound wq, its base ref may be put at
++ * any time due to an attribute change. Pin @pwq until the
++ * rescuer is done with it.
++ */
++ get_pwq(pwq);
+ list_add_tail(&pwq->mayday_node, &wq->maydays);
+ wake_up_process(wq->rescuer->task);
+ }
+@@ -2383,6 +2389,7 @@ static int rescuer_thread(void *__rescuer)
+ struct worker *rescuer = __rescuer;
+ struct workqueue_struct *wq = rescuer->rescue_wq;
+ struct list_head *scheduled = &rescuer->scheduled;
++ bool should_stop;
+
+ set_user_nice(current, RESCUER_NICE_LEVEL);
+
+@@ -2394,11 +2401,15 @@ static int rescuer_thread(void *__rescuer)
+ repeat:
+ set_current_state(TASK_INTERRUPTIBLE);
+
+- if (kthread_should_stop()) {
+- __set_current_state(TASK_RUNNING);
+- rescuer->task->flags &= ~PF_WQ_WORKER;
+- return 0;
+- }
++ /*
++ * By the time the rescuer is requested to stop, the workqueue
++ * shouldn't have any work pending, but @wq->maydays may still have
+ * pwq(s) queued. This can happen when non-rescuer workers consume
+ * all the work items before the rescuer gets to them. Go through
++ * @wq->maydays processing before acting on should_stop so that the
++ * list is always empty on exit.
++ */
++ should_stop = kthread_should_stop();
+
+ /* see whether any pwq is asking for help */
+ spin_lock_irq(&wq_mayday_lock);
+@@ -2430,6 +2441,12 @@ repeat:
+ process_scheduled_works(rescuer);
+
+ /*
++ * Put the reference grabbed by send_mayday(). @pool won't
++ * go away while we're holding its lock.
++ */
++ put_pwq(pwq);
++
++ /*
+ * Leave this pool. If keep_working() is %true, notify a
+ * regular worker; otherwise, we end up with 0 concurrency
+ * and stall the execution.
+@@ -2444,6 +2461,12 @@ repeat:
+
+ spin_unlock_irq(&wq_mayday_lock);
+
++ if (should_stop) {
++ __set_current_state(TASK_RUNNING);
++ rescuer->task->flags &= ~PF_WQ_WORKER;
++ return 0;
++ }
++
+ /* rescuers should never participate in concurrency management */
+ WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
+ schedule();
+@@ -4090,7 +4113,8 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
+ if (!pwq) {
+ pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
+ wq->name);
+- goto out_unlock;
++ mutex_lock(&wq->mutex);
++ goto use_dfl_pwq;
+ }
+
+ /*
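The rescuer rework hinges on ordering: kthread_should_stop() is sampled into should_stop before wq->maydays is drained, and the thread exits only after the drain, so the list is guaranteed empty on exit; the matching get_pwq()/put_pwq() calls pin each unbound pwq across that window. A hedged pthread sketch of that control flow (queue type and helpers are stand-ins, not the workqueue API):

#include <pthread.h>
#include <stdbool.h>

struct pwq { struct pwq *next; };      /* pinned mayday request */

struct rescuer {
    pthread_mutex_t lock;              /* stands in for wq_mayday_lock */
    struct pwq *maydays;               /* requests queued by the mayday side */
    bool stop_requested;               /* set by the stopping side */
};

static void process(struct pwq *pwq) { (void)pwq; /* run the work items */ }
static void put_pwq(struct pwq *pwq) { (void)pwq; /* drop the pinning ref */ }

static void rescuer_loop(struct rescuer *r)
{
    for (;;) {
        /* Sample the stop request first ... */
        bool should_stop = r->stop_requested;

        /* ... then drain everything queued so far, so the list is
         * always empty by the time should_stop is acted upon. */
        pthread_mutex_lock(&r->lock);
        while (r->maydays) {
            struct pwq *pwq = r->maydays;

            r->maydays = pwq->next;
            pthread_mutex_unlock(&r->lock);
            process(pwq);
            put_pwq(pwq);              /* reference taken at mayday time */
            pthread_mutex_lock(&r->lock);
        }
        pthread_mutex_unlock(&r->lock);

        if (should_stop)
            return;
        /* otherwise sleep until a new mayday or a stop request arrives */
    }
}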
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 15429b92ff98..213d1b4aafd7 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1820,13 +1820,18 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ break;
+ };
+ points = oom_badness(task, memcg, NULL, totalpages);
+- if (points > chosen_points) {
+- if (chosen)
+- put_task_struct(chosen);
+- chosen = task;
+- chosen_points = points;
+- get_task_struct(chosen);
+- }
++ if (!points || points < chosen_points)
++ continue;
++ /* Prefer thread group leaders for display purposes */
++ if (points == chosen_points &&
++ thread_group_leader(chosen))
++ continue;
++
++ if (chosen)
++ put_task_struct(chosen);
++ chosen = task;
++ chosen_points = points;
++ get_task_struct(chosen);
+ }
+ css_task_iter_end(&it);
+ }
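This memcg OOM loop, and the global one in mm/oom_kill.c later in this patch, now share one comparison idiom: zero scores never win, strictly lower scores are skipped, and on a tie an incumbent that is a thread group leader is kept, so the reported victim is the readable one. A runnable distillation of that comparison (the tasks, scores and leader flags below are invented):

#include <stdbool.h>
#include <stdio.h>

struct task { const char *name; unsigned int points; bool group_leader; };

int main(void)
{
    struct task tasks[] = {
        { "leader",  100, true  },
        { "thread1", 100, false },   /* ties the leader: leader is kept */
        { "thread2",  90, false },   /* strictly worse: skipped         */
        { "idle",      0, false },   /* zero score: never chosen        */
    };
    struct task *chosen = NULL;
    unsigned int chosen_points = 0;

    for (unsigned int i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++) {
        unsigned int points = tasks[i].points;

        if (!points || points < chosen_points)
            continue;
        /* Prefer thread group leaders for display purposes */
        if (chosen && points == chosen_points && chosen->group_leader)
            continue;
        chosen = &tasks[i];
        chosen_points = points;
    }
    printf("victim: %s (%u)\n", chosen ? chosen->name : "none", chosen_points);
    return 0;
}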
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 5ea3cf734138..9f1b0ff6cb65 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1085,15 +1085,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+ return 0;
+ } else if (PageHuge(hpage)) {
+ /*
+- * Check "just unpoisoned", "filter hit", and
+- * "race with other subpage."
++ * Check "filter hit" and "race with other subpage."
+ */
+ lock_page(hpage);
+- if (!PageHWPoison(hpage)
+- || (hwpoison_filter(p) && TestClearPageHWPoison(p))
+- || (p != hpage && TestSetPageHWPoison(hpage))) {
+- atomic_long_sub(nr_pages, &num_poisoned_pages);
+- return 0;
++ if (PageHWPoison(hpage)) {
++ if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
++ || (p != hpage && TestSetPageHWPoison(hpage))) {
++ atomic_long_sub(nr_pages, &num_poisoned_pages);
++ unlock_page(hpage);
++ return 0;
++ }
+ }
+ set_page_hwpoison_huge_page(hpage);
+ res = dequeue_hwpoisoned_huge_page(hpage);
+diff --git a/mm/memory.c b/mm/memory.c
+index 22e67a2c955b..99fe3aa1035c 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1929,12 +1929,17 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long address, unsigned int fault_flags)
+ {
+ struct vm_area_struct *vma;
++ vm_flags_t vm_flags;
+ int ret;
+
+ vma = find_extend_vma(mm, address);
+ if (!vma || address < vma->vm_start)
+ return -EFAULT;
+
++ vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
++ if (!(vm_flags & vma->vm_flags))
++ return -EFAULT;
++
+ ret = handle_mm_fault(mm, vma, address, fault_flags);
+ if (ret & VM_FAULT_ERROR) {
+ if (ret & VM_FAULT_OOM)
+diff --git a/mm/mremap.c b/mm/mremap.c
+index 0843feb66f3d..05f1180e9f21 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -194,10 +194,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
+ break;
+ if (pmd_trans_huge(*old_pmd)) {
+ int err = 0;
+- if (extent == HPAGE_PMD_SIZE)
++ if (extent == HPAGE_PMD_SIZE) {
++ VM_BUG_ON(vma->vm_file || !vma->anon_vma);
++ /* See comment in move_ptes() */
++ if (need_rmap_locks)
++ anon_vma_lock_write(vma->anon_vma);
+ err = move_huge_pmd(vma, new_vma, old_addr,
+ new_addr, old_end,
+ old_pmd, new_pmd);
++ if (need_rmap_locks)
++ anon_vma_unlock_write(vma->anon_vma);
++ }
+ if (err > 0) {
+ need_flush = true;
+ continue;
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index e73f01c56d10..a9b5b7ffc476 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -47,19 +47,21 @@ static DEFINE_SPINLOCK(zone_scan_lock);
+ #ifdef CONFIG_NUMA
+ /**
+ * has_intersects_mems_allowed() - check task eligibility for kill
+- * @tsk: task struct of which task to consider
++ * @start: task struct of which task to consider
+ * @mask: nodemask passed to page allocator for mempolicy ooms
+ *
+ * Task eligibility is determined by whether or not a candidate task, @start,
+ * shares the same mempolicy nodes as current if it is bound by such a policy
+ * and whether or not it has the same set of allowed cpuset nodes.
+ */
+-static bool has_intersects_mems_allowed(struct task_struct *tsk,
++static bool has_intersects_mems_allowed(struct task_struct *start,
+ const nodemask_t *mask)
+ {
+- struct task_struct *start = tsk;
++ struct task_struct *tsk;
++ bool ret = false;
+
+- do {
++ rcu_read_lock();
++ for_each_thread(start, tsk) {
+ if (mask) {
+ /*
+ * If this is a mempolicy constrained oom, tsk's
+@@ -67,19 +69,20 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
+ * mempolicy intersects current, otherwise it may be
+ * needlessly killed.
+ */
+- if (mempolicy_nodemask_intersects(tsk, mask))
+- return true;
++ ret = mempolicy_nodemask_intersects(tsk, mask);
+ } else {
+ /*
+ * This is not a mempolicy constrained oom, so only
+ * check the mems of tsk's cpuset.
+ */
+- if (cpuset_mems_allowed_intersects(current, tsk))
+- return true;
++ ret = cpuset_mems_allowed_intersects(current, tsk);
+ }
+- } while_each_thread(start, tsk);
++ if (ret)
++ break;
++ }
++ rcu_read_unlock();
+
+- return false;
++ return ret;
+ }
+ #else
+ static bool has_intersects_mems_allowed(struct task_struct *tsk,
+@@ -97,16 +100,21 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
+ */
+ struct task_struct *find_lock_task_mm(struct task_struct *p)
+ {
+- struct task_struct *t = p;
++ struct task_struct *t;
+
+- do {
++ rcu_read_lock();
++
++ for_each_thread(p, t) {
+ task_lock(t);
+ if (likely(t->mm))
+- return t;
++ goto found;
+ task_unlock(t);
+- } while_each_thread(p, t);
++ }
++ t = NULL;
++found:
++ rcu_read_unlock();
+
+- return NULL;
++ return t;
+ }
+
+ /* return true if the task is not adequate as candidate victim task. */
+@@ -301,7 +309,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
+ unsigned long chosen_points = 0;
+
+ rcu_read_lock();
+- do_each_thread(g, p) {
++ for_each_process_thread(g, p) {
+ unsigned int points;
+
+ switch (oom_scan_process_thread(p, totalpages, nodemask,
+@@ -319,11 +327,15 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
+ break;
+ };
+ points = oom_badness(p, NULL, nodemask, totalpages);
+- if (points > chosen_points) {
+- chosen = p;
+- chosen_points = points;
+- }
+- } while_each_thread(g, p);
++ if (!points || points < chosen_points)
++ continue;
++ /* Prefer thread group leaders for display purposes */
++ if (points == chosen_points && thread_group_leader(chosen))
++ continue;
++
++ chosen = p;
++ chosen_points = points;
++ }
+ if (chosen)
+ get_task_struct(chosen);
+ rcu_read_unlock();
+@@ -406,7 +418,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ {
+ struct task_struct *victim = p;
+ struct task_struct *child;
+- struct task_struct *t = p;
++ struct task_struct *t;
+ struct mm_struct *mm;
+ unsigned int victim_points = 0;
+ static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
+@@ -437,7 +449,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ * still freeing memory.
+ */
+ read_lock(&tasklist_lock);
+- do {
++ for_each_thread(p, t) {
+ list_for_each_entry(child, &t->children, sibling) {
+ unsigned int child_points;
+
+@@ -455,13 +467,11 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ get_task_struct(victim);
+ }
+ }
+- } while_each_thread(p, t);
++ }
+ read_unlock(&tasklist_lock);
+
+- rcu_read_lock();
+ p = find_lock_task_mm(victim);
+ if (!p) {
+- rcu_read_unlock();
+ put_task_struct(victim);
+ return;
+ } else if (victim != p) {
+@@ -487,6 +497,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ * That thread will now get access to memory reserves since it has a
+ * pending fatal signal.
+ */
++ rcu_read_lock();
+ for_each_process(p)
+ if (p->mm == mm && !same_thread_group(p, victim) &&
+ !(p->flags & PF_KTHREAD)) {
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 7106cb1aca8e..8f6daa62206d 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -593,14 +593,14 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
+ * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
+ * => fast response on large errors; small oscillation near setpoint
+ */
+-static inline long long pos_ratio_polynom(unsigned long setpoint,
++static long long pos_ratio_polynom(unsigned long setpoint,
+ unsigned long dirty,
+ unsigned long limit)
+ {
+ long long pos_ratio;
+ long x;
+
+- x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
++ x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+ limit - setpoint + 1);
+ pos_ratio = x;
+ pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+@@ -842,7 +842,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+ x_intercept = bdi_setpoint + span;
+
+ if (bdi_dirty < x_intercept - span / 4) {
+- pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
++ pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
+ x_intercept - bdi_setpoint + 1);
+ } else
+ pos_ratio /= 4;
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 8c8e08f3a692..25e2ea52db82 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -612,7 +612,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
+ chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
+ sizeof(chunk->map[0]));
+ if (!chunk->map) {
+- kfree(chunk);
++ pcpu_mem_free(chunk, pcpu_chunk_struct_size);
+ return NULL;
+ }
+
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index a3af2b750e96..2eeb6643d78a 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -2993,6 +2993,12 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
+ if (!conn)
+ goto unlock;
+
++ /* For BR/EDR the necessary steps are taken through the
++ * auth_complete event.
++ */
++ if (conn->type != LE_LINK)
++ goto unlock;
++
+ if (!ev->status)
+ conn->sec_level = conn->pending_sec_level;
+
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 464303f61730..ce83d07eb419 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -556,7 +556,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
+ return r;
+ }
+
+-static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
++static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, bool more)
+ {
+ int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
+@@ -569,6 +569,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+ return ret;
+ }
+
++static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
++ int offset, size_t size, bool more)
++{
++ int ret;
++ struct kvec iov;
++
++ /* sendpage cannot properly handle pages with page_count == 0,
+ * we need to fall back to sendmsg if that's the case */
++ if (page_count(page) >= 1)
++ return __ceph_tcp_sendpage(sock, page, offset, size, more);
++
++ iov.iov_base = kmap(page) + offset;
++ iov.iov_len = size;
++ ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
++ kunmap(page);
++
++ return ret;
++}
+
+ /*
+ * Shutdown/close the socket for the given connection.
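The ceph change is a probe-and-fallback: a page whose refcount is zero (such as the zero page) must not be handed to the zero-copy sendpage() path, so it is mapped and pushed through an ordinary buffered send instead. The shape of that wrapper, as a hedged userspace sketch; every helper below is a stand-in, not a real socket or mm API:

#include <stdbool.h>
#include <stddef.h>

struct page;                                    /* opaque here */

/* Stand-ins for the primitives used in the hunk above: */
extern int  page_refcount(struct page *page);   /* page_count() */
extern void *map_page(struct page *page);       /* kmap()       */
extern void unmap_page(struct page *page);      /* kunmap()     */
extern int  send_zero_copy(int sock, struct page *page, int off,
                           size_t len, bool more);
extern int  send_buffer(int sock, const void *buf, size_t len, bool more);

static int send_page(int sock, struct page *page, int off, size_t len, bool more)
{
    /* Fast path only when the page is refcounted and therefore safe
     * to hand to the zero-copy machinery: */
    if (page_refcount(page) >= 1)
        return send_zero_copy(sock, page, off, len, more);

    /* Fallback: copy through an ordinary buffered send. */
    void *base = map_page(page);
    int ret = send_buffer(sock, (char *)base + off, len, more);

    unmap_page(page);
    return ret;
}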
+diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
+index 742815518b0f..4cfb3bd1677c 100644
+--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
++++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
+@@ -22,7 +22,6 @@
+ #endif
+ #include <net/netfilter/nf_conntrack_zones.h>
+
+-/* Returns new sk_buff, or NULL */
+ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
+ {
+ int err;
+@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
+ err = ip_defrag(skb, user);
+ local_bh_enable();
+
+- if (!err)
++ if (!err) {
+ ip_send_check(ip_hdr(skb));
++ skb->local_df = 1;
++ }
+
+ return err;
+ }
+diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
+index 95f3f1da0d7f..d38e6a8d8b9f 100644
+--- a/net/ipv6/netfilter.c
++++ b/net/ipv6/netfilter.c
+@@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ };
++ int err;
+
+ dst = ip6_route_output(net, skb->sk, &fl6);
+- if (dst->error) {
++ err = dst->error;
++ if (err) {
+ IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+ LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
+ dst_release(dst);
+- return dst->error;
++ return err;
+ }
+
+ /* Drop old route. */
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index ab0fbb458c11..23c13165ce83 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -311,6 +311,7 @@ struct ieee80211_roc_work {
+
+ bool started, abort, hw_begun, notified;
+ bool to_be_freed;
++ bool on_channel;
+
+ unsigned long hw_start_time;
+
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index e41c477c6d9f..cd8d55c99ceb 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -3691,18 +3691,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
+
+ sdata_lock(sdata);
+
+- if (ifmgd->auth_data) {
++ if (ifmgd->auth_data || ifmgd->assoc_data) {
++ const u8 *bssid = ifmgd->auth_data ?
++ ifmgd->auth_data->bss->bssid :
++ ifmgd->assoc_data->bss->bssid;
++
+ /*
+- * If we are trying to authenticate while suspending, cfg80211
+- * won't know and won't actually abort those attempts, thus we
+- * need to do that ourselves.
++ * If we are trying to authenticate / associate while suspending,
++ * cfg80211 won't know and won't actually abort those attempts,
++ * thus we need to do that ourselves.
+ */
+- ieee80211_send_deauth_disassoc(sdata,
+- ifmgd->auth_data->bss->bssid,
++ ieee80211_send_deauth_disassoc(sdata, bssid,
+ IEEE80211_STYPE_DEAUTH,
+ WLAN_REASON_DEAUTH_LEAVING,
+ false, frame_buf);
+- ieee80211_destroy_auth_data(sdata, false);
++ if (ifmgd->assoc_data)
++ ieee80211_destroy_assoc_data(sdata, false);
++ if (ifmgd->auth_data)
++ ieee80211_destroy_auth_data(sdata, false);
+ cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
+ IEEE80211_DEAUTH_FRAME_LEN);
+ }
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index 6fb38558a5e6..7a17decd27f9 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
+ container_of(work, struct ieee80211_roc_work, work.work);
+ struct ieee80211_sub_if_data *sdata = roc->sdata;
+ struct ieee80211_local *local = sdata->local;
+- bool started;
++ bool started, on_channel;
+
+ mutex_lock(&local->mtx);
+
+@@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work)
+ if (!roc->started) {
+ struct ieee80211_roc_work *dep;
+
+- /* start this ROC */
+- ieee80211_offchannel_stop_vifs(local);
++ WARN_ON(local->use_chanctx);
++
++ /* If actually operating on the desired channel (with at least
++ * 20 MHz channel width) don't stop all the operations but still
++ * treat it as though the ROC operation started properly, so
++ * other ROC operations won't interfere with this one.
++ */
++ roc->on_channel = roc->chan == local->_oper_chandef.chan &&
++ local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
++ local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
+
+- /* switch channel etc */
++ /* start this ROC */
+ ieee80211_recalc_idle(local);
+
+- local->tmp_channel = roc->chan;
+- ieee80211_hw_config(local, 0);
++ if (!roc->on_channel) {
++ ieee80211_offchannel_stop_vifs(local);
++
++ local->tmp_channel = roc->chan;
++ ieee80211_hw_config(local, 0);
++ }
+
+ /* tell userspace or send frame */
+ ieee80211_handle_roc_started(roc);
+@@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
+ finish:
+ list_del(&roc->list);
+ started = roc->started;
++ on_channel = roc->on_channel;
+ ieee80211_roc_notify_destroy(roc, !roc->abort);
+
+- if (started) {
++ if (started && !on_channel) {
+ ieee80211_flush_queues(local, NULL);
+
+ local->tmp_channel = NULL;
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index 20e86a95dc4e..2f844eec9c6d 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -242,7 +242,6 @@ void cfg80211_conn_work(struct work_struct *work)
+ NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ false, NULL);
+- cfg80211_sme_free(wdev);
+ }
+ wdev_unlock(wdev);
+ }
+@@ -646,6 +645,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
+ cfg80211_unhold_bss(bss_from_pub(bss));
+ cfg80211_put_bss(wdev->wiphy, bss);
+ }
++ cfg80211_sme_free(wdev);
+ return;
+ }
+
+diff --git a/security/device_cgroup.c b/security/device_cgroup.c
+index c123628d3f84..e34818192c8d 100644
+--- a/security/device_cgroup.c
++++ b/security/device_cgroup.c
+@@ -319,57 +319,139 @@ static int devcgroup_seq_read(struct cgroup_subsys_state *css,
+ }
+
+ /**
+- * may_access - verifies if a new exception is part of what is allowed
+- * by a dev cgroup based on the default policy +
+- * exceptions. This is used to make sure a child cgroup
+- * won't have more privileges than its parent or to
+- * verify if a certain access is allowed.
+- * @dev_cgroup: dev cgroup to be tested against
+- * @refex: new exception
+- * @behavior: behavior of the exception
++ * match_exception - iterates the exception list trying to match a rule
++ * based on type, major, minor and access type. It is
++ * considered a match if an exception is found that
++ * will contain the entire range of provided parameters.
++ * @exceptions: list of exceptions
++ * @type: device type (DEV_BLOCK or DEV_CHAR)
++ * @major: device file major number, ~0 to match all
++ * @minor: device file minor number, ~0 to match all
++ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
++ *
++ * returns: true in case it matches an exception completely
+ */
+-static bool may_access(struct dev_cgroup *dev_cgroup,
+- struct dev_exception_item *refex,
+- enum devcg_behavior behavior)
++static bool match_exception(struct list_head *exceptions, short type,
++ u32 major, u32 minor, short access)
+ {
+ struct dev_exception_item *ex;
+- bool match = false;
+
+- rcu_lockdep_assert(rcu_read_lock_held() ||
+- lockdep_is_held(&devcgroup_mutex),
+- "device_cgroup::may_access() called without proper synchronization");
++ list_for_each_entry_rcu(ex, exceptions, list) {
++ if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
++ continue;
++ if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
++ continue;
++ if (ex->major != ~0 && ex->major != major)
++ continue;
++ if (ex->minor != ~0 && ex->minor != minor)
++ continue;
++ /* provided access cannot have more than the exception rule */
++ if (access & (~ex->access))
++ continue;
++ return true;
++ }
++ return false;
++}
++
++/**
++ * match_exception_partial - iterates the exception list trying to match a rule
++ * based on type, major, minor and access type. It is
++ * considered a match if an exception's range is
++ * found to contain *any* of the devices specified by
++ * provided parameters. This is used to make sure no
++ * extra access is being granted that is forbidden by
++ * any exception in the list.
++ * @exceptions: list of exceptions
++ * @type: device type (DEV_BLOCK or DEV_CHAR)
++ * @major: device file major number, ~0 to match all
++ * @minor: device file minor number, ~0 to match all
++ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
++ *
++ * returns: true in case the provided range partially matches an exception
++ */
++static bool match_exception_partial(struct list_head *exceptions, short type,
++ u32 major, u32 minor, short access)
++{
++ struct dev_exception_item *ex;
+
+- list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
+- if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
++ list_for_each_entry_rcu(ex, exceptions, list) {
++ if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+ continue;
+- if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
++ if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+ continue;
+- if (ex->major != ~0 && ex->major != refex->major)
++ /*
++ * We must be sure that both the exception and the provided
++ * range aren't masking all devices
++ */
++ if (ex->major != ~0 && major != ~0 && ex->major != major)
+ continue;
+- if (ex->minor != ~0 && ex->minor != refex->minor)
++ if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
+ continue;
+- if (refex->access & (~ex->access))
++ /*
++ * In order to make sure the provided range isn't matching
++ * an exception, all its access bits shouldn't match the
++ * exception's access bits
++ */
++ if (!(access & ex->access))
+ continue;
+- match = true;
+- break;
++ return true;
+ }
++ return false;
++}
++
++/**
++ * verify_new_ex - verifies if a new exception is part of what is allowed
++ * by a dev cgroup based on the default policy +
++ * exceptions. This is used to make sure a child cgroup
++ * won't have more privileges than its parent
++ * @dev_cgroup: dev cgroup to be tested against
++ * @refex: new exception
++ * @behavior: behavior of the exception's dev_cgroup
++ */
++static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
++ struct dev_exception_item *refex,
++ enum devcg_behavior behavior)
++{
++ bool match = false;
++
++ rcu_lockdep_assert(rcu_read_lock_held() ||
++ lockdep_is_held(&devcgroup_mutex),
++ "device_cgroup:verify_new_ex called without proper synchronization");
+
+ if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+ if (behavior == DEVCG_DEFAULT_ALLOW) {
+- /* the exception will deny access to certain devices */
++ /*
++ * new exception in the child doesn't matter, only
++ * adding extra restrictions
++ */
+ return true;
+ } else {
+- /* the exception will allow access to certain devices */
++ /*
++ * new exception in the child will add more devices
++ * that can be accessed, so it can't match any of
++ * parent's exceptions, even slightly
++ */
++ match = match_exception_partial(&dev_cgroup->exceptions,
++ refex->type,
++ refex->major,
++ refex->minor,
++ refex->access);
++
+ if (match)
+- /*
+- * a new exception allowing access shouldn't
+- * match an parent's exception
+- */
+ return false;
+ return true;
+ }
+ } else {
+- /* only behavior == DEVCG_DEFAULT_DENY allowed here */
++ /*
++ * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
++ * the new exception will add access to more devices and must
++ * be contained completely in a parent's exception to be
++ * allowed
++ */
++ match = match_exception(&dev_cgroup->exceptions, refex->type,
++ refex->major, refex->minor,
++ refex->access);
++
+ if (match)
+ /* parent has an exception that matches the proposed */
+ return true;
+@@ -391,7 +473,38 @@ static int parent_has_perm(struct dev_cgroup *childcg,
+
+ if (!parent)
+ return 1;
+- return may_access(parent, ex, childcg->behavior);
++ return verify_new_ex(parent, ex, childcg->behavior);
++}
++
++/**
++ * parent_allows_removal - verify if it's ok to remove an exception
++ * @childcg: child cgroup from where the exception will be removed
++ * @ex: exception being removed
++ *
++ * When removing an exception in cgroups with default ALLOW policy, it must
++ * be checked if removing it will give the child cgroup more access than the
++ * parent.
++ *
++ * Return: true if it's ok to remove exception, false otherwise
++ */
++static bool parent_allows_removal(struct dev_cgroup *childcg,
++ struct dev_exception_item *ex)
++{
++ struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));
++
++ if (!parent)
++ return true;
++
++ /* It's always allowed to remove access to devices */
++ if (childcg->behavior == DEVCG_DEFAULT_DENY)
++ return true;
++
++ /*
++ * Make sure you're not removing part of or an entire exception existing in
++ * the parent cgroup
++ */
++ return !match_exception_partial(&parent->exceptions, ex->type,
++ ex->major, ex->minor, ex->access);
+ }
+
+ /**
+@@ -629,17 +742,21 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
+
+ switch (filetype) {
+ case DEVCG_ALLOW:
+- if (!parent_has_perm(devcgroup, &ex))
+- return -EPERM;
+ /*
+ * If the default policy is to allow by default, try to remove
+ * an matching exception instead. And be silent about it: we
+ * don't want to break compatibility
+ */
+ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
++ /* Check if the parent allows removing it first */
++ if (!parent_allows_removal(devcgroup, &ex))
++ return -EPERM;
+ dev_exception_rm(devcgroup, &ex);
+- return 0;
++ break;
+ }
++
++ if (!parent_has_perm(devcgroup, &ex))
++ return -EPERM;
+ rc = dev_exception_add(devcgroup, &ex);
+ break;
+ case DEVCG_DENY:
+@@ -720,18 +837,18 @@ static int __devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
+ {
+ struct dev_cgroup *dev_cgroup;
+- struct dev_exception_item ex;
+- int rc;
+-
+- memset(&ex, 0, sizeof(ex));
+- ex.type = type;
+- ex.major = major;
+- ex.minor = minor;
+- ex.access = access;
++ bool rc;
+
+ rcu_read_lock();
+ dev_cgroup = task_devcgroup(current);
+- rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
++ if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
++ /* Can't match any of the exceptions, even partially */
++ rc = !match_exception_partial(&dev_cgroup->exceptions,
++ type, major, minor, access);
++ else
++ /* Need to match completely one exception to be allowed */
++ rc = match_exception(&dev_cgroup->exceptions, type, major,
++ minor, access);
+ rcu_read_unlock();
+
+ if (!rc)
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index b5c4c2e4360b..ee1a6ff120a2 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -3986,6 +3986,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ /* Lynx Point */
+ { PCI_DEVICE(0x8086, 0x8c20),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++ /* 9 Series */
++ { PCI_DEVICE(0x8086, 0x8ca0),
++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ /* Wellsburg */
+ { PCI_DEVICE(0x8086, 0x8d20),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 23e0bc6d6568..2949c8d34d33 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1023,8 +1023,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ AMP_OUT_UNMUTE);
+
+ eld = &per_pin->sink_eld;
+- if (!eld->monitor_present)
++ if (!eld->monitor_present) {
++ hdmi_set_channel_count(codec, per_pin->cvt_nid, channels);
+ return;
++ }
+
+ if (!non_pcm && per_pin->chmap_set)
+ ca = hdmi_manual_channel_allocation(channels, per_pin->chmap);
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index 871f8518626f..ea16dc456352 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -154,6 +154,7 @@ static struct reg_default wm8962_reg[] = {
+ { 40, 0x0000 }, /* R40 - SPKOUTL volume */
+ { 41, 0x0000 }, /* R41 - SPKOUTR volume */
+
++ { 49, 0x0010 }, /* R49 - Class D Control 1 */
+ { 51, 0x0003 }, /* R51 - Class D Control 2 */
+
+ { 56, 0x0506 }, /* R56 - Clocking 4 */
+@@ -795,7 +796,6 @@ static bool wm8962_volatile_register(struct device *dev, unsigned int reg)
+ case WM8962_ALC2:
+ case WM8962_THERMAL_SHUTDOWN_STATUS:
+ case WM8962_ADDITIONAL_CONTROL_4:
+- case WM8962_CLASS_D_CONTROL_1:
+ case WM8962_DC_SERVO_6:
+ case WM8962_INTERRUPT_STATUS_1:
+ case WM8962_INTERRUPT_STATUS_2:
+@@ -2901,13 +2901,22 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
+ static int wm8962_mute(struct snd_soc_dai *dai, int mute)
+ {
+ struct snd_soc_codec *codec = dai->codec;
+- int val;
++ int val, ret;
+
+ if (mute)
+- val = WM8962_DAC_MUTE;
++ val = WM8962_DAC_MUTE | WM8962_DAC_MUTE_ALT;
+ else
+ val = 0;
+
++ /*
++ * The DAC mute bit is mirrored in two registers; update both to keep
++ * the register cache consistent.
++ */
++ ret = snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_1,
++ WM8962_DAC_MUTE_ALT, val);
++ if (ret < 0)
++ return ret;
++
+ return snd_soc_update_bits(codec, WM8962_ADC_DAC_CONTROL_1,
+ WM8962_DAC_MUTE, val);
+ }
+diff --git a/sound/soc/codecs/wm8962.h b/sound/soc/codecs/wm8962.h
+index a1a5d5294c19..910aafd09d21 100644
+--- a/sound/soc/codecs/wm8962.h
++++ b/sound/soc/codecs/wm8962.h
+@@ -1954,6 +1954,10 @@
+ #define WM8962_SPKOUTL_ENA_MASK 0x0040 /* SPKOUTL_ENA */
+ #define WM8962_SPKOUTL_ENA_SHIFT 6 /* SPKOUTL_ENA */
+ #define WM8962_SPKOUTL_ENA_WIDTH 1 /* SPKOUTL_ENA */
++#define WM8962_DAC_MUTE_ALT 0x0010 /* DAC_MUTE */
++#define WM8962_DAC_MUTE_ALT_MASK 0x0010 /* DAC_MUTE */
++#define WM8962_DAC_MUTE_ALT_SHIFT 4 /* DAC_MUTE */
++#define WM8962_DAC_MUTE_ALT_WIDTH 1 /* DAC_MUTE */
+ #define WM8962_SPKOUTL_PGA_MUTE 0x0002 /* SPKOUTL_PGA_MUTE */
+ #define WM8962_SPKOUTL_PGA_MUTE_MASK 0x0002 /* SPKOUTL_PGA_MUTE */
+ #define WM8962_SPKOUTL_PGA_MUTE_SHIFT 1 /* SPKOUTL_PGA_MUTE */
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index 5ecacaa90b53..2d30a9e6aaed 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -91,6 +91,7 @@ struct snd_usb_endpoint {
+ unsigned int curframesize; /* current packet size in frames (for capture) */
+ unsigned int syncmaxsize; /* sync endpoint packet size */
+ unsigned int fill_max:1; /* fill max packet size always */
++ unsigned int udh01_fb_quirk:1; /* corrupted feedback data */
+ unsigned int datainterval; /* log_2 of data packet interval */
+ unsigned int syncinterval; /* P for adaptive mode, 0 otherwise */
+ unsigned char silence_value;
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 93e970f2b3c0..ba106c6c2d3a 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -470,6 +470,10 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
+ ep->syncinterval = 3;
+
+ ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
++
++ if (chip->usb_id == USB_ID(0x0644, 0x8038) /* TEAC UD-H01 */ &&
++ ep->syncmaxsize == 4)
++ ep->udh01_fb_quirk = 1;
+ }
+
+ list_add_tail(&ep->list, &chip->ep_list);
+@@ -1078,7 +1082,16 @@ void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
+ if (f == 0)
+ return;
+
+- if (unlikely(ep->freqshift == INT_MIN)) {
++ if (unlikely(sender->udh01_fb_quirk)) {
++ /*
++ * The TEAC UD-H01 firmware sometimes changes the feedback value
++ * by +/- 0x1.0000.
++ */
++ if (f < ep->freqn - 0x8000)
++ f += 0x10000;
++ else if (f > ep->freqn + 0x8000)
++ f -= 0x10000;
++ } else if (unlikely(ep->freqshift == INT_MIN)) {
+ /*
+ * The first time we see a feedback value, determine its format
+ * by shifting it left or right until it matches the nominal
diff --git a/1500_XATTR_USER_PREFIX.patch b/1500_XATTR_USER_PREFIX.patch
new file mode 100644
index 00000000..cc15cd51
--- /dev/null
+++ b/1500_XATTR_USER_PREFIX.patch
@@ -0,0 +1,54 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+
+This patch adds support for a restricted user-controlled namespace on
+the tmpfs filesystem, used to house PaX flags. The namespace must be
+of the form user.pax.* and its value cannot exceed a size of 8 bytes.
+
+This is needed on all Gentoo systems so that XATTR_PAX flags
+are preserved for users who might build packages using portage on
+a tmpfs system with a non-hardened kernel and then switch to a
+hardened kernel with XATTR_PAX enabled.
+
+The namespace is available to any user on kernels with Extended
+Attribute support enabled for tmpfs. Users who do not enable xattrs
+will not have the XATTR_PAX flags preserved.
+
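As a usage illustration (not part of the patch), here is a minimal
userspace sketch of the namespace this patch carves out: it sets
user.pax.flags on a tmpfs-backed file via setxattr(2), staying within the
8-byte value limit enforced above. The path and flag string are
illustrative assumptions.

    /* Minimal sketch: exercise the user.pax.flags xattr added by this patch. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(void)
    {
            const char *path  = "/tmp/example";  /* assumed to live on tmpfs */
            const char *flags = "em";            /* illustrative flags, <= 8 bytes */

            if (setxattr(path, "user.pax.flags", flags, strlen(flags), 0) != 0) {
                    perror("setxattr");  /* other user.* names get EOPNOTSUPP */
                    return 1;
            }
            return 0;
    }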
+diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
+index e4629b9..6958086 100644
+--- a/include/uapi/linux/xattr.h
++++ b/include/uapi/linux/xattr.h
+@@ -63,5 +63,9 @@
+ #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
+ #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
+
++/* User namespace */
++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
++#define XATTR_PAX_FLAGS_SUFFIX "flags"
++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
+
+ #endif /* _UAPI_LINUX_XATTR_H */
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 1c44af7..f23bb1b 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2201,6 +2201,7 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ static int shmem_xattr_validate(const char *name)
+ {
+ struct { const char *prefix; size_t len; } arr[] = {
++ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
+ { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
+ { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
+ };
+@@ -2256,6 +2257,12 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+ if (err)
+ return err;
+
++ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
++ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
++ return -EOPNOTSUPP;
++ if (size > 8)
++ return -EINVAL;
++ }
+ return simple_xattr_set(&info->xattrs, name, value, size, flags);
+ }
+
diff --git a/1500_selinux-add-SOCK_DIAG_BY_FAMILY-to-the-list-of-netli.patch b/1500_selinux-add-SOCK_DIAG_BY_FAMILY-to-the-list-of-netli.patch
new file mode 100644
index 00000000..34c25306
--- /dev/null
+++ b/1500_selinux-add-SOCK_DIAG_BY_FAMILY-to-the-list-of-netli.patch
@@ -0,0 +1,56 @@
+From 6a96e15096da6e7491107321cfa660c7c2aa119d Mon Sep 17 00:00:00 2001
+From: Paul Moore <pmoore@redhat.com>
+Date: Tue, 28 Jan 2014 14:45:41 -0500
+Subject: [PATCH 1/2] selinux: add SOCK_DIAG_BY_FAMILY to the list of netlink
+ message types
+
+The SELinux AF_NETLINK/NETLINK_SOCK_DIAG socket class was missing the
+SOCK_DIAG_BY_FAMILY definition, which caused SELINUX_ERR messages when
+the ss tool was run.
+
+ # ss
+ Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port
+ u_str ESTAB 0 0 * 14189 * 14190
+ u_str ESTAB 0 0 * 14145 * 14144
+ u_str ESTAB 0 0 * 14151 * 14150
+ {...}
+ # ausearch -m SELINUX_ERR
+ ----
+ time->Thu Jan 23 11:11:16 2014
+ type=SYSCALL msg=audit(1390493476.445:374):
+ arch=c000003e syscall=44 success=yes exit=40
+ a0=3 a1=7fff03aa11f0 a2=28 a3=0 items=0 ppid=1852 pid=1895
+ auid=0 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0
+ tty=pts0 ses=1 comm="ss" exe="/usr/sbin/ss"
+ subj=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 key=(null)
+ type=SELINUX_ERR msg=audit(1390493476.445:374):
+ SELinux: unrecognized netlink message type=20 for sclass=32
+
+Signed-off-by: Paul Moore <pmoore@redhat.com>
+---
+ security/selinux/nlmsgtab.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
+index 332ac8a..2df7b90 100644
+--- a/security/selinux/nlmsgtab.c
++++ b/security/selinux/nlmsgtab.c
+@@ -17,6 +17,7 @@
+ #include <linux/inet_diag.h>
+ #include <linux/xfrm.h>
+ #include <linux/audit.h>
++#include <linux/sock_diag.h>
+
+ #include "flask.h"
+ #include "av_permissions.h"
+@@ -78,6 +79,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
+ {
+ { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
++ { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ };
+
+ static struct nlmsg_perm nlmsg_xfrm_perms[] =
+--
+1.9.2
+
diff --git a/1700_enable-thinkpad-micled.patch b/1700_enable-thinkpad-micled.patch
new file mode 100644
index 00000000..51ca55dc
--- /dev/null
+++ b/1700_enable-thinkpad-micled.patch
@@ -0,0 +1,23 @@
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -5050,8 +5050,10 @@
+ "tpacpi::unknown_led2",
+ "tpacpi::unknown_led3",
+ "tpacpi::thinkvantage",
++ "tpacpi::unknown_led4",
++ "tpacpi::micmute",
+ };
+-#define TPACPI_SAFE_LEDS 0x1081U
++#define TPACPI_SAFE_LEDS 0x5081U
+
+ static inline bool tpacpi_is_led_restricted(const unsigned int led)
+ {
+@@ -5274,7 +5276,7 @@
+ { /* Lenovo */
+ .vendor = PCI_VENDOR_ID_LENOVO,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+- .quirks = 0x1fffU,
++ .quirks = 0x5fffU,
+ },
+ { /* IBM ThinkPads with no EC version string */
+ .vendor = PCI_VENDOR_ID_IBM,
diff --git a/1900_modify-pipe_write-to-first-call-sb_start_write_try-a.patch b/1900_modify-pipe_write-to-first-call-sb_start_write_try-a.patch
new file mode 100644
index 00000000..67dedaff
--- /dev/null
+++ b/1900_modify-pipe_write-to-first-call-sb_start_write_try-a.patch
@@ -0,0 +1,32 @@
+From 34d651f7979e35fde9a4f77adc26a7e8c1e3e54a Mon Sep 17 00:00:00 2001
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+Date: Tue, 10 Dec 2013 10:05:10 -0500
+Subject: [PATCH] Modify pipe_write to first call sb_start_write_trylock() and
+ upon encountering a frozen fs, skip the time update. See kernel bug #65701
+ and Gentoo kernel bug #493002.
+
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/pipe.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 0e0752e..78fd0d0 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -663,10 +663,11 @@ out:
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ }
+- if (ret > 0) {
++ if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
+ int err = file_update_time(filp);
+ if (err)
+ ret = err;
++ sb_end_write(file_inode(filp)->i_sb);
+ }
+ return ret;
+ }
+--
+1.8.3.2
+
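The guarded-update pattern above generalizes beyond pipes. A minimal
sketch of the pairing (not part of the patch; sb_start_write_trylock()
and sb_end_write() are the in-kernel freeze-protection calls the patch
uses, while do_fs_modifying_work() is a hypothetical placeholder):

    /* Sketch: best-effort fs mutation that must not block on a frozen fs. */
    struct super_block *sb = file_inode(filp)->i_sb;

    if (sb_start_write_trylock(sb)) {   /* returns false while fs is frozen */
            do_fs_modifying_work();     /* hypothetical helper */
            sb_end_write(sb);           /* always pair with a successful trylock */
    }                                   /* otherwise: skip the work, don't deadlock */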
diff --git a/2400_kcopy-patch-for-infiniband-driver.patch b/2400_kcopy-patch-for-infiniband-driver.patch
new file mode 100644
index 00000000..759f451e
--- /dev/null
+++ b/2400_kcopy-patch-for-infiniband-driver.patch
@@ -0,0 +1,731 @@
+From 1f52075d672a9bdd0069b3ea68be266ef5c229bd Mon Sep 17 00:00:00 2001
+From: Alexey Shvetsov <alexxy@gentoo.org>
+Date: Tue, 17 Jan 2012 21:08:49 +0400
+Subject: [PATCH] [kcopy] Add kcopy driver
+
+Add the kcopy driver from QLogic to implement zero-copy for the
+InfiniBand PSM userspace driver (a usage sketch follows this patch).
+
+Signed-off-by: Alexey Shvetsov <alexxy@gentoo.org>
+---
+ drivers/char/Kconfig | 2 +
+ drivers/char/Makefile | 2 +
+ drivers/char/kcopy/Kconfig | 17 ++
+ drivers/char/kcopy/Makefile | 4 +
+ drivers/char/kcopy/kcopy.c | 646 +++++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 671 insertions(+)
+ create mode 100644 drivers/char/kcopy/Kconfig
+ create mode 100644 drivers/char/kcopy/Makefile
+ create mode 100644 drivers/char/kcopy/kcopy.c
+
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index ee94686..5b81449 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -6,6 +6,8 @@ menu "Character devices"
+
+ source "drivers/tty/Kconfig"
+
++source "drivers/char/kcopy/Kconfig"
++
+ config DEVKMEM
+ bool "/dev/kmem virtual device support"
+ default y
+diff --git a/drivers/char/Makefile b/drivers/char/Makefile
+index 0dc5d7c..be519d6 100644
+--- a/drivers/char/Makefile
++++ b/drivers/char/Makefile
+@@ -62,3 +62,5 @@
+ js-rtc-y = rtc.o
+
+ obj-$(CONFIG_TILE_SROM) += tile-srom.o
++
++obj-$(CONFIG_KCOPY) += kcopy/
+diff --git a/drivers/char/kcopy/Kconfig b/drivers/char/kcopy/Kconfig
+new file mode 100644
+index 0000000..453ae52
+--- /dev/null
++++ b/drivers/char/kcopy/Kconfig
+@@ -0,0 +1,17 @@
++#
++# KCopy character device configuration
++#
++
++menu "KCopy"
++
++config KCOPY
++ tristate "Memory-to-memory copies using kernel assist"
++ default m
++ ---help---
++ High-performance inter-process memory copies. Can often save a
++ memory copy to shared memory in the application. Useful at least
++ for MPI applications where the point-to-point nature of vmsplice
++ and pipes can be a limiting factor in performance.
++
++endmenu
++
+diff --git a/drivers/char/kcopy/Makefile b/drivers/char/kcopy/Makefile
+new file mode 100644
+index 0000000..9cb269b
+--- /dev/null
++++ b/drivers/char/kcopy/Makefile
+@@ -0,0 +1,4 @@
++#
++# Makefile for the kernel character device drivers.
++#
++obj-$(CONFIG_KCOPY) += kcopy.o
+diff --git a/drivers/char/kcopy/kcopy.c b/drivers/char/kcopy/kcopy.c
+new file mode 100644
+index 0000000..a9f915c
+--- /dev/null
++++ b/drivers/char/kcopy/kcopy.c
+@@ -0,0 +1,646 @@
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/device.h>
++#include <linux/mutex.h>
++#include <linux/mman.h>
++#include <linux/highmem.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/rbtree.h>
++#include <linux/rcupdate.h>
++#include <linux/uaccess.h>
++#include <linux/slab.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Arthur Jones <arthur.jones@qlogic.com>");
++MODULE_DESCRIPTION("QLogic kcopy driver");
++
++#define KCOPY_ABI 1
++#define KCOPY_MAX_MINORS 64
++
++struct kcopy_device {
++ struct cdev cdev;
++ struct class *class;
++ struct device *devp[KCOPY_MAX_MINORS];
++ dev_t dev;
++
++ struct kcopy_file *kf[KCOPY_MAX_MINORS];
++ struct mutex open_lock;
++};
++
++static struct kcopy_device kcopy_dev;
++
++/* per file data / one of these is shared per minor */
++struct kcopy_file {
++ int count;
++
++ /* pid indexed */
++ struct rb_root live_map_tree;
++
++ struct mutex map_lock;
++};
++
++struct kcopy_map_entry {
++ int count;
++ struct task_struct *task;
++ pid_t pid;
++ struct kcopy_file *file; /* file backpointer */
++
++ struct list_head list; /* free map list */
++ struct rb_node node; /* live map tree */
++};
++
++#define KCOPY_GET_SYSCALL 1
++#define KCOPY_PUT_SYSCALL 2
++#define KCOPY_ABI_SYSCALL 3
++
++struct kcopy_syscall {
++ __u32 tag;
++ pid_t pid;
++ __u64 n;
++ __u64 src;
++ __u64 dst;
++};
++
++static const void __user *kcopy_syscall_src(const struct kcopy_syscall *ks)
++{
++ return (const void __user *) (unsigned long) ks->src;
++}
++
++static void __user *kcopy_syscall_dst(const struct kcopy_syscall *ks)
++{
++ return (void __user *) (unsigned long) ks->dst;
++}
++
++static unsigned long kcopy_syscall_n(const struct kcopy_syscall *ks)
++{
++ return (unsigned long) ks->n;
++}
++
++static struct kcopy_map_entry *kcopy_create_entry(struct kcopy_file *file)
++{
++ struct kcopy_map_entry *kme =
++ kmalloc(sizeof(struct kcopy_map_entry), GFP_KERNEL);
++
++ if (!kme)
++ return NULL;
++
++ kme->count = 1;
++ kme->file = file;
++ kme->task = current;
++ kme->pid = current->tgid;
++ INIT_LIST_HEAD(&kme->list);
++
++ return kme;
++}
++
++static struct kcopy_map_entry *
++kcopy_lookup_pid(struct rb_root *root, pid_t pid)
++{
++ struct rb_node *node = root->rb_node;
++
++ while (node) {
++ struct kcopy_map_entry *kme =
++ container_of(node, struct kcopy_map_entry, node);
++
++ if (pid < kme->pid)
++ node = node->rb_left;
++ else if (pid > kme->pid)
++ node = node->rb_right;
++ else
++ return kme;
++ }
++
++ return NULL;
++}
++
++static int kcopy_insert(struct rb_root *root, struct kcopy_map_entry *kme)
++{
++ struct rb_node **new = &(root->rb_node);
++ struct rb_node *parent = NULL;
++
++ while (*new) {
++ struct kcopy_map_entry *tkme =
++ container_of(*new, struct kcopy_map_entry, node);
++
++ parent = *new;
++ if (kme->pid < tkme->pid)
++ new = &((*new)->rb_left);
++ else if (kme->pid > tkme->pid)
++ new = &((*new)->rb_right);
++ else {
++ printk(KERN_INFO "!!! debugging: bad rb tree !!!\n");
++ return -EINVAL;
++ }
++ }
++
++ rb_link_node(&kme->node, parent, new);
++ rb_insert_color(&kme->node, root);
++
++ return 0;
++}
++
++static int kcopy_open(struct inode *inode, struct file *filp)
++{
++ int ret;
++ const int minor = iminor(inode);
++ struct kcopy_file *kf = NULL;
++ struct kcopy_map_entry *kme;
++ struct kcopy_map_entry *okme;
++
++ if (minor < 0 || minor >= KCOPY_MAX_MINORS)
++ return -ENODEV;
++
++ mutex_lock(&kcopy_dev.open_lock);
++
++ if (!kcopy_dev.kf[minor]) {
++ kf = kmalloc(sizeof(struct kcopy_file), GFP_KERNEL);
++
++ if (!kf) {
++ ret = -ENOMEM;
++ goto bail;
++ }
++
++ kf->count = 1;
++ kf->live_map_tree = RB_ROOT;
++ mutex_init(&kf->map_lock);
++ kcopy_dev.kf[minor] = kf;
++ } else {
++ if (filp->f_flags & O_EXCL) {
++ ret = -EBUSY;
++ goto bail;
++ }
++ kcopy_dev.kf[minor]->count++;
++ }
++
++ kme = kcopy_create_entry(kcopy_dev.kf[minor]);
++ if (!kme) {
++ ret = -ENOMEM;
++ goto err_free_kf;
++ }
++
++ kf = kcopy_dev.kf[minor];
++
++ mutex_lock(&kf->map_lock);
++
++ okme = kcopy_lookup_pid(&kf->live_map_tree, kme->pid);
++ if (okme) {
++ /* pid already exists... */
++ okme->count++;
++ kfree(kme);
++ kme = okme;
++ } else
++ ret = kcopy_insert(&kf->live_map_tree, kme);
++
++ mutex_unlock(&kf->map_lock);
++
++ filp->private_data = kme;
++
++ ret = 0;
++ goto bail;
++
++err_free_kf:
++ if (kf) {
++ kcopy_dev.kf[minor] = NULL;
++ kfree(kf);
++ }
++bail:
++ mutex_unlock(&kcopy_dev.open_lock);
++ return ret;
++}
++
++static int kcopy_flush(struct file *filp, fl_owner_t id)
++{
++ struct kcopy_map_entry *kme = filp->private_data;
++ struct kcopy_file *kf = kme->file;
++
++ if (file_count(filp) == 1) {
++ mutex_lock(&kf->map_lock);
++ kme->count--;
++
++ if (!kme->count) {
++ rb_erase(&kme->node, &kf->live_map_tree);
++ kfree(kme);
++ }
++ mutex_unlock(&kf->map_lock);
++ }
++
++ return 0;
++}
++
++static int kcopy_release(struct inode *inode, struct file *filp)
++{
++ const int minor = iminor(inode);
++
++ mutex_lock(&kcopy_dev.open_lock);
++ kcopy_dev.kf[minor]->count--;
++ if (!kcopy_dev.kf[minor]->count) {
++ kfree(kcopy_dev.kf[minor]);
++ kcopy_dev.kf[minor] = NULL;
++ }
++ mutex_unlock(&kcopy_dev.open_lock);
++
++ return 0;
++}
++
++static void kcopy_put_pages(struct page **pages, int npages)
++{
++ int j;
++
++ for (j = 0; j < npages; j++)
++ put_page(pages[j]);
++}
++
++static int kcopy_validate_task(struct task_struct *p)
++{
++ return p && (uid_eq(current_euid(), task_euid(p)) || uid_eq(current_euid(), task_uid(p)));
++}
++
++static int kcopy_get_pages(struct kcopy_file *kf, pid_t pid,
++ struct page **pages, void __user *addr,
++ int write, size_t npages)
++{
++ int err;
++ struct mm_struct *mm;
++ struct kcopy_map_entry *rkme;
++
++ mutex_lock(&kf->map_lock);
++
++ rkme = kcopy_lookup_pid(&kf->live_map_tree, pid);
++ if (!rkme || !kcopy_validate_task(rkme->task)) {
++ err = -EINVAL;
++ goto bail_unlock;
++ }
++
++ mm = get_task_mm(rkme->task);
++ if (unlikely(!mm)) {
++ err = -ENOMEM;
++ goto bail_unlock;
++ }
++
++ down_read(&mm->mmap_sem);
++ err = get_user_pages(rkme->task, mm,
++ (unsigned long) addr, npages, write, 0,
++ pages, NULL);
++
++ if (err < npages && err > 0) {
++ kcopy_put_pages(pages, err);
++ err = -ENOMEM;
++ } else if (err == npages)
++ err = 0;
++
++ up_read(&mm->mmap_sem);
++
++ mmput(mm);
++
++bail_unlock:
++ mutex_unlock(&kf->map_lock);
++
++ return err;
++}
++
++static unsigned long kcopy_copy_pages_from_user(void __user *src,
++ struct page **dpages,
++ unsigned doff,
++ unsigned long n)
++{
++ struct page *dpage = *dpages;
++ char *daddr = kmap(dpage);
++ int ret = 0;
++
++ while (1) {
++ const unsigned long nleft = PAGE_SIZE - doff;
++ const unsigned long nc = (n < nleft) ? n : nleft;
++
++ /* if (copy_from_user(daddr + doff, src, nc)) { */
++ if (__copy_from_user_nocache(daddr + doff, src, nc)) {
++ ret = -EFAULT;
++ goto bail;
++ }
++
++ n -= nc;
++ if (n == 0)
++ break;
++
++ doff += nc;
++ doff &= ~PAGE_MASK;
++ if (doff == 0) {
++ kunmap(dpage);
++ dpages++;
++ dpage = *dpages;
++ daddr = kmap(dpage);
++ }
++
++ src += nc;
++ }
++
++bail:
++ kunmap(dpage);
++
++ return ret;
++}
++
++static unsigned long kcopy_copy_pages_to_user(void __user *dst,
++ struct page **spages,
++ unsigned soff,
++ unsigned long n)
++{
++ struct page *spage = *spages;
++ const char *saddr = kmap(spage);
++ int ret = 0;
++
++ while (1) {
++ const unsigned long nleft = PAGE_SIZE - soff;
++ const unsigned long nc = (n < nleft) ? n : nleft;
++
++ if (copy_to_user(dst, saddr + soff, nc)) {
++ ret = -EFAULT;
++ goto bail;
++ }
++
++ n -= nc;
++ if (n == 0)
++ break;
++
++ soff += nc;
++ soff &= ~PAGE_MASK;
++ if (soff == 0) {
++ kunmap(spage);
++ spages++;
++ spage = *spages;
++ saddr = kmap(spage);
++ }
++
++ dst += nc;
++ }
++
++bail:
++ kunmap(spage);
++
++ return ret;
++}
++
++static unsigned long kcopy_copy_to_user(void __user *dst,
++ struct kcopy_file *kf, pid_t pid,
++ void __user *src,
++ unsigned long n)
++{
++ struct page **pages;
++ const int pages_len = PAGE_SIZE / sizeof(struct page *);
++ int ret = 0;
++
++ pages = (struct page **) __get_free_page(GFP_KERNEL);
++ if (!pages) {
++ ret = -ENOMEM;
++ goto bail;
++ }
++
++ while (n) {
++ const unsigned long soff = (unsigned long) src & ~PAGE_MASK;
++ const unsigned long spages_left =
++ (soff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ const unsigned long spages_cp =
++ min_t(unsigned long, spages_left, pages_len);
++ const unsigned long sbytes =
++ PAGE_SIZE - soff + (spages_cp - 1) * PAGE_SIZE;
++ const unsigned long nbytes = min_t(unsigned long, sbytes, n);
++
++ ret = kcopy_get_pages(kf, pid, pages, src, 0, spages_cp);
++ if (unlikely(ret))
++ goto bail_free;
++
++ ret = kcopy_copy_pages_to_user(dst, pages, soff, nbytes);
++ kcopy_put_pages(pages, spages_cp);
++ if (ret)
++ goto bail_free;
++ dst = (char *) dst + nbytes;
++ src = (char *) src + nbytes;
++
++ n -= nbytes;
++ }
++
++bail_free:
++ free_page((unsigned long) pages);
++bail:
++ return ret;
++}
++
++static unsigned long kcopy_copy_from_user(const void __user *src,
++ struct kcopy_file *kf, pid_t pid,
++ void __user *dst,
++ unsigned long n)
++{
++ struct page **pages;
++ const int pages_len = PAGE_SIZE / sizeof(struct page *);
++ int ret = 0;
++
++ pages = (struct page **) __get_free_page(GFP_KERNEL);
++ if (!pages) {
++ ret = -ENOMEM;
++ goto bail;
++ }
++
++ while (n) {
++ const unsigned long doff = (unsigned long) dst & ~PAGE_MASK;
++ const unsigned long dpages_left =
++ (doff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ const unsigned long dpages_cp =
++ min_t(unsigned long, dpages_left, pages_len);
++ const unsigned long dbytes =
++ PAGE_SIZE - doff + (dpages_cp - 1) * PAGE_SIZE;
++ const unsigned long nbytes = min_t(unsigned long, dbytes, n);
++
++ ret = kcopy_get_pages(kf, pid, pages, dst, 1, dpages_cp);
++ if (unlikely(ret))
++ goto bail_free;
++
++ ret = kcopy_copy_pages_from_user((void __user *) src,
++ pages, doff, nbytes);
++ kcopy_put_pages(pages, dpages_cp);
++ if (ret)
++ goto bail_free;
++
++ dst = (char *) dst + nbytes;
++ src = (char *) src + nbytes;
++
++ n -= nbytes;
++ }
++
++bail_free:
++ free_page((unsigned long) pages);
++bail:
++ return ret;
++}
++
++static int kcopy_do_get(struct kcopy_map_entry *kme, pid_t pid,
++ const void __user *src, void __user *dst,
++ unsigned long n)
++{
++ struct kcopy_file *kf = kme->file;
++ int ret = 0;
++
++ if (n == 0) {
++ ret = -EINVAL;
++ goto bail;
++ }
++
++ ret = kcopy_copy_to_user(dst, kf, pid, (void __user *) src, n);
++
++bail:
++ return ret;
++}
++
++static int kcopy_do_put(struct kcopy_map_entry *kme, const void __user *src,
++ pid_t pid, void __user *dst,
++ unsigned long n)
++{
++ struct kcopy_file *kf = kme->file;
++ int ret = 0;
++
++ if (n == 0) {
++ ret = -EINVAL;
++ goto bail;
++ }
++
++ ret = kcopy_copy_from_user(src, kf, pid, (void __user *) dst, n);
++
++bail:
++ return ret;
++}
++
++static int kcopy_do_abi(u32 __user *dst)
++{
++ u32 val = KCOPY_ABI;
++ int err;
++
++ err = put_user(val, dst);
++ if (err)
++ return -EFAULT;
++
++ return 0;
++}
++
++ssize_t kcopy_write(struct file *filp, const char __user *data, size_t cnt,
++ loff_t *o)
++{
++ struct kcopy_map_entry *kme = filp->private_data;
++ struct kcopy_syscall ks;
++ int err = 0;
++ const void __user *src;
++ void __user *dst;
++ unsigned long n;
++
++ if (cnt != sizeof(struct kcopy_syscall)) {
++ err = -EINVAL;
++ goto bail;
++ }
++
++ err = copy_from_user(&ks, data, cnt);
++ if (unlikely(err))
++ goto bail;
++
++ src = kcopy_syscall_src(&ks);
++ dst = kcopy_syscall_dst(&ks);
++ n = kcopy_syscall_n(&ks);
++ if (ks.tag == KCOPY_GET_SYSCALL)
++ err = kcopy_do_get(kme, ks.pid, src, dst, n);
++ else if (ks.tag == KCOPY_PUT_SYSCALL)
++ err = kcopy_do_put(kme, src, ks.pid, dst, n);
++ else if (ks.tag == KCOPY_ABI_SYSCALL)
++ err = kcopy_do_abi(dst);
++ else
++ err = -EINVAL;
++
++bail:
++ return err ? err : cnt;
++}
++
++static const struct file_operations kcopy_fops = {
++ .owner = THIS_MODULE,
++ .open = kcopy_open,
++ .release = kcopy_release,
++ .flush = kcopy_flush,
++ .write = kcopy_write,
++};
++
++static int __init kcopy_init(void)
++{
++ int ret;
++ const char *name = "kcopy";
++ int i;
++ int ninit = 0;
++
++ mutex_init(&kcopy_dev.open_lock);
++
++ ret = alloc_chrdev_region(&kcopy_dev.dev, 0, KCOPY_MAX_MINORS, name);
++ if (ret)
++ goto bail;
++
++ kcopy_dev.class = class_create(THIS_MODULE, (char *) name);
++
++ if (IS_ERR(kcopy_dev.class)) {
++ ret = PTR_ERR(kcopy_dev.class);
++ printk(KERN_ERR "kcopy: Could not create "
++ "device class (err %d)\n", -ret);
++ goto bail_chrdev;
++ }
++
++ cdev_init(&kcopy_dev.cdev, &kcopy_fops);
++ ret = cdev_add(&kcopy_dev.cdev, kcopy_dev.dev, KCOPY_MAX_MINORS);
++ if (ret < 0) {
++ printk(KERN_ERR "kcopy: Could not add cdev (err %d)\n",
++ -ret);
++ goto bail_class;
++ }
++
++ for (i = 0; i < KCOPY_MAX_MINORS; i++) {
++ char devname[8];
++ const int minor = MINOR(kcopy_dev.dev) + i;
++ const dev_t dev = MKDEV(MAJOR(kcopy_dev.dev), minor);
++
++ snprintf(devname, sizeof(devname), "kcopy%02d", i);
++ kcopy_dev.devp[i] =
++ device_create(kcopy_dev.class, NULL,
++ dev, NULL, devname);
++
++ if (IS_ERR(kcopy_dev.devp[i])) {
++ ret = PTR_ERR(kcopy_dev.devp[i]);
++ printk(KERN_ERR "kcopy: Could not create "
++ "devp %d (err %d)\n", i, -ret);
++ goto bail_cdev_add;
++ }
++
++ ninit++;
++ }
++
++ ret = 0;
++ goto bail;
++
++bail_cdev_add:
++ for (i = 0; i < ninit; i++)
++ device_unregister(kcopy_dev.devp[i]);
++
++ cdev_del(&kcopy_dev.cdev);
++bail_class:
++ class_destroy(kcopy_dev.class);
++bail_chrdev:
++ unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
++bail:
++ return ret;
++}
++
++static void __exit kcopy_fini(void)
++{
++ int i;
++
++ for (i = 0; i < KCOPY_MAX_MINORS; i++)
++ device_unregister(kcopy_dev.devp[i]);
++
++ cdev_del(&kcopy_dev.cdev);
++ class_destroy(kcopy_dev.class);
++ unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
++}
++
++module_init(kcopy_init);
++module_exit(kcopy_fini);
+--
+1.7.10
+
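Because the kcopy command interface is driven entirely through write(2)
on the character device, a userspace sketch may make it concrete. This is
not part of the patch: it queries the driver ABI with KCOPY_ABI_SYSCALL,
mirroring the struct kcopy_syscall layout and tag values from kcopy.c
above; the /dev/kcopy00 path follows the driver's "kcopy%02d" naming, but
the node's existence depends on the local device setup.

    /* Minimal sketch: ask the kcopy driver for its ABI version. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    struct kcopy_syscall {          /* must match the driver's definition */
            uint32_t tag;
            pid_t    pid;
            uint64_t n;
            uint64_t src;
            uint64_t dst;
    };

    int main(void)
    {
            uint32_t abi = 0;
            struct kcopy_syscall ks = {
                    .tag = 3,       /* KCOPY_ABI_SYSCALL */
                    .dst = (uint64_t)(unsigned long)&abi,
            };
            int fd = open("/dev/kcopy00", O_WRONLY);  /* assumed device node */

            if (fd < 0 || write(fd, &ks, sizeof(ks)) != (ssize_t)sizeof(ks)) {
                    perror("kcopy");
                    return 1;
            }
            printf("kcopy ABI: %u\n", abi);  /* driver reports KCOPY_ABI (1) */
            close(fd);
            return 0;
    }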
diff --git a/2700_ThinkPad-30-brightness-control-fix.patch b/2700_ThinkPad-30-brightness-control-fix.patch
new file mode 100644
index 00000000..b548c6dc
--- /dev/null
+++ b/2700_ThinkPad-30-brightness-control-fix.patch
@@ -0,0 +1,67 @@
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index cb96296..6c242ed 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -269,6 +276,61 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ },
+
+ /*
++ * The following Lenovo models have a broken workaround in the
++ * acpi_video backlight implementation to meet the Windows 8
++ * requirement of 101 backlight levels. Reverting to pre-Win8
++ * behavior fixes the problem.
++ */
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Lenovo ThinkPad L430",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L430"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Lenovo ThinkPad T430s",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Lenovo ThinkPad T530",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T530"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Lenovo ThinkPad W530",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Lenovo ThinkPad X1 Carbon",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Lenovo ThinkPad X230",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
++ },
++ },
++
++ /*
+ * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+ * Linux ignores it, except for the machines enumerated below.
+ */
+
diff --git a/2900_dev-root-proc-mount-fix.patch b/2900_dev-root-proc-mount-fix.patch
new file mode 100644
index 00000000..4c89adfe
--- /dev/null
+++ b/2900_dev-root-proc-mount-fix.patch
@@ -0,0 +1,29 @@
+--- a/init/do_mounts.c 2013-01-25 19:11:11.609802424 -0500
++++ b/init/do_mounts.c 2013-01-25 19:14:20.606053568 -0500
+@@ -461,7 +461,10 @@ void __init change_floppy(char *fmt, ...
+ va_start(args, fmt);
+ vsprintf(buf, fmt, args);
+ va_end(args);
+- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++ if (saved_root_name[0])
++ fd = sys_open(saved_root_name, O_RDWR | O_NDELAY, 0);
++ else
++ fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, FDEJECT, 0);
+ sys_close(fd);
+@@ -505,7 +508,13 @@ void __init mount_root(void)
+ #endif
+ #ifdef CONFIG_BLOCK
+ create_dev("/dev/root", ROOT_DEV);
+- mount_block_root("/dev/root", root_mountflags);
++ if (saved_root_name[0]) {
++ create_dev(saved_root_name, ROOT_DEV);
++ mount_block_root(saved_root_name, root_mountflags);
++ } else {
++ create_dev("/dev/root", ROOT_DEV);
++ mount_block_root("/dev/root", root_mountflags);
++ }
+ #endif
+ }
+
diff --git a/2905_s2disk-resume-image-fix.patch b/2905_s2disk-resume-image-fix.patch
new file mode 100644
index 00000000..7e95d298
--- /dev/null
+++ b/2905_s2disk-resume-image-fix.patch
@@ -0,0 +1,24 @@
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index fb32636..d968882 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -575,7 +575,8 @@
+ call_usermodehelper_freeinfo(sub_info);
+ return -EINVAL;
+ }
+- helper_lock();
++ if (!(current->flags & PF_FREEZER_SKIP))
++ helper_lock();
+ if (!khelper_wq || usermodehelper_disabled) {
+ retval = -EBUSY;
+ goto out;
+@@ -611,7 +612,8 @@ wait_done:
+ out:
+ call_usermodehelper_freeinfo(sub_info);
+ unlock:
+- helper_unlock();
++ if (!(current->flags & PF_FREEZER_SKIP))
++ helper_unlock();
+ return retval;
+ }
+ EXPORT_SYMBOL(call_usermodehelper_exec);
diff --git a/4200_fbcondecor-0.9.6.patch b/4200_fbcondecor-0.9.6.patch
new file mode 100644
index 00000000..b0283450
--- /dev/null
+++ b/4200_fbcondecor-0.9.6.patch
@@ -0,0 +1,2179 @@
+diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
+index 30a7054..9b6a733 100644
+--- a/Documentation/fb/00-INDEX
++++ b/Documentation/fb/00-INDEX
+@@ -21,6 +21,8 @@ ep93xx-fb.txt
+ - info on the driver for EP93xx LCD controller.
+ fbcon.txt
+ - intro to and usage guide for the framebuffer console (fbcon).
++fbcondecor.txt
++ - info on the Framebuffer Console Decoration
+ framebuffer.txt
+ - introduction to frame buffer devices.
+ gxfb.txt
+diff --git a/Documentation/fb/fbcondecor.txt b/Documentation/fb/fbcondecor.txt
+new file mode 100644
+index 0000000..15889f3
+--- /dev/null
++++ b/Documentation/fb/fbcondecor.txt
+@@ -0,0 +1,207 @@
++What is it?
++-----------
++
++The framebuffer decorations are a kernel feature which allows displaying a
++background picture on selected consoles.
++
++What do I need to get it to work?
++---------------------------------
++
++To get fbcondecor up-and-running you will have to:
++ 1) get a copy of splashutils [1] or a similar program
++ 2) get some fbcondecor themes
++ 3) build the kernel helper program
++ 4) build your kernel with the FB_CON_DECOR option enabled.
++
++To get fbcondecor operational right after fbcon initialization is finished, you
++will have to include a theme and the kernel helper into your initramfs image.
++Please refer to splashutils documentation for instructions on how to do that.
++
++[1] The splashutils package can be downloaded from:
++ http://dev.gentoo.org/~spock/projects/splashutils/
++
++The userspace helper
++--------------------
++
++The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is
++called by the kernel whenever an important event occurs and a job needs
++to be carried out. Important events include console switches and video
++mode switches (the kernel requests background images and configuration
++parameters for the current console). The fbcondecor helper must be
++accessible at all times. If it's not, fbcondecor is switched off automatically.
++
++It's possible to set the path to the fbcondecor helper by writing it to
++/proc/sys/kernel/fbcondecor.
++
++*****************************************************************************
++
++The information below is mostly technical stuff. There's probably no need to
++read it unless you plan to develop a userspace helper.
++
++The fbcondecor protocol
++-----------------------
++
++The fbcondecor protocol defines a communication interface between the kernel and
++the userspace fbcondecor helper.
++
++The kernel side is responsible for:
++
++ * rendering console text, using an image as a background (instead of a
++ standard solid color fbcon uses),
++ * accepting commands from the user via ioctls on the fbcondecor device,
++ * calling the userspace helper to set things up as soon as the fb subsystem
++ is initialized.
++
++The userspace helper is responsible for everything else, including parsing
++configuration files, decompressing the image files whenever the kernel needs
++it, and communicating with the kernel if necessary.
++
++The fbcondecor protocol specifies how communication is done in both ways:
++kernel->userspace and userspace->kernel.
++
++Kernel -> Userspace
++-------------------
++
++The kernel communicates with the userspace helper by calling it and specifying
++the task to be done in a series of arguments.
++
++The arguments follow the pattern:
++<fbcondecor protocol version> <command> <parameters>
++
++All commands defined in fbcondecor protocol v2 have the following parameters:
++ virtual console
++ framebuffer number
++ theme
++
++Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
++framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
++
++Fbcondecor protocol v2 specifies the following commands:
++
++getpic
++------
++ The kernel issues this command to request image data. It's up to the
++ userspace helper to find a background image appropriate for the specified
++ theme and the current resolution. The userspace helper should respond by
++ issuing the FBIOCONDECOR_SETPIC ioctl.
++
++init
++----
++ The kernel issues this command after the fbcondecor device is created and
++ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
++ helper should parse the kernel command line (/proc/cmdline) or otherwise
++ decide whether fbcondecor is to be activated.
++
++ To activate fbcondecor on the first console the helper should issue the
++ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
++ in the above-mentioned order.
++
++ When the userspace helper is called in an early phase of the boot process
++ (right after the initialization of fbcon), no filesystems will be mounted.
++ The helper program should mount sysfs and then create the appropriate
++ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
++ current display settings and to be able to communicate with the kernel side.
++ It should probably also mount the procfs to be able to parse the kernel
++ command line parameters.
++
++ Note that the console sem is not held when the kernel calls fbcondecor_helper
++ with the 'init' command. The fbcondecor helper should perform all ioctls with
++ origin set to FBCON_DECOR_IO_ORIG_USER.
++
++modechange
++----------
++ The kernel issues this command on a mode change. The helper's response should
++ be similar to the response to the 'init' command. Note that this time the
++ console sem is held and all ioctls must be performed with origin set to
++ FBCON_DECOR_IO_ORIG_KERNEL.
++
++
++Userspace -> Kernel
++-------------------
++
++Userspace programs can communicate with fbcondecor via ioctls on the
++fbcondecor device. These ioctls are to be used by both the userspace helper
++(called only by the kernel) and userspace configuration tools (run by the users).
++
++The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
++when doing the appropriate ioctls. All userspace configuration tools should
++use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
++field when performing ioctls from the kernel helper will most likely result
++in a console deadlock.
++
++FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
++semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
++the console sem.
++
++The framebuffer console decoration provides the following ioctls (all defined in
++linux/fb.h):
++
++FBIOCONDECOR_SETPIC
++description: loads a background picture for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
++notes:
++If called for consoles other than the current foreground one, the picture data
++will be ignored.
++
++If the current virtual console is running in an 8-bpp mode, the cmap substruct
++of fb_image has to be filled appropriately: start should be set to 16 (the first
++16 colors are reserved for fbcon), len to a value <= 240, and red, green and
++blue should point to valid cmap data. The transp field is ignored. The fields
++dx, dy, bg_color, fg_color in fb_image are ignored as well.
++
++FBIOCONDECOR_SETCFG
++description: sets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++notes: The structure has to be filled with valid data.
++
++FBIOCONDECOR_GETCFG
++description: gets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++
++FBIOCONDECOR_SETSTATE
++description: sets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++ values: 0 = disabled, 1 = enabled.
++
++FBIOCONDECOR_GETSTATE
++description: gets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++ values: as in FBIOCONDECOR_SETSTATE
++
++Info on used structures:
++
++Definition of struct vc_decor can be found in linux/console_decor.h. It's
++heavily commented. Note that the 'theme' field should point to a string
++no longer than FBCON_DECOR_THEME_LEN. When FBIOCONDECOR_GETCFG call is
++performed, the theme field should point to a char buffer of length
++FBCON_DECOR_THEME_LEN.
++
++Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
++The fields in this struct have the following meaning:
++
++vc:
++Virtual console number.
++
++origin:
++Specifies if the ioctl is performed as a response to a kernel request. The
++fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
++programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
++avoid console semaphore deadlocks.
++
++data:
++Pointer to a data structure appropriate for the performed ioctl. Type of
++the data struct is specified in the ioctls description.
++
++*****************************************************************************
++
++Credit
++------
++
++Original 'bootsplash' project & implementation by:
++ Volker Poplawski <volker@poplawski.de>, Stefan Reinauer <stepan@suse.de>,
++ Steffen Winterfeldt <snwint@suse.de>, Michael Schroeder <mls@suse.de>,
++ Ken Wimer <wimer@suse.de>.
++
++Fbcondecor, fbcondecor protocol design, current implementation & docs by:
++ Michal Januszewski <spock@gentoo.org>
++
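To make the ioctl interface documented above concrete, here is a hedged
userspace sketch (not part of the patch): it enables decorations on a
virtual console with FBIOCONDECOR_SETSTATE, filling struct
fbcon_decor_iowrapper by field name as described in the text. It assumes
the patched linux/fb.h is in the include path and that a /dev/fbcondecor
node exists; both are assumptions about the target system rather than
guarantees of this document.

    /* Minimal sketch: enable fbcondecor on VT 1 from a userspace tool. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fb.h>  /* patched header: FBIOCONDECOR_*, wrapper, origins */

    int main(void)
    {
            unsigned int state = 1;                   /* 1 = enabled, 0 = disabled */
            struct fbcon_decor_iowrapper wrapper = {
                    .vc     = 1,                      /* virtual console number */
                    .origin = FBCON_DECOR_IO_ORIG_USER, /* plain userspace caller */
                    .data   = &state,
            };
            int fd = open("/dev/fbcondecor", O_RDWR); /* assumed device node */

            if (fd < 0 || ioctl(fd, FBIOCONDECOR_SETSTATE, &wrapper) < 0) {
                    perror("fbcondecor");
                    return 1;
            }
            close(fd);
            return 0;
    }

For the kernel->userspace direction, a helper invocation matching the v2
argument pattern above would look like this (theme name illustrative):

    /sbin/fbcondecor_helper 2 getpic 1 0 mytheme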
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 95952c8..b55db6d 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -16,4 +16,8 @@ obj-$(CONFIG_PCI) += pci/
+ obj-$(CONFIG_PARISC) += parisc/
+ obj-$(CONFIG_RAPIDIO) += rapidio/
++# tty/ comes before char/ so that the VT console is the boot-time
++# default.
++obj-y += tty/
++obj-y += char/
+ obj-y += video/
+ obj-y += idle/
+@@ -37,11 +41,6 @@ obj-$(CONFIG_XEN) += xen/
+ # regulators early, since some subsystems rely on them to initialize
+ obj-$(CONFIG_REGULATOR) += regulator/
+
+-# tty/ comes before char/ so that the VT console is the boot-time
+-# default.
+-obj-y += tty/
+-obj-y += char/
+-
+ # gpu/ comes after char for AGP vs DRM startup
+ obj-y += gpu/
+
+diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
+index a290be5..3a4ca32 100644
+--- a/drivers/video/Kconfig
++++ b/drivers/video/Kconfig
+@@ -1229,7 +1229,6 @@ config FB_MATROX
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+- select FB_TILEBLITTING
+ select FB_MACMODES if PPC_PMAC
+ ---help---
+ Say Y here if you have a Matrox Millennium, Matrox Millennium II,
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index c2d11fe..1be9de4 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -120,6 +120,19 @@ config FRAMEBUFFER_CONSOLE_ROTATION
+ such that other users of the framebuffer will remain normally
+ oriented.
+
++config FB_CON_DECOR
++ bool "Support for the Framebuffer Console Decorations"
++ depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
++ default n
++ ---help---
++ This option enables support for framebuffer console decorations which
++ makes it possible to display images in the background of the system
++ consoles. Note that userspace utilities are necessary in order to take
++ advantage of these features. Refer to Documentation/fb/fbcondecor.txt
++ for more information.
++
++ If unsure, say N.
++
+ config STI_CONSOLE
+ bool "STI text console"
+ depends on PARISC
+--- a/drivers/video/console/Makefile 2013-08-26 14:02:39.905817618 -0400
++++ b/drivers/video/console/Makefile 2013-08-26 14:05:06.258848595 -0400
+@@ -16,4 +16,5 @@ obj-$(CONFIG_FRAMEBUFFER_CONSOLE) +=
+ fbcon_ccw.o
+ endif
+
++obj-$(CONFIG_FB_CON_DECOR) += fbcondecor.o cfbcondecor.o
+ obj-$(CONFIG_FB_STI) += sticore.o
+diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
+index 28b1a83..33712c0 100644
+--- a/drivers/video/console/bitblit.c
++++ b/drivers/video/console/bitblit.c
+@@ -18,6 +18,7 @@
+ #include <linux/console.h>
+ #include <asm/types.h>
+ #include "fbcon.h"
++#include "fbcondecor.h"
+
+ /*
+ * Accelerated handlers.
+@@ -55,6 +56,13 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
++ if (fbcon_decor_active(info, vc)) {
++ area.sx += vc->vc_decor.tx;
++ area.sy += vc->vc_decor.ty;
++ area.dx += vc->vc_decor.tx;
++ area.dy += vc->vc_decor.ty;
++ }
++
+ info->fbops->fb_copyarea(info, &area);
+ }
+
+@@ -380,11 +388,15 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ cursor.image.depth = 1;
+ cursor.rop = ROP_XOR;
+
+- if (info->fbops->fb_cursor)
+- err = info->fbops->fb_cursor(info, &cursor);
++ if (fbcon_decor_active(info, vc)) {
++ fbcon_decor_cursor(info, &cursor);
++ } else {
++ if (info->fbops->fb_cursor)
++ err = info->fbops->fb_cursor(info, &cursor);
+
+- if (err)
+- soft_cursor(info, &cursor);
++ if (err)
++ soft_cursor(info, &cursor);
++ }
+
+ ops->cursor_reset = 0;
+ }
+diff --git a/drivers/video/console/cfbcondecor.c b/drivers/video/console/cfbcondecor.c
+new file mode 100644
+index 0000000..09381d3
+--- /dev/null
++++ b/drivers/video/console/cfbcondecor.c
+@@ -0,0 +1,471 @@
++/*
++ * linux/drivers/video/cfbcon_decor.c -- Framebuffer decor render functions
++ *
++ * Copyright (C) 2004 Michal Januszewski <spock@gentoo.org>
++ *
++ * Code based upon "Bootdecor" (C) 2001-2003
++ * Volker Poplawski <volker@poplawski.de>,
++ * Stefan Reinauer <stepan@suse.de>,
++ * Steffen Winterfeldt <snwint@suse.de>,
++ * Michael Schroeder <mls@suse.de>,
++ * Ken Wimer <wimer@suse.de>.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/selection.h>
++#include <linux/slab.h>
++#include <linux/vt_kern.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++#define parse_pixel(shift,bpp,type) \
++ do { \
++ if (d & (0x80 >> (shift))) \
++ dd2[(shift)] = fgx; \
++ else \
++ dd2[(shift)] = transparent ? *(type *)decor_src : bgx; \
++ decor_src += (bpp); \
++ } while (0) \
++
++extern int get_color(struct vc_data *vc, struct fb_info *info,
++ u16 c, int is_fg);
++
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
++{
++ int i, j, k;
++ int minlen = min(min(info->var.red.length, info->var.green.length),
++ info->var.blue.length);
++ u32 col;
++
++ for (j = i = 0; i < 16; i++) {
++ k = color_table[i];
++
++ col = ((vc->vc_palette[j++] >> (8-minlen))
++ << info->var.red.offset);
++ col |= ((vc->vc_palette[j++] >> (8-minlen))
++ << info->var.green.offset);
++ col |= ((vc->vc_palette[j++] >> (8-minlen))
++ << info->var.blue.offset);
++ ((u32 *)info->pseudo_palette)[k] = col;
++ }
++}
++
++void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
++ int width, u8* src, u32 fgx, u32 bgx, u8 transparent)
++{
++ unsigned int x, y;
++ u32 dd;
++ int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++ unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
++ unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
++ u16 dd2[4];
++
++ u8* decor_src = (u8 *)(info->bgdecor.data + ds);
++ u8* dst = (u8 *)(info->screen_base + d);
++
++ if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
++ return;
++
++ for (y = 0; y < height; y++) {
++ switch (info->var.bits_per_pixel) {
++
++ case 32:
++ for (x = 0; x < width; x++) {
++
++ if ((x & 7) == 0)
++ d = *src++;
++ if (d & 0x80)
++ dd = fgx;
++ else
++ dd = transparent ?
++ *(u32 *)decor_src : bgx;
++
++ d <<= 1;
++ decor_src += 4;
++ fb_writel(dd, dst);
++ dst += 4;
++ }
++ break;
++ case 24:
++ for (x = 0; x < width; x++) {
++
++ if ((x & 7) == 0)
++ d = *src++;
++ if (d & 0x80)
++ dd = fgx;
++ else
++ dd = transparent ?
++ (*(u32 *)decor_src & 0xffffff) : bgx;
++
++ d <<= 1;
++ decor_src += 3;
++#ifdef __LITTLE_ENDIAN
++ fb_writew(dd & 0xffff, dst);
++ dst += 2;
++ fb_writeb((dd >> 16), dst);
++#else
++ fb_writew(dd >> 8, dst);
++ dst += 2;
++ fb_writeb(dd & 0xff, dst);
++#endif
++ dst++;
++ }
++ break;
++ case 16:
++ for (x = 0; x < width; x += 2) {
++ if ((x & 7) == 0)
++ d = *src++;
++
++ parse_pixel(0, 2, u16);
++ parse_pixel(1, 2, u16);
++#ifdef __LITTLE_ENDIAN
++ dd = dd2[0] | (dd2[1] << 16);
++#else
++ dd = dd2[1] | (dd2[0] << 16);
++#endif
++ d <<= 2;
++ fb_writel(dd, dst);
++ dst += 4;
++ }
++ break;
++
++ case 8:
++ for (x = 0; x < width; x += 4) {
++ if ((x & 7) == 0)
++ d = *src++;
++
++ parse_pixel(0, 1, u8);
++ parse_pixel(1, 1, u8);
++ parse_pixel(2, 1, u8);
++ parse_pixel(3, 1, u8);
++
++#ifdef __LITTLE_ENDIAN
++ dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
++#else
++ dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
++#endif
++ d <<= 4;
++ fb_writel(dd, dst);
++ dst += 4;
++ }
++ }
++
++ dst += info->fix.line_length - width * bytespp;
++ decor_src += (info->var.xres - width) * bytespp;
++ }
++}
++
++#define cc2cx(a) \
++ ((info->fix.visual == FB_VISUAL_TRUECOLOR || \
++ info->fix.visual == FB_VISUAL_DIRECTCOLOR) ? \
++ ((u32*)info->pseudo_palette)[a] : a)
++
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
++ const unsigned short *s, int count, int yy, int xx)
++{
++ unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
++ struct fbcon_ops *ops = info->fbcon_par;
++ int fg_color, bg_color, transparent;
++ u8 *src;
++ u32 bgx, fgx;
++ u16 c = scr_readw(s);
++
++ fg_color = get_color(vc, info, c, 1);
++ bg_color = get_color(vc, info, c, 0);
++
++ /* Don't paint the background image if console is blanked */
++ transparent = ops->blank_state ? 0 :
++ (vc->vc_decor.bg_color == bg_color);
++
++ xx = xx * vc->vc_font.width + vc->vc_decor.tx;
++ yy = yy * vc->vc_font.height + vc->vc_decor.ty;
++
++ fgx = cc2cx(fg_color);
++ bgx = cc2cx(bg_color);
++
++ while (count--) {
++ c = scr_readw(s++);
++ src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
++ ((vc->vc_font.width + 7) >> 3);
++
++ fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
++ vc->vc_font.width, src, fgx, bgx, transparent);
++ xx += vc->vc_font.width;
++ }
++}
++
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
++{
++ int i;
++ unsigned int dsize, s_pitch;
++ struct fbcon_ops *ops = info->fbcon_par;
++ struct vc_data* vc;
++ u8 *src;
++
++ /* we really don't need any cursors while the console is blanked */
++ if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
++ return;
++
++ vc = vc_cons[ops->currcon].d;
++
++ src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
++ if (!src)
++ return;
++
++ s_pitch = (cursor->image.width + 7) >> 3;
++ dsize = s_pitch * cursor->image.height;
++ if (cursor->enable) {
++ switch (cursor->rop) {
++ case ROP_XOR:
++ for (i = 0; i < dsize; i++)
++ src[i] = cursor->image.data[i] ^ cursor->mask[i];
++ break;
++ case ROP_COPY:
++ default:
++ for (i = 0; i < dsize; i++)
++ src[i] = cursor->image.data[i] & cursor->mask[i];
++ break;
++ }
++ } else
++ memcpy(src, cursor->image.data, dsize);
++
++ fbcon_decor_renderc(info,
++ cursor->image.dy + vc->vc_decor.ty,
++ cursor->image.dx + vc->vc_decor.tx,
++ cursor->image.height,
++ cursor->image.width,
++ (u8*)src,
++ cc2cx(cursor->image.fg_color),
++ cc2cx(cursor->image.bg_color),
++ cursor->image.bg_color == vc->vc_decor.bg_color);
++
++ kfree(src);
++}
++
++static void decorset(u8 *dst, int height, int width, int dstbytes,
++ u32 bgx, int bpp)
++{
++ int i;
++
++ if (bpp == 8)
++ bgx |= bgx << 8;
++ if (bpp == 16 || bpp == 8)
++ bgx |= bgx << 16;
++
++ while (height-- > 0) {
++ u8 *p = dst;
++
++ switch (bpp) {
++
++ case 32:
++ for (i=0; i < width; i++) {
++ fb_writel(bgx, p); p += 4;
++ }
++ break;
++ case 24:
++ for (i=0; i < width; i++) {
++#ifdef __LITTLE_ENDIAN
++ fb_writew((bgx & 0xffff),(u16*)p); p += 2;
++ fb_writeb((bgx >> 16),p++);
++#else
++ fb_writew((bgx >> 8),(u16*)p); p += 2;
++ fb_writeb((bgx & 0xff),p++);
++#endif
++ }
++ break;
++ case 16:
++ for (i=0; i < width/4; i++) {
++ fb_writel(bgx,p); p += 4;
++ fb_writel(bgx,p); p += 4;
++ }
++ if (width & 2) {
++ fb_writel(bgx,p); p += 4;
++ }
++ if (width & 1)
++ fb_writew(bgx,(u16*)p);
++ break;
++ case 8:
++ for (i=0; i < width/4; i++) {
++ fb_writel(bgx,p); p += 4;
++ }
++
++ if (width & 2) {
++ fb_writew(bgx,p); p += 2;
++ }
++ if (width & 1)
++ fb_writeb(bgx,(u8*)p);
++ break;
++
++ }
++ dst += dstbytes;
++ }
++}
++
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
++ int srclinebytes, int bpp)
++{
++ int i;
++
++ while (height-- > 0) {
++ u32 *p = (u32 *)dst;
++ u32 *q = (u32 *)src;
++
++ switch (bpp) {
++
++ case 32:
++ for (i=0; i < width; i++)
++ fb_writel(*q++, p++);
++ break;
++ case 24:
++ for (i=0; i < (width*3/4); i++)
++ fb_writel(*q++, p++);
++ if ((width*3) % 4) {
++ if (width & 2) {
++ fb_writeb(*(u8*)q, (u8*)p);
++ } else if (width & 1) {
++ fb_writew(*(u16*)q, (u16*)p);
++ fb_writeb(*(u8*)((u16*)q+1),(u8*)((u16*)p+2));
++ }
++ }
++ break;
++ case 16:
++ for (i=0; i < width/4; i++) {
++ fb_writel(*q++, p++);
++ fb_writel(*q++, p++);
++ }
++ if (width & 2)
++ fb_writel(*q++, p++);
++ if (width & 1)
++ fb_writew(*(u16*)q, (u16*)p);
++ break;
++ case 8:
++ for (i=0; i < width/4; i++)
++ fb_writel(*q++, p++);
++
++ if (width & 2) {
++ fb_writew(*(u16*)q, (u16*)p);
++ q = (u32*) ((u16*)q + 1);
++ p = (u32*) ((u16*)p + 1);
++ }
++ if (width & 1)
++ fb_writeb(*(u8*)q, (u8*)p);
++ break;
++ }
++
++ dst += linebytes;
++ src += srclinebytes;
++ }
++}
++
++static void decorfill(struct fb_info *info, int sy, int sx, int height,
++ int width)
++{
++ int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++ int d = sy * info->fix.line_length + sx * bytespp;
++ int ds = (sy * info->var.xres + sx) * bytespp;
++
++ fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
++ height, width, info->fix.line_length, info->var.xres * bytespp,
++ info->var.bits_per_pixel);
++}
++
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
++ int height, int width)
++{
++ int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
++ struct fbcon_ops *ops = info->fbcon_par;
++ u8 *dst;
++ int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
++
++ transparent = (vc->vc_decor.bg_color == bg_color);
++ sy = sy * vc->vc_font.height + vc->vc_decor.ty;
++ sx = sx * vc->vc_font.width + vc->vc_decor.tx;
++ height *= vc->vc_font.height;
++ width *= vc->vc_font.width;
++
++ /* Don't paint the background image if console is blanked */
++ if (transparent && !ops->blank_state) {
++ decorfill(info, sy, sx, height, width);
++ } else {
++ dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
++ sx * ((info->var.bits_per_pixel + 7) >> 3));
++ decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
++ info->var.bits_per_pixel);
++ }
++}
++
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
++ int bottom_only)
++{
++ unsigned int tw = vc->vc_cols*vc->vc_font.width;
++ unsigned int th = vc->vc_rows*vc->vc_font.height;
++
++ if (!bottom_only) {
++ /* top margin */
++ decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
++ /* left margin */
++ decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
++ /* right margin */
++ decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th,
++ info->var.xres - vc->vc_decor.tx - tw);
++ }
++ decorfill(info, vc->vc_decor.ty + th, 0,
++ info->var.yres - vc->vc_decor.ty - th, info->var.xres);
++}
++
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y,
++ int sx, int dx, int width)
++{
++ u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
++ u16 *s = d + (dx - sx);
++ u16 *start = d;
++ u16 *ls = d;
++ u16 *le = d + width;
++ u16 c;
++ int x = dx;
++ u16 attr = 1;
++
++ do {
++ c = scr_readw(d);
++ if (attr != (c & 0xff00)) {
++ attr = c & 0xff00;
++ if (d > start) {
++ fbcon_decor_putcs(vc, info, start, d - start, y, x);
++ x += d - start;
++ start = d;
++ }
++ }
++ if (s >= ls && s < le && c == scr_readw(s)) {
++ if (d > start) {
++ fbcon_decor_putcs(vc, info, start, d - start, y, x);
++ x += d - start + 1;
++ start = d + 1;
++ } else {
++ x++;
++ start++;
++ }
++ }
++ s++;
++ d++;
++ } while (d < le);
++ if (d > start)
++ fbcon_decor_putcs(vc, info, start, d - start, y, x);
++}
++
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
++{
++ if (blank) {
++ decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
++ info->fix.line_length, 0, info->var.bits_per_pixel);
++ } else {
++ update_screen(vc);
++ fbcon_decor_clear_margins(vc, info, 0);
++ }
++}
++
+From ea6ca92753106f1e0773acd1f18c71ae79a6f9b0 Mon Sep 17 00:00:00 2001
+From: Mike Pagano <mpagano@gentoo.org>
+Date: Tue, 27 Aug 2013 07:58:05 -0400
+Subject: [PATCH] fbcondecor port
+
+---
+ drivers/video/console/fbcon.c | 167 ++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 143 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index cd8a802..666556c 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -79,6 +79,7 @@
+ #include <asm/irq.h>
+
+ #include "fbcon.h"
++#include "fbcondecor.h"
+
+ #ifdef FBCONDEBUG
+ # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+@@ -94,7 +95,7 @@ enum {
+
+ static struct display fb_display[MAX_NR_CONSOLES];
+
+-static signed char con2fb_map[MAX_NR_CONSOLES];
++signed char con2fb_map[MAX_NR_CONSOLES];
+ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+
+ static int logo_lines;
+@@ -286,7 +287,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
+ !vt_force_oops_output(vc);
+ }
+
+-static int get_color(struct vc_data *vc, struct fb_info *info,
++int get_color(struct vc_data *vc, struct fb_info *info,
+ u16 c, int is_fg)
+ {
+ int depth = fb_get_color_depth(&info->var, &info->fix);
+@@ -551,6 +552,9 @@ static int do_fbcon_takeover(int show_logo)
+ info_idx = -1;
+ } else {
+ fbcon_has_console_bind = 1;
++#ifdef CONFIG_FB_CON_DECOR
++ fbcon_decor_init();
++#endif
+ }
+
+ return err;
+@@ -1007,6 +1011,12 @@ static const char *fbcon_startup(void)
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols /= vc->vc_font.width;
+ rows /= vc->vc_font.height;
++
++ if (fbcon_decor_active(info, vc)) {
++ cols = vc->vc_decor.twidth / vc->vc_font.width;
++ rows = vc->vc_decor.theight / vc->vc_font.height;
++ }
++
+ vc_resize(vc, cols, rows);
+
+ DPRINTK("mode: %s\n", info->fix.id);
+@@ -1036,7 +1046,7 @@ static void fbcon_init(struct vc_data *vc, int init)
+ cap = info->flags;
+
+ if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
+- (info->fix.type == FB_TYPE_TEXT))
++ (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
+ logo = 0;
+
+ if (var_to_display(p, &info->var, info))
+@@ -1260,6 +1270,11 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
+ fbcon_clear_margins(vc, 0);
+ }
+
++ if (fbcon_decor_active(info, vc)) {
++ fbcon_decor_clear(vc, info, sy, sx, height, width);
++ return;
++ }
++
+ /* Split blits that cross physical y_wrap boundary */
+
+ y_break = p->vrows - p->yscroll;
+@@ -1279,10 +1294,15 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
+ struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+- if (!fbcon_is_inactive(vc, info))
+- ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+- get_color(vc, info, scr_readw(s), 1),
+- get_color(vc, info, scr_readw(s), 0));
++ if (!fbcon_is_inactive(vc, info)) {
++
++ if (fbcon_decor_active(info, vc))
++ fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
++ else
++ ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
++ get_color(vc, info, scr_readw(s), 1),
++ get_color(vc, info, scr_readw(s), 0));
++ }
+ }
+
+ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+@@ -1297,9 +1317,6 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
+ {
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+-
+- if (!fbcon_is_inactive(vc, info))
+- ops->clear_margins(vc, info, bottom_only);
+ }
+
+ static void fbcon_cursor(struct vc_data *vc, int mode)
+@@ -1819,7 +1836,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ count = vc->vc_rows;
+ if (softback_top)
+ fbcon_softback_note(vc, t, count);
+- if (logo_shown >= 0)
++ if (logo_shown >= 0 || fbcon_decor_active(info, vc))
+ goto redraw_up;
+ switch (p->scrollmode) {
+ case SCROLL_MOVE:
+@@ -1912,6 +1929,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ count = vc->vc_rows;
+ if (logo_shown >= 0)
+ goto redraw_down;
++ if (fbcon_decor_active(info, vc))
++ goto redraw_down;
+ switch (p->scrollmode) {
+ case SCROLL_MOVE:
+ fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+@@ -2060,6 +2079,13 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
+ }
+ return;
+ }
++
++ if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
++ /* must use slower redraw bmove to keep background pic intact */
++ fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
++ return;
++ }
++
+ ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
+ }
+@@ -2130,8 +2156,8 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ var.yres = virt_h * virt_fh;
+ x_diff = info->var.xres - var.xres;
+ y_diff = info->var.yres - var.yres;
+- if (x_diff < 0 || x_diff > virt_fw ||
+- y_diff < 0 || y_diff > virt_fh) {
++ if ((x_diff < 0 || x_diff > virt_fw ||
++ y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
+ const struct fb_videomode *mode;
+
+ DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+@@ -2168,6 +2194,22 @@ static int fbcon_switch(struct vc_data *vc)
+ info = registered_fb[con2fb_map[vc->vc_num]];
+ ops = info->fbcon_par;
+
++ prev_console = ops->currcon;
++ if (prev_console != -1)
++ old_info = registered_fb[con2fb_map[prev_console]];
++
++#ifdef CONFIG_FB_CON_DECOR
++ if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++ struct vc_data *vc_curr = vc_cons[prev_console].d;
++ if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
++ /* Clear the screen to avoid displaying funky colors during
++ * palette updates. */
++ memset((u8*)info->screen_base + info->fix.line_length * info->var.yoffset,
++ 0, info->var.yres * info->fix.line_length);
++ }
++ }
++#endif
++
+ if (softback_top) {
+ if (softback_lines)
+ fbcon_set_origin(vc);
+@@ -2185,9 +2227,6 @@ static int fbcon_switch(struct vc_data *vc)
+ logo_shown = FBCON_LOGO_CANSHOW;
+ }
+
+- prev_console = ops->currcon;
+- if (prev_console != -1)
+- old_info = registered_fb[con2fb_map[prev_console]];
+ /*
+ * FIXME: If we have multiple fbdev's loaded, we need to
+ * update all info->currcon. Perhaps, we can place this
+@@ -2231,6 +2270,18 @@ static int fbcon_switch(struct vc_data *vc)
+ fbcon_del_cursor_timer(old_info);
+ }
+
++ if (fbcon_decor_active_vc(vc)) {
++ struct vc_data *vc_curr = vc_cons[prev_console].d;
++
++ if (!vc_curr->vc_decor.theme ||
++ strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
++ (fbcon_decor_active_nores(info, vc_curr) &&
++ !fbcon_decor_active(info, vc_curr))) {
++ fbcon_decor_disable(vc, 0);
++ fbcon_decor_call_helper("modechange", vc->vc_num);
++ }
++ }
++
+ if (fbcon_is_inactive(vc, info) ||
+ ops->blank_state != FB_BLANK_UNBLANK)
+ fbcon_del_cursor_timer(info);
+@@ -2344,10 +2395,14 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+ ops->blank_state = blank;
+ fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ ops->cursor_flash = (!blank);
+-
+- if (!(info->flags & FBINFO_MISC_USEREVENT))
+- if (fb_blank(info, blank))
+- fbcon_generic_blank(vc, info, blank);
++ if (!(info->flags & FBINFO_MISC_USEREVENT)) {
++ if (fb_blank(info, blank)) {
++ if (fbcon_decor_active(info, vc))
++ fbcon_decor_blank(vc, info, blank);
++ else
++ fbcon_generic_blank(vc, info, blank);
++ }
++ }
+ }
+
+ if (!blank)
+@@ -2522,10 +2577,18 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ }
+
+ if (resize) {
++ /* reset wrap/pan */
+ int cols, rows;
+
+ cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
++
++ if (fbcon_decor_active(info, vc)) {
++ info->var.xoffset = info->var.yoffset = p->yscroll = 0;
++ cols = vc->vc_decor.twidth;
++ rows = vc->vc_decor.theight;
++ }
++
+ cols /= w;
+ rows /= h;
+ vc_resize(vc, cols, rows);
+@@ -2657,7 +2720,11 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+ int i, j, k, depth;
+ u8 val;
+
+- if (fbcon_is_inactive(vc, info))
++ if (fbcon_is_inactive(vc, info)
++#ifdef CONFIG_FB_CON_DECOR
++ || vc->vc_num != fg_console
++#endif
++ )
+ return -EINVAL;
+
+ if (!CON_IS_VISIBLE(vc))
+@@ -2683,7 +2750,49 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+ } else
+ fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
+
+- return fb_set_cmap(&palette_cmap, info);
++ if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++
++ u16 *red, *green, *blue;
++ int minlen = min(min(info->var.red.length, info->var.green.length),
++ info->var.blue.length);
++ int h;
++
++ struct fb_cmap cmap = {
++ .start = 0,
++ .len = (1 << minlen),
++ .red = NULL,
++ .green = NULL,
++ .blue = NULL,
++ .transp = NULL
++ };
++
++ red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
++
++ if (!red)
++ goto out;
++
++ green = red + 256;
++ blue = green + 256;
++ cmap.red = red;
++ cmap.green = green;
++ cmap.blue = blue;
++
++ for (i = 0; i < cmap.len; i++) {
++ red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
++ }
++
++ h = fb_set_cmap(&cmap, info);
++ fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++ kfree(red);
++
++ return h;
++
++ } else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++ info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
++ fb_set_cmap(&info->bgdecor.cmap, info);
++
++out: return fb_set_cmap(&palette_cmap, info);
+ }
+
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+@@ -2909,7 +3018,13 @@ static void fbcon_modechanged(struct fb_info *info)
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols /= vc->vc_font.width;
+ rows /= vc->vc_font.height;
+- vc_resize(vc, cols, rows);
++ if (!fbcon_decor_active_nores(info, vc)) {
++ vc_resize(vc, cols, rows);
++ } else {
++ fbcon_decor_disable(vc, 0);
++ fbcon_decor_call_helper("modechange", vc->vc_num);
++ }
++
+ updatescrollmode(p, info, vc);
+ scrollback_max = 0;
+ scrollback_current = 0;
+@@ -2954,7 +3069,10 @@ static void fbcon_set_all_vcs(struct fb_info *info)
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols /= vc->vc_font.width;
+ rows /= vc->vc_font.height;
+- vc_resize(vc, cols, rows);
++ if (!fbcon_decor_active_nores(info, vc)) {
++ vc_resize(vc, cols, rows);
++ }
++
+ }
+
+ if (fg != -1)
+@@ -3570,6 +3688,7 @@ static void fbcon_exit(void)
+ }
+ }
+
++ fbcon_decor_exit();
+ fbcon_has_exited = 1;
+ }
+
+--
+1.8.1.5
+
+diff --git a/drivers/video/console/fbcondecor.c b/drivers/video/console/fbcondecor.c
+new file mode 100644
+index 0000000..7189ce6
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.c
+@@ -0,0 +1,555 @@
++/*
++ * linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
++ *
++ * Copyright (C) 2004-2009 Michal Januszewski <spock@gentoo.org>
++ *
++ * Code based upon "Bootsplash" (C) 2001-2003
++ * Volker Poplawski <volker@poplawski.de>,
++ * Stefan Reinauer <stepan@suse.de>,
++ * Steffen Winterfeldt <snwint@suse.de>,
++ * Michael Schroeder <mls@suse.de>,
++ * Ken Wimer <wimer@suse.de>.
++ *
++ * Compat ioctl support by Thorsten Klein <TK@Thorsten-Klein.de>.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ *
++ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/vt_kern.h>
++#include <linux/vmalloc.h>
++#include <linux/unistd.h>
++#include <linux/syscalls.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/workqueue.h>
++#include <linux/kmod.h>
++#include <linux/miscdevice.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/compat.h>
++#include <linux/console.h>
++
++#include <asm/uaccess.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++extern signed char con2fb_map[];
++static int fbcon_decor_enable(struct vc_data *vc);
++char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
++static int initialized = 0;
++
++int fbcon_decor_call_helper(char* cmd, unsigned short vc)
++{
++ char *envp[] = {
++ "HOME=/",
++ "PATH=/sbin:/bin",
++ NULL
++ };
++
++ char tfb[5];
++ char tcons[5];
++ unsigned char fb = (int) con2fb_map[vc];
++
++ char *argv[] = {
++ fbcon_decor_path,
++ "2",
++ cmd,
++ tcons,
++ tfb,
++ vc_cons[vc].d->vc_decor.theme,
++ NULL
++ };
++
++ snprintf(tfb,5,"%d",fb);
++ snprintf(tcons,5,"%d",vc);
++
++ return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
++}
++
++/* Disables fbcondecor on a virtual console; called with console sem held. */
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
++{
++ struct fb_info* info;
++
++ if (!vc->vc_decor.state)
++ return -EINVAL;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (info == NULL)
++ return -EINVAL;
++
++ vc->vc_decor.state = 0;
++ vc_resize(vc, info->var.xres / vc->vc_font.width,
++ info->var.yres / vc->vc_font.height);
++
++ if (fg_console == vc->vc_num && redraw) {
++ redraw_screen(vc, 0);
++ update_region(vc, vc->vc_origin +
++ vc->vc_size_row * vc->vc_top,
++ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++ }
++
++ printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
++ vc->vc_num);
++
++ return 0;
++}
++
++/* Enables fbcondecor on a virtual console; called with console sem held. */
++static int fbcon_decor_enable(struct vc_data *vc)
++{
++ struct fb_info* info;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
++ info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
++ vc->vc_num == fg_console))
++ return -EINVAL;
++
++ vc->vc_decor.state = 1;
++ vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
++ vc->vc_decor.theight / vc->vc_font.height);
++
++ if (fg_console == vc->vc_num) {
++ redraw_screen(vc, 0);
++ update_region(vc, vc->vc_origin +
++ vc->vc_size_row * vc->vc_top,
++ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++ fbcon_decor_clear_margins(vc, info, 0);
++ }
++
++ printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
++ vc->vc_num);
++
++ return 0;
++}
++
++static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
++{
++ int ret;
++
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_lock();
++ if (!state)
++ ret = fbcon_decor_disable(vc, 1);
++ else
++ ret = fbcon_decor_enable(vc);
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_unlock();
++
++ return ret;
++}
++
++static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
++{
++ *state = vc->vc_decor.state;
++}
++
++static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
++{
++ struct fb_info *info;
++ int len;
++ char *tmp;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (info == NULL || !cfg->twidth || !cfg->theight ||
++ cfg->tx + cfg->twidth > info->var.xres ||
++ cfg->ty + cfg->theight > info->var.yres)
++ return -EINVAL;
++
++ len = strlen_user(cfg->theme);
++ if (!len || len > FBCON_DECOR_THEME_LEN)
++ return -EINVAL;
++ tmp = kmalloc(len, GFP_KERNEL);
++ if (!tmp)
++ return -ENOMEM;
++ if (copy_from_user(tmp, (void __user *)cfg->theme, len))
++ return -EFAULT;
++ cfg->theme = tmp;
++ cfg->state = 0;
++
++ /* If this ioctl is a response to a request from kernel, the console sem
++ * is already held; we also don't need to disable decor because either the
++ * new config and background picture will be successfully loaded, and the
++ * decor will stay on, or in case of a failure it'll be turned off in fbcon. */
++// if (origin == FBCON_DECOR_IO_ORIG_USER) {
++ console_lock();
++ if (vc->vc_decor.state)
++ fbcon_decor_disable(vc, 1);
++// }
++
++ if (vc->vc_decor.theme)
++ kfree(vc->vc_decor.theme);
++
++ vc->vc_decor = *cfg;
++
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_unlock();
++
++ printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
++ vc->vc_num, vc->vc_decor.theme);
++ return 0;
++}
++
++static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc, struct vc_decor *decor)
++{
++ char __user *tmp;
++
++ tmp = decor->theme;
++ *decor = vc->vc_decor;
++ decor->theme = tmp;
++
++ if (vc->vc_decor.theme) {
++ if (copy_to_user(tmp, vc->vc_decor.theme, strlen(vc->vc_decor.theme) + 1))
++ return -EFAULT;
++ } else
++ if (put_user(0, tmp))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img, unsigned char origin)
++{
++ struct fb_info *info;
++ int len;
++ u8 *tmp;
++
++ if (vc->vc_num != fg_console)
++ return -EINVAL;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (info == NULL)
++ return -EINVAL;
++
++ if (img->width != info->var.xres || img->height != info->var.yres) {
++ printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
++ printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height, info->var.xres, info->var.yres);
++ return -EINVAL;
++ }
++
++ if (img->depth != info->var.bits_per_pixel) {
++ printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
++ return -EINVAL;
++ }
++
++ if (img->depth == 8) {
++ if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
++ !img->cmap.blue)
++ return -EINVAL;
++
++ tmp = vmalloc(img->cmap.len * 3 * 2);
++ if (!tmp)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp,
++ (void __user*)img->cmap.red, (img->cmap.len << 1)) ||
++ copy_from_user(tmp + (img->cmap.len << 1),
++ (void __user*)img->cmap.green, (img->cmap.len << 1)) ||
++ copy_from_user(tmp + (img->cmap.len << 2),
++ (void __user*)img->cmap.blue, (img->cmap.len << 1))) {
++ vfree(tmp);
++ return -EFAULT;
++ }
++
++ img->cmap.transp = NULL;
++ img->cmap.red = (u16*)tmp;
++ img->cmap.green = img->cmap.red + img->cmap.len;
++ img->cmap.blue = img->cmap.green + img->cmap.len;
++ } else {
++ img->cmap.red = NULL;
++ }
++
++ len = ((img->depth + 7) >> 3) * img->width * img->height;
++
++ /*
++ * Allocate an additional byte so that we never go outside of the
++ * buffer boundaries in the rendering functions in a 24 bpp mode.
++ */
++ tmp = vmalloc(len + 1);
++
++ if (!tmp)
++ goto out;
++
++ if (copy_from_user(tmp, (void __user*)img->data, len))
++ goto out;
++
++ img->data = tmp;
++
++ /* If this ioctl is a response to a request from kernel, the console sem
++ * is already held. */
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_lock();
++
++ if (info->bgdecor.data)
++ vfree((u8*)info->bgdecor.data);
++ if (info->bgdecor.cmap.red)
++ vfree(info->bgdecor.cmap.red);
++
++ info->bgdecor = *img;
++
++ if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
++ redraw_screen(vc, 0);
++ update_region(vc, vc->vc_origin +
++ vc->vc_size_row * vc->vc_top,
++ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++ fbcon_decor_clear_margins(vc, info, 0);
++ }
++
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_unlock();
++
++ return 0;
++
++out: if (img->cmap.red)
++ vfree(img->cmap.red);
++
++ if (tmp)
++ vfree(tmp);
++ return -ENOMEM;
++}
++
++static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
++{
++ struct fbcon_decor_iowrapper __user *wrapper = (void __user*) arg;
++ struct vc_data *vc = NULL;
++ unsigned short vc_num = 0;
++ unsigned char origin = 0;
++ void __user *data = NULL;
++
++ if (!access_ok(VERIFY_READ, wrapper,
++ sizeof(struct fbcon_decor_iowrapper)))
++ return -EFAULT;
++
++ __get_user(vc_num, &wrapper->vc);
++ __get_user(origin, &wrapper->origin);
++ __get_user(data, &wrapper->data);
++
++ if (!vc_cons_allocated(vc_num))
++ return -EINVAL;
++
++ vc = vc_cons[vc_num].d;
++
++ switch (cmd) {
++ case FBIOCONDECOR_SETPIC:
++ {
++ struct fb_image img;
++ if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
++ return -EFAULT;
++
++ return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++ }
++ case FBIOCONDECOR_SETCFG:
++ {
++ struct vc_decor cfg;
++ if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++ return -EFAULT;
++
++ return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++ }
++ case FBIOCONDECOR_GETCFG:
++ {
++ int rval;
++ struct vc_decor cfg;
++
++ if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++ return -EFAULT;
++
++ rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++ if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
++ return -EFAULT;
++ return rval;
++ }
++ case FBIOCONDECOR_SETSTATE:
++ {
++ unsigned int state = 0;
++ if (get_user(state, (unsigned int __user *)data))
++ return -EFAULT;
++ return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++ }
++ case FBIOCONDECOR_GETSTATE:
++ {
++ unsigned int state = 0;
++ fbcon_decor_ioctl_dogetstate(vc, &state);
++ return put_user(state, (unsigned int __user *)data);
++ }
++
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++
++#ifdef CONFIG_COMPAT
++
++static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) {
++
++ struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
++ struct vc_data *vc = NULL;
++ unsigned short vc_num = 0;
++ unsigned char origin = 0;
++ compat_uptr_t data_compat = 0;
++ void __user *data = NULL;
++
++ if (!access_ok(VERIFY_READ, wrapper,
++ sizeof(struct fbcon_decor_iowrapper32)))
++ return -EFAULT;
++
++ __get_user(vc_num, &wrapper->vc);
++ __get_user(origin, &wrapper->origin);
++ __get_user(data_compat, &wrapper->data);
++ data = compat_ptr(data_compat);
++
++ if (!vc_cons_allocated(vc_num))
++ return -EINVAL;
++
++ vc = vc_cons[vc_num].d;
++
++ switch (cmd) {
++ case FBIOCONDECOR_SETPIC32:
++ {
++ struct fb_image32 img_compat;
++ struct fb_image img;
++
++ if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
++ return -EFAULT;
++
++ fb_image_from_compat(img, img_compat);
++
++ return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++ }
++
++ case FBIOCONDECOR_SETCFG32:
++ {
++ struct vc_decor32 cfg_compat;
++ struct vc_decor cfg;
++
++ if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++ return -EFAULT;
++
++ vc_decor_from_compat(cfg, cfg_compat);
++
++ return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++ }
++
++ case FBIOCONDECOR_GETCFG32:
++ {
++ int rval;
++ struct vc_decor32 cfg_compat;
++ struct vc_decor cfg;
++
++ if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++ return -EFAULT;
++ cfg.theme = compat_ptr(cfg_compat.theme);
++
++ rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++ vc_decor_to_compat(cfg_compat, cfg);
++
++ if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
++ return -EFAULT;
++ return rval;
++ }
++
++ case FBIOCONDECOR_SETSTATE32:
++ {
++ compat_uint_t state_compat = 0;
++ unsigned int state = 0;
++
++ if (get_user(state_compat, (compat_uint_t __user *)data))
++ return -EFAULT;
++
++ state = (unsigned int)state_compat;
++
++ return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++ }
++
++ case FBIOCONDECOR_GETSTATE32:
++ {
++ compat_uint_t state_compat = 0;
++ unsigned int state = 0;
++
++ fbcon_decor_ioctl_dogetstate(vc, &state);
++ state_compat = (compat_uint_t)state;
++
++ return put_user(state_compat, (compat_uint_t __user *)data);
++ }
++
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++#else
++ #define fbcon_decor_compat_ioctl NULL
++#endif
++
++static struct file_operations fbcon_decor_ops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = fbcon_decor_ioctl,
++ .compat_ioctl = fbcon_decor_compat_ioctl
++};
++
++static struct miscdevice fbcon_decor_dev = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "fbcondecor",
++ .fops = &fbcon_decor_ops
++};
++
++void fbcon_decor_reset()
++{
++ int i;
++
++ for (i = 0; i < num_registered_fb; i++) {
++ registered_fb[i]->bgdecor.data = NULL;
++ registered_fb[i]->bgdecor.cmap.red = NULL;
++ }
++
++ for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
++ vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
++ vc_cons[i].d->vc_decor.theight = 0;
++ vc_cons[i].d->vc_decor.theme = NULL;
++ }
++
++ return;
++}
++
++int fbcon_decor_init()
++{
++ int i;
++
++ fbcon_decor_reset();
++
++ if (initialized)
++ return 0;
++
++ i = misc_register(&fbcon_decor_dev);
++ if (i) {
++ printk(KERN_ERR "fbcondecor: failed to register device\n");
++ return i;
++ }
++
++ fbcon_decor_call_helper("init", 0);
++ initialized = 1;
++ return 0;
++}
++
++int fbcon_decor_exit(void)
++{
++ fbcon_decor_reset();
++ return 0;
++}
++
++EXPORT_SYMBOL(fbcon_decor_path);
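
The misc device registered above shows up as /dev/fbcondecor (MISC_DYNAMIC_MINOR, assuming the usual udev-populated /dev). As a rough illustration of the ioctl interface — the following sketch is not part of the patch — a minimal userspace program can toggle the decor state on one console. It assumes the struct fbcon_decor_iowrapper and the FBIOCONDECOR_* numbers from the include/uapi/linux/fb.h hunk further below are visible through the installed kernel headers.

/* Illustrative sketch only (not part of the patch): enable fbcondecor on
 * console 1. Assumes the patched <linux/fb.h> exposes the wrapper struct
 * and the FBIOCONDECOR_* ioctl numbers to userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fb.h>

int main(void)
{
	unsigned int state = 1;			/* 1 = decor on, 0 = off */
	struct fbcon_decor_iowrapper wrapper = {
		.vc     = 1,			/* virtual console number */
		.origin = 1,			/* FBCON_DECOR_IO_ORIG_USER */
		.data   = &state,
	};
	int fd = open("/dev/fbcondecor", O_RDWR);

	if (fd < 0) {
		perror("open /dev/fbcondecor");
		return 1;
	}
	if (ioctl(fd, FBIOCONDECOR_SETSTATE, &wrapper) < 0)
		perror("FBIOCONDECOR_SETSTATE");
	close(fd);
	return 0;
}

Note that fbcon_decor_enable() above rejects SETSTATE with -EINVAL until a config and background picture have been loaded via FBIOCONDECOR_SETCFG/FBIOCONDECOR_SETPIC, which is normally the job of the fbcondecor helper.
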
+diff --git a/drivers/video/console/fbcondecor.h b/drivers/video/console/fbcondecor.h
+new file mode 100644
+index 0000000..1d852dd
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.h
+@@ -0,0 +1,78 @@
++/*
++ * linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
++ *
++ * Copyright (C) 2004 Michal Januszewski <spock@gentoo.org>
++ *
++ */
++
++#ifndef __FBCON_DECOR_H
++#define __FBCON_DECOR_H
++
++#ifndef _LINUX_FB_H
++#include <linux/fb.h>
++#endif
++
++/* This is needed for vc_cons in fbcmap.c */
++#include <linux/vt_kern.h>
++
++struct fb_cursor;
++struct fb_info;
++struct vc_data;
++
++#ifdef CONFIG_FB_CON_DECOR
++/* fbcondecor.c */
++int fbcon_decor_init(void);
++int fbcon_decor_exit(void);
++int fbcon_decor_call_helper(char* cmd, unsigned short cons);
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
++
++/* cfbcondecor.c */
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinebytes, int bpp);
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
++
++/* vt.c */
++void acquire_console_sem(void);
++void release_console_sem(void);
++void do_unblank_screen(int entering_gfx);
++
++/* struct vc_data *y */
++#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme)
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active_nores(x,y) (x->bgdecor.data && fbcon_decor_active_vc(y))
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active(x,y) (fbcon_decor_active_nores(x,y) && \
++ x->bgdecor.width == x->var.xres && \
++ x->bgdecor.height == x->var.yres && \
++ x->bgdecor.depth == x->var.bits_per_pixel)
++
++
++#else /* CONFIG_FB_CON_DECOR */
++
++static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
++static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
++static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
++static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
++static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
++static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
++static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
++static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
++static inline int fbcon_decor_call_helper(char* cmd, unsigned short cons) { return 0; }
++static inline int fbcon_decor_init(void) { return 0; }
++static inline int fbcon_decor_exit(void) { return 0; }
++static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
++
++#define fbcon_decor_active_vc(y) (0)
++#define fbcon_decor_active_nores(x,y) (0)
++#define fbcon_decor_active(x,y) (0)
++
++#endif /* CONFIG_FB_CON_DECOR */
++
++#endif /* __FBCON_DECOR_H */
+diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
+index 5c3960d..162b5f4 100644
+--- a/drivers/video/fbcmap.c
++++ b/drivers/video/fbcmap.c
+@@ -17,6 +17,8 @@
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+
++#include "console/fbcondecor.h"
++
+ static u16 red2[] __read_mostly = {
+ 0x0000, 0xaaaa
+ };
+@@ -249,14 +251,17 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
+ if (transp)
+ htransp = *transp++;
+ if (info->fbops->fb_setcolreg(start++,
+- hred, hgreen, hblue,
++ hred, hgreen, hblue,
+ htransp, info))
+ break;
+ }
+ }
+- if (rc == 0)
++ if (rc == 0) {
+ fb_copy_cmap(cmap, &info->cmap);
+-
++ if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
++ fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++ }
+ return rc;
+ }
+
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index c6ce416..7ce6640 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1231,15 +1231,6 @@ struct fb_fix_screeninfo32 {
+ u16 reserved[3];
+ };
+
+-struct fb_cmap32 {
+- u32 start;
+- u32 len;
+- compat_caddr_t red;
+- compat_caddr_t green;
+- compat_caddr_t blue;
+- compat_caddr_t transp;
+-};
+-
+ static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
+ {
+diff --git a/include/linux/console_decor.h b/include/linux/console_decor.h
+new file mode 100644
+index 0000000..04b8d80
+--- /dev/null
++++ b/include/linux/console_decor.h
+@@ -0,0 +1,46 @@
++#ifndef _LINUX_CONSOLE_DECOR_H_
++#define _LINUX_CONSOLE_DECOR_H_ 1
++
++/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
++struct vc_decor {
++ __u8 bg_color; /* The color that is to be treated as transparent */
++ __u8 state; /* Current decor state: 0 = off, 1 = on */
++ __u16 tx, ty; /* Top left corner coordinates of the text field */
++ __u16 twidth, theight; /* Width and height of the text field */
++ char* theme;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++
++struct vc_decor32 {
++ __u8 bg_color; /* The color that is to be treated as transparent */
++ __u8 state; /* Current decor state: 0 = off, 1 = on */
++ __u16 tx, ty; /* Top left corner coordinates of the text field */
++ __u16 twidth, theight; /* Width and height of the text field */
++ compat_uptr_t theme;
++};
++
++#define vc_decor_from_compat(to, from) \
++ (to).bg_color = (from).bg_color; \
++ (to).state = (from).state; \
++ (to).tx = (from).tx; \
++ (to).ty = (from).ty; \
++ (to).twidth = (from).twidth; \
++ (to).theight = (from).theight; \
++ (to).theme = compat_ptr((from).theme)
++
++#define vc_decor_to_compat(to, from) \
++ (to).bg_color = (from).bg_color; \
++ (to).state = (from).state; \
++ (to).tx = (from).tx; \
++ (to).ty = (from).ty; \
++ (to).twidth = (from).twidth; \
++ (to).theight = (from).theight; \
++ (to).theme = ptr_to_compat((from).theme)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#endif
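
For illustration (again, not part of the patch), a userspace caller would fill a vc_decor like the sketch below before handing it to FBIOCONDECOR_SETCFG. The 1024x768 geometry and the theme name are made-up examples; the kernel copies the theme string in itself via strlen_user()/copy_from_user(), as seen in fbcon_decor_ioctl_dosetcfg() above.

/* Illustrative sketch only: a vc_decor describing a centered text field
 * inside a hypothetical 1024x768 background picture. */
#include <linux/console_decor.h>

static char theme_name[] = "default";	/* at most FBCON_DECOR_THEME_LEN bytes */

static struct vc_decor example_cfg = {
	.bg_color = 0,			/* palette index treated as transparent */
	.state    = 0,			/* SETCFG forces this to 0 anyway */
	.tx       = 64,			/* top-left corner of the text field */
	.ty       = 48,
	.twidth   = 1024 - 2 * 64,	/* must fit within the framebuffer */
	.theight  = 768 - 2 * 48,
	.theme    = theme_name,
};
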
+diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
+index 7f0c329..98f5d60 100644
+--- a/include/linux/console_struct.h
++++ b/include/linux/console_struct.h
+@@ -19,6 +19,7 @@
+ struct vt_struct;
+
+ #define NPAR 16
++#include <linux/console_decor.h>
+
+ struct vc_data {
+ struct tty_port port; /* Upper level data */
+@@ -107,6 +108,8 @@ struct vc_data {
+ unsigned long vc_uni_pagedir;
+ unsigned long *vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
+ bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
++
++ struct vc_decor vc_decor;
+ /* additional information is in vt_kern.h */
+ };
+
+diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
+index d31cb68..ad161bb 100644
+--- a/include/uapi/linux/fb.h
++++ b/include/uapi/linux/fb.h
+@@ -8,6 +8,25 @@
+
+ #define FB_MAX 32 /* sufficient for now */
+
++struct fbcon_decor_iowrapper
++{
++ unsigned short vc; /* Virtual console */
++ unsigned char origin; /* Point of origin of the request */
++ void *data;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++struct fbcon_decor_iowrapper32
++{
++ unsigned short vc; /* Virtual console */
++ unsigned char origin; /* Point of origin of the request */
++ compat_uptr_t data;
++};
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /* ioctls
+ 0x46 is 'F' */
+ #define FBIOGET_VSCREENINFO 0x4600
+@@ -34,6 +53,24 @@
+ #define FBIOPUT_MODEINFO 0x4617
+ #define FBIOGET_DISPINFO 0x4618
+ #define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
++#define FBIOCONDECOR_SETCFG _IOWR('F', 0x19, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETCFG _IOR('F', 0x1A, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETSTATE _IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETSTATE _IOR('F', 0x1C, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETPIC _IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#define FBIOCONDECOR_SETCFG32 _IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETCFG32 _IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETSTATE32 _IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETSTATE32 _IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETPIC32 _IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#define FBCON_DECOR_THEME_LEN 128 /* Maximum length of a theme name */
++#define FBCON_DECOR_IO_ORIG_KERNEL 0 /* Kernel ioctl origin */
++#define FBCON_DECOR_IO_ORIG_USER 1 /* User ioctl origin */
+
+ #define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */
+ #define FB_TYPE_PLANES 1 /* Non interleaved planes */
+@@ -286,6 +323,28 @@ struct fb_cmap {
+ __u16 *transp; /* transparency, can be NULL */
+ };
+
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_cmap32 {
++ __u32 start;
++ __u32 len; /* Number of entries */
++ compat_uptr_t red; /* Red values */
++ compat_uptr_t green;
++ compat_uptr_t blue;
++ compat_uptr_t transp; /* transparency, can be NULL */
++};
++
++#define fb_cmap_from_compat(to, from) \
++ (to).start = (from).start; \
++ (to).len = (from).len; \
++ (to).red = compat_ptr((from).red); \
++ (to).green = compat_ptr((from).green); \
++ (to).blue = compat_ptr((from).blue); \
++ (to).transp = compat_ptr((from).transp)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ struct fb_con2fbmap {
+ __u32 console;
+ __u32 framebuffer;
+@@ -367,6 +426,34 @@ struct fb_image {
+ struct fb_cmap cmap; /* color map info */
+ };
+
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_image32 {
++ __u32 dx; /* Where to place image */
++ __u32 dy;
++ __u32 width; /* Size of image */
++ __u32 height;
++ __u32 fg_color; /* Only used when a mono bitmap */
++ __u32 bg_color;
++ __u8 depth; /* Depth of the image */
++ const compat_uptr_t data; /* Pointer to image data */
++ struct fb_cmap32 cmap; /* color map info */
++};
++
++#define fb_image_from_compat(to, from) \
++ (to).dx = (from).dx; \
++ (to).dy = (from).dy; \
++ (to).width = (from).width; \
++ (to).height = (from).height; \
++ (to).fg_color = (from).fg_color; \
++ (to).bg_color = (from).bg_color; \
++ (to).depth = (from).depth; \
++ (to).data = compat_ptr((from).data); \
++ fb_cmap_from_compat((to).cmap, (from).cmap)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /*
+ * hardware cursor control
+ */
+
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index d31cb68..ad161bb 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -488,5 +488,8 @@ #define FBINFO_STATE_SUSPENDED 1
+ u32 state; /* Hardware state i.e suspend */
+ void *fbcon_par; /* fbcon use-only private area */
++
++ struct fb_image bgdecor;
++
+ /* From here on everything is device dependent */
+ void *par;
+ /* we need the PCI or similar aperture base/size not
+
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 4ab1187..6561627 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -145,6 +145,10 @@ static int min_percpu_pagelist_fract = 8;
+ static int ngroups_max = NGROUPS_MAX;
+ static const int cap_last_cap = CAP_LAST_CAP;
+
++#ifdef CONFIG_FB_CON_DECOR
++extern char fbcon_decor_path[];
++#endif
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -248,6 +252,15 @@ static struct ctl_table sysctl_base_table[] = {
+ .mode = 0555,
+ .child = dev_table,
+ },
++#ifdef CONFIG_FB_CON_DECOR
++ {
++ .procname = "fbcondecor",
++ .data = &fbcon_decor_path,
++ .maxlen = KMOD_PATH_LEN,
++ .mode = 0644,
++ .proc_handler = &proc_dostring,
++ },
++#endif
+ { }
+ };
+
+@@ -1091,7 +1104,7 @@ static struct ctl_table vm_table[] = {
+ .proc_handler = proc_dointvec,
+ },
+ {
+- .procname = "page-cluster",
++ .procname = "page-cluster",
+ .data = &page_cluster,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+@@ -1535,7 +1548,7 @@ static struct ctl_table fs_table[] = {
+ .mode = 0555,
+ .child = inotify_table,
+ },
+-#endif
++#endif
+ #ifdef CONFIG_EPOLL
+ {
+ .procname = "epoll",
+@@ -1873,12 +1886,12 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
+ unsigned long page = 0;
+ size_t left;
+ char *kbuf;
+-
++
+ if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) {
+ *lenp = 0;
+ return 0;
+ }
+-
++
+ i = (int *) tbl_data;
+ vleft = table->maxlen / sizeof(*i);
+ left = *lenp;
+@@ -1967,7 +1980,7 @@ static int do_proc_dointvec(struct ctl_table *table, int write,
+ * @ppos: file position
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
+- * values from/to the user buffer, treated as an ASCII string.
++ * values from/to the user buffer, treated as an ASCII string.
+ *
+ * Returns 0 on success.
+ */
+@@ -2326,7 +2339,7 @@ static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp,
+ * @ppos: file position
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
+- * values from/to the user buffer, treated as an ASCII string.
++ * values from/to the user buffer, treated as an ASCII string.
+ * The values read are assumed to be in seconds, and are converted into
+ * jiffies.
+ *
+@@ -2348,8 +2361,8 @@ int proc_dointvec_jiffies(struct ctl_table *table, int write,
+ * @ppos: pointer to the file position
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
+- * values from/to the user buffer, treated as an ASCII string.
+- * The values read are assumed to be in 1/USER_HZ seconds, and
++ * values from/to the user buffer, treated as an ASCII string.
++ * The values read are assumed to be in 1/USER_HZ seconds, and
+ * are converted into jiffies.
+ *
+ * Returns 0 on success.
+@@ -2371,8 +2384,8 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
+ * @ppos: the current position in the file
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
+- * values from/to the user buffer, treated as an ASCII string.
+- * The values read are assumed to be in 1/1000 seconds, and
++ * values from/to the user buffer, treated as an ASCII string.
++ * The values read are assumed to be in 1/1000 seconds, and
+ * are converted into jiffies.
+ *
+ * Returns 0 on success.
+--
+1.7.10
+
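
The sysctl table entry added above exposes fbcon_decor_path (default /sbin/fbcondecor_helper) as kernel.fbcondecor. Purely as an illustration — the destination path below is a made-up example — retargeting the helper at runtime amounts to a string write through procfs:

/* Illustrative sketch only: point the kernel at a different helper binary.
 * Equivalent to:
 *   echo /usr/local/sbin/fbcondecor_helper > /proc/sys/kernel/fbcondecor */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/fbcondecor", "w");

	if (!f) {
		perror("fopen /proc/sys/kernel/fbcondecor");
		return 1;
	}
	fputs("/usr/local/sbin/fbcondecor_helper\n", f);
	return fclose(f) ? 1 : 0;
}
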
diff --git a/4400_fs-userns-change-inode_capable-to-capable_wrt_inode_uidgid.patch b/4400_fs-userns-change-inode_capable-to-capable_wrt_inode_uidgid.patch
new file mode 100644
index 00000000..108309d3
--- /dev/null
+++ b/4400_fs-userns-change-inode_capable-to-capable_wrt_inode_uidgid.patch
@@ -0,0 +1,160 @@
+--- a/fs/attr.c 2014-06-11 19:56:10.729668444 -0400
++++ b/fs/attr.c 2014-06-11 20:19:09.719657538 -0400
+@@ -50,14 +50,14 @@ int inode_change_ok(const struct inode *
+ if ((ia_valid & ATTR_UID) &&
+ (!uid_eq(current_fsuid(), inode->i_uid) ||
+ !uid_eq(attr->ia_uid, inode->i_uid)) &&
+- !inode_capable(inode, CAP_CHOWN))
++ !capable_wrt_inode_uidgid(inode, CAP_CHOWN))
+ return -EPERM;
+
+ /* Make sure caller can chgrp. */
+ if ((ia_valid & ATTR_GID) &&
+ (!uid_eq(current_fsuid(), inode->i_uid) ||
+ (!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) &&
+- !inode_capable(inode, CAP_CHOWN))
++ !capable_wrt_inode_uidgid(inode, CAP_CHOWN))
+ return -EPERM;
+
+ /* Make sure a caller can chmod. */
+@@ -67,7 +67,7 @@ int inode_change_ok(const struct inode *
+ /* Also check the setgid bit! */
+ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
+ inode->i_gid) &&
+- !inode_capable(inode, CAP_FSETID))
++ !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+ attr->ia_mode &= ~S_ISGID;
+ }
+
+@@ -160,7 +160,7 @@ void setattr_copy(struct inode *inode, c
+ umode_t mode = attr->ia_mode;
+
+ if (!in_group_p(inode->i_gid) &&
+- !inode_capable(inode, CAP_FSETID))
++ !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+ mode &= ~S_ISGID;
+ inode->i_mode = mode;
+ }
+--- a/fs/inode.c 2014-06-11 19:56:23.489668343 -0400
++++ b/fs/inode.c 2014-06-11 20:06:19.049663633 -0400
+@@ -1840,14 +1840,18 @@ EXPORT_SYMBOL(inode_init_owner);
+ * inode_owner_or_capable - check current task permissions to inode
+ * @inode: inode being checked
+ *
+- * Return true if current either has CAP_FOWNER to the inode, or
+- * owns the file.
++ * Return true if current either has CAP_FOWNER in a namespace with the
++ * inode owner uid mapped, or owns the file
+ */
+ bool inode_owner_or_capable(const struct inode *inode)
+ {
++ struct user_namespace *ns;
++
+ if (uid_eq(current_fsuid(), inode->i_uid))
+ return true;
+- if (inode_capable(inode, CAP_FOWNER))
++
++ ns = current_user_ns();
++ if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
+ return true;
+ return false;
+ }
+--- a/fs/namei.c 2014-06-11 19:56:33.039668268 -0400
++++ b/fs/namei.c 2014-06-11 20:10:37.189661592 -0400
+@@ -332,10 +332,11 @@ int generic_permission(struct inode *ino
+
+ if (S_ISDIR(inode->i_mode)) {
+ /* DACs are overridable for directories */
+- if (inode_capable(inode, CAP_DAC_OVERRIDE))
++ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
+ return 0;
+ if (!(mask & MAY_WRITE))
+- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
++ if (capable_wrt_inode_uidgid(inode,
++ CAP_DAC_READ_SEARCH))
+ return 0;
+ return -EACCES;
+ }
+@@ -345,7 +346,7 @@ int generic_permission(struct inode *ino
+ * at least one exec bit set.
+ */
+ if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
+- if (inode_capable(inode, CAP_DAC_OVERRIDE))
++ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
+ return 0;
+
+ /*
+@@ -353,7 +354,7 @@ int generic_permission(struct inode *ino
+ */
+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+ if (mask == MAY_READ)
+- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
++ if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
+ return 0;
+
+ return -EACCES;
+@@ -2370,7 +2371,7 @@ static inline int check_sticky(struct in
+ return 0;
+ if (uid_eq(dir->i_uid, fsuid))
+ return 0;
+- return !inode_capable(inode, CAP_FOWNER);
++ return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
+ }
+
+ /*
+--- a/fs/xfs/xfs_ioctl.c 2014-06-11 19:57:03.309668028 -0400
++++ b/fs/xfs/xfs_ioctl.c 2014-06-11 20:11:15.719661287 -0400
+@@ -1241,7 +1241,7 @@ xfs_ioctl_setattr(
+ * cleared upon successful return from chown()
+ */
+ if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
+- !inode_capable(VFS_I(ip), CAP_FSETID))
++ !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
+ ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
+
+ /*
+--- a/include/linux/capability.h 2014-06-11 19:57:21.319667886 -0400
++++ b/include/linux/capability.h 2014-06-11 20:11:52.129660999 -0400
+@@ -210,7 +210,7 @@ extern bool has_ns_capability_noaudit(st
+ struct user_namespace *ns, int cap);
+ extern bool capable(int cap);
+ extern bool ns_capable(struct user_namespace *ns, int cap);
+-extern bool inode_capable(const struct inode *inode, int cap);
++extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
+ extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
+
+ /* audit system wants to get cap info from files as well */
+--- a/kernel/capability.c 2014-06-11 20:00:58.389666169 -0400
++++ b/kernel/capability.c 2014-06-11 20:13:46.629660094 -0400
+@@ -433,23 +433,19 @@ bool capable(int cap)
+ EXPORT_SYMBOL(capable);
+
+ /**
+- * inode_capable - Check superior capability over inode
++ * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
+ * @inode: The inode in question
+ * @cap: The capability in question
+ *
+- * Return true if the current task has the given superior capability
+- * targeted at it's own user namespace and that the given inode is owned
+- * by the current user namespace or a child namespace.
+- *
+- * Currently we check to see if an inode is owned by the current
+- * user namespace by seeing if the inode's owner maps into the
+- * current user namespace.
+- *
++ * Return true if the current task has the given capability targeted at
++ * its own user namespace and that the given inode's uid and gid are
++ * mapped into the current user namespace
+ */
+-bool inode_capable(const struct inode *inode, int cap)
++bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
+ {
+ struct user_namespace *ns = current_user_ns();
+
+- return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
++ return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
++ kgid_has_mapping(ns, inode->i_gid);
+ }
+-EXPORT_SYMBOL(inode_capable);
++EXPORT_SYMBOL(capable_wrt_inode_uidgid);
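
The hunks above are the 3.12 backport of the upstream fix for CVE-2014-4014: inode_capable() trusted a uid mapping alone, while capable_wrt_inode_uidgid() also demands that the inode's gid be mapped into the caller's user namespace. As a hedged sketch of the problem class this closes — the file name, ids, and the exact pre-patch outcome below are illustrative, not taken from the patch — consider a file you own whose group you do not belong to:

/* Illustrative sketch only: inside a new user namespace where our uid is
 * mapped but no gid mapping exists, try to set the setgid bit on a file
 * we own but whose group we are not a member of. With the old
 * inode_capable() the unmapped gid was never checked, so S_ISGID could be
 * kept; with capable_wrt_inode_uidgid() the kernel clears it instead.
 * Error handling is deliberately minimal. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static void write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		if (write(fd, s, strlen(s)) < 0)
			perror(path);
		close(fd);
	}
}

int main(void)
{
	char map[64];

	snprintf(map, sizeof(map), "0 %u 1", (unsigned)getuid());
	if (unshare(CLONE_NEWUSER)) {
		perror("unshare");
		return 1;
	}
	/* setgroups gating only exists on kernels >= 3.19; harmless on 3.12. */
	write_str("/proc/self/setgroups", "deny");
	write_str("/proc/self/uid_map", map);	/* map our uid, leave gids unmapped */

	/* "testfile" is assumed to be owned by us, group e.g. root. */
	if (chmod("testfile", 02755))
		perror("chmod");
	return 0;
}
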
diff --git a/4500_support-for-pogoplug-e02.patch b/4500_support-for-pogoplug-e02.patch
new file mode 100644
index 00000000..9f0becde
--- /dev/null
+++ b/4500_support-for-pogoplug-e02.patch
@@ -0,0 +1,172 @@
+diff --git a/arch/arm/configs/kirkwood_defconfig b/arch/arm/configs/kirkwood_defconfig
+index 0f2aa61..8c3146b 100644
+--- a/arch/arm/configs/kirkwood_defconfig
++++ b/arch/arm/configs/kirkwood_defconfig
+@@ -20,6 +20,7 @@ CONFIG_MACH_NET2BIG_V2=y
+ CONFIG_MACH_D2NET_V2=y
+ CONFIG_MACH_NET2BIG_V2=y
+ CONFIG_MACH_NET5BIG_V2=y
++CONFIG_MACH_POGO_E02=n
+ CONFIG_MACH_OPENRD_BASE=y
+ CONFIG_MACH_OPENRD_CLIENT=y
+ CONFIG_MACH_OPENRD_ULTIMATE=y
+diff --git a/arch/arm/mach-kirkwood/Kconfig b/arch/arm/mach-kirkwood/Kconfig
+index b634f96..cd7f289 100644
+--- a/arch/arm/mach-kirkwood/Kconfig
++++ b/arch/arm/mach-kirkwood/Kconfig
+@@ -62,6 +62,15 @@ config MACH_NETSPACE_V2
+ Say 'Y' here if you want your kernel to support the
+ LaCie Network Space v2 NAS.
+
++config MACH_POGO_E02
++ bool "CE Pogoplug E02"
++ default n
++ help
++ Say 'Y' here if you want your kernel to support the
++	  CloudEngines Pogoplug E02. It differs from Marvell's
++	  SheevaPlug Reference Board in a few details, most
++	  notably in the LED assignments.
++
+ config MACH_OPENRD
+ bool
+
+diff --git a/arch/arm/mach-kirkwood/Makefile b/arch/arm/mach-kirkwood/Makefile
+index ac4cd75..dddbb40 100644
+--- a/arch/arm/mach-kirkwood/Makefile
++++ b/arch/arm/mach-kirkwood/Makefile
+@@ -2,6 +2,7 @@ obj-y += common.o irq.o pcie.o mpp.o
+ obj-$(CONFIG_MACH_D2NET_V2) += d2net_v2-setup.o lacie_v2-common.o
+ obj-$(CONFIG_MACH_NET2BIG_V2) += netxbig_v2-setup.o lacie_v2-common.o
+ obj-$(CONFIG_MACH_NET5BIG_V2) += netxbig_v2-setup.o lacie_v2-common.o
++obj-$(CONFIG_MACH_POGO_E02) += pogo_e02-setup.o
+ obj-$(CONFIG_MACH_OPENRD) += openrd-setup.o
+ obj-$(CONFIG_MACH_RD88F6192_NAS) += rd88f6192-nas-setup.o
+ obj-$(CONFIG_MACH_RD88F6281) += rd88f6281-setup.o
+diff --git a/arch/arm/mach-kirkwood/pogo_e02-setup.c b/arch/arm/mach-kirkwood/pogo_e02-setup.c
+new file mode 100644
+index 0000000..f57e8f7
+--- /dev/null
++++ b/arch/arm/mach-kirkwood/pogo_e02-setup.c
+@@ -0,0 +1,122 @@
++/*
++ * arch/arm/mach-kirkwood/pogo_e02-setup.c
++ *
++ * CloudEngines Pogoplug E02 support
++ *
++ * Copyright (C) 2013 Christoph Junghans <ottxor@gentoo.org>
++ * Based on a patch in Arch Linux for Arm by:
++ * Copyright (C) 2012 Kevin Mihelich <kevin@miheli.ch>
++ * and <pazos@lavabit.com>
++ *
++ * Based on the board file sheevaplug-setup.c
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/ata_platform.h>
++#include <linux/mtd/partitions.h>
++#include <linux/mv643xx_eth.h>
++#include <linux/gpio.h>
++#include <linux/leds.h>
++#include <asm/mach-types.h>
++#include <asm/mach/arch.h>
++#include <mach/kirkwood.h>
++#include "common.h"
++#include "mpp.h"
++
++static struct mtd_partition pogo_e02_nand_parts[] = {
++ {
++ .name = "u-boot",
++ .offset = 0,
++ .size = SZ_1M
++ }, {
++ .name = "uImage",
++ .offset = MTDPART_OFS_NXTBLK,
++ .size = SZ_4M
++ }, {
++ .name = "pogoplug",
++ .offset = MTDPART_OFS_NXTBLK,
++ .size = SZ_32M
++ }, {
++ .name = "root",
++ .offset = MTDPART_OFS_NXTBLK,
++ .size = MTDPART_SIZ_FULL
++ },
++};
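
The offsets above chain via MTDPART_OFS_NXTBLK, which rounds each partition
start up to the next erase block. Assuming the 1M/4M/32M sizes already fall
on block boundaries, the absolute layout works out as sketched below:

	/*
	 *   0x00000000 - 0x000FFFFF   u-boot    (SZ_1M)
	 *   0x00100000 - 0x004FFFFF   uImage    (SZ_4M)
	 *   0x00500000 - 0x024FFFFF   pogoplug  (SZ_32M)
	 *   0x02500000 - end of NAND  root      (MTDPART_SIZ_FULL)
	 */
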
++
++static struct mv643xx_eth_platform_data pogo_e02_ge00_data = {
++ .phy_addr = MV643XX_ETH_PHY_ADDR(0),
++};
++
++static struct gpio_led pogo_e02_led_pins[] = {
++ {
++ .name = "status:green:health",
++ .default_trigger = "default-on",
++ .gpio = 48,
++ .active_low = 1,
++ },
++ {
++ .name = "status:orange:fault",
++ .default_trigger = "none",
++ .gpio = 49,
++ .active_low = 1,
++ }
++};
++
++static struct gpio_led_platform_data pogo_e02_led_data = {
++ .leds = pogo_e02_led_pins,
++ .num_leds = ARRAY_SIZE(pogo_e02_led_pins),
++};
++
++static struct platform_device pogo_e02_leds = {
++ .name = "leds-gpio",
++ .id = -1,
++ .dev = {
++ .platform_data = &pogo_e02_led_data,
++ }
++};
++
++static unsigned int pogo_e02_mpp_config[] __initdata = {
++ MPP29_GPIO, /* USB Power Enable */
++ MPP48_GPIO, /* LED Green */
++ MPP49_GPIO, /* LED Orange */
++ 0
++};
++
++static void __init pogo_e02_init(void)
++{
++ /*
++ * Basic setup. Needs to be called early.
++ */
++ kirkwood_init();
++
++ /* setup gpio pin select */
++ kirkwood_mpp_conf(pogo_e02_mpp_config);
++
++ kirkwood_uart0_init();
++ kirkwood_nand_init(ARRAY_AND_SIZE(pogo_e02_nand_parts), 25);
++
++ if (gpio_request(29, "USB Power Enable") != 0 ||
++ gpio_direction_output(29, 1) != 0)
++ pr_err("can't set up GPIO 29 (USB Power Enable)\n");
++ kirkwood_ehci_init();
++
++ kirkwood_ge00_init(&pogo_e02_ge00_data);
++
++ platform_device_register(&pogo_e02_leds);
++}
++
++MACHINE_START(POGO_E02, "Pogoplug E02")
++ .atag_offset = 0x100,
++ .init_machine = pogo_e02_init,
++ .map_io = kirkwood_map_io,
++ .init_early = kirkwood_init_early,
++ .init_irq = kirkwood_init_irq,
++ .timer = &kirkwood_timer,
++ .restart = kirkwood_restart,
++MACHINE_END
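
As an aside, the gpio_request()/gpio_direction_output() pair in
pogo_e02_init() can be collapsed with the gpio_request_one() helper that
3.x kernels already provide; a hedged sketch (the function name here is
hypothetical, not part of the patch):

	#include <linux/kernel.h>
	#include <linux/gpio.h>

	/* Request GPIO 29 and drive it high in a single call. */
	static void __init pogo_e02_usb_power_sketch(void)
	{
		if (gpio_request_one(29, GPIOF_OUT_INIT_HIGH,
				     "USB Power Enable"))
			pr_err("can't set up GPIO 29 (USB Power Enable)\n");
	}
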
diff --git a/5000_BFQ-1-block-cgroups-kconfig-build-bits-for-v6r2-3.11.patch b/5000_BFQ-1-block-cgroups-kconfig-build-bits-for-v6r2-3.11.patch
new file mode 100644
index 00000000..cf5fd059
--- /dev/null
+++ b/5000_BFQ-1-block-cgroups-kconfig-build-bits-for-v6r2-3.11.patch
@@ -0,0 +1,97 @@
+From 3728677b4d3cd39d83be87f9939328201b871c48 Mon Sep 17 00:00:00 2001
+From: Arianna Avanzini <avanzini.arianna@gmail.com>
+Date: Tue, 3 Sep 2013 16:50:42 +0200
+Subject: [PATCH 1/3] block: cgroups, kconfig, build bits for BFQ-v6r2-3.11
+
+Update Kconfig.iosched and do the related Makefile changes to include
+kernel configuration options for BFQ. Also add the bfqio controller
+to the cgroups subsystem.
+
+Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
+Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
+---
+ block/Kconfig.iosched | 25 +++++++++++++++++++++++++
+ block/Makefile | 1 +
+ include/linux/cgroup_subsys.h | 4 ++++
+ 3 files changed, 30 insertions(+)
+
+diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
+index 421bef9..695e064 100644
+--- a/block/Kconfig.iosched
++++ b/block/Kconfig.iosched
+@@ -39,6 +39,27 @@ config CFQ_GROUP_IOSCHED
+ ---help---
+ Enable group IO scheduling in CFQ.
+
++config IOSCHED_BFQ
++ tristate "BFQ I/O scheduler"
++ default n
++ ---help---
++ The BFQ I/O scheduler tries to distribute bandwidth among
++ all processes according to their weights.
++ It aims at distributing the bandwidth as desired, independently of
++ the disk parameters and with any workload. It also tries to
++ guarantee low latency to interactive and soft real-time
++	  applications. If built into the kernel (saying Y here), BFQ can
++ be configured to support hierarchical scheduling.
++
++config CGROUP_BFQIO
++ bool "BFQ hierarchical scheduling support"
++ depends on CGROUPS && IOSCHED_BFQ=y
++ default n
++ ---help---
++ Enable hierarchical scheduling in BFQ, using the cgroups
++ filesystem interface. The name of the subsystem will be
++ bfqio.
++
+ choice
+ prompt "Default I/O scheduler"
+ default DEFAULT_CFQ
+@@ -52,6 +73,9 @@ choice
+ config DEFAULT_CFQ
+ bool "CFQ" if IOSCHED_CFQ=y
+
++ config DEFAULT_BFQ
++ bool "BFQ" if IOSCHED_BFQ=y
++
+ config DEFAULT_NOOP
+ bool "No-op"
+
+@@ -61,6 +85,7 @@ config DEFAULT_IOSCHED
+ string
+ default "deadline" if DEFAULT_DEADLINE
+ default "cfq" if DEFAULT_CFQ
++ default "bfq" if DEFAULT_BFQ
+ default "noop" if DEFAULT_NOOP
+
+ endmenu
+diff --git a/block/Makefile b/block/Makefile
+index 39b76ba..c0d20fa 100644
+--- a/block/Makefile
++++ b/block/Makefile
+@@ -15,6 +15,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
+ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
+ obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
+ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
++obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
+
+ obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
+ obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
+diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
+index b613ffd..43c5dc9 100644
+--- a/include/linux/cgroup_subsys.h
++++ b/include/linux/cgroup_subsys.h
+@@ -39,6 +39,10 @@ SUBSYS(net_cls)
+ SUBSYS(blkio)
+ #endif
+
++#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_BFQIO)
++SUBSYS(bfqio)
++#endif
++
+ #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF)
+ SUBSYS(perf)
+ #endif
+--
+1.8.1.4
+
diff --git a/5000_BFQ-2-block-introduce-the-v6r2-I-O-sched-for-3.11.patch1 b/5000_BFQ-2-block-introduce-the-v6r2-I-O-sched-for-3.11.patch1
new file mode 100644
index 00000000..4ff89b82
--- /dev/null
+++ b/5000_BFQ-2-block-introduce-the-v6r2-I-O-sched-for-3.11.patch1
@@ -0,0 +1,5773 @@
+From 009b78bafe1763f71e6bdbb4f536b564a73b7db5 Mon Sep 17 00:00:00 2001
+From: Arianna Avanzini <avanzini.arianna@gmail.com>
+Date: Thu, 9 May 2013 19:10:02 +0200
+Subject: [PATCH 2/3] block: introduce the BFQ-v6r2 I/O sched for 3.11
+
+Add the BFQ-v6r2 I/O scheduler to 3.11.
+The general structure is borrowed from CFQ, as is much of the code. A (bfq_)queue
+is associated to each task doing I/O on a device, and each time a
+scheduling decision has to be made a queue is selected and served until
+it expires.
+
+ - Slices are given in the service domain: tasks are assigned
+    budgets, measured in number of sectors. Once granted the disk, a task
+ must however consume its assigned budget within a configurable
+ maximum time (by default, the maximum possible value of the
+ budgets is automatically computed to comply with this timeout).
+ This allows the desired latency vs "throughput boosting" tradeoff
+ to be set.
+
+ - Budgets are scheduled according to a variant of WF2Q+, implemented
+ using an augmented rb-tree to take eligibility into account while
+ preserving an O(log N) overall complexity.
+
+ - A low-latency tunable is provided; if enabled, both interactive
+ and soft real-time applications are guaranteed very low latency.
+
+  - Latency guarantees are preserved also in the presence of NCQ.
+
+ - Also with flash-based devices, a high throughput is achieved while
+ still preserving latency guarantees.
+
+ - Useful features borrowed from CFQ: cooperating-queues merging (with
+ some additional optimizations with respect to the original CFQ version),
+ static fallback queue for OOM.
+
+ - BFQ supports full hierarchical scheduling, exporting a cgroups
+ interface. Each node has a full scheduler, so each group can
+ be assigned its own ioprio (mapped to a weight, see next point)
+ and an ioprio_class.
+
+  - If the cgroups interface is used, weights can be explicitly
+ assigned, otherwise ioprio values are mapped to weights using the
+ relation weight = IOPRIO_BE_NR - ioprio.
+
+ - ioprio classes are served in strict priority order, i.e., lower
+ priority queues are not served as long as there are higher
+ priority queues. Among queues in the same class the bandwidth is
+ distributed in proportion to the weight of each queue. A very
+ thin extra bandwidth is however guaranteed to the Idle class, to
+ prevent it from starving.
+
+Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
+Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
+---
+ block/bfq-cgroup.c | 881 +++++++++++++++
+ block/bfq-ioc.c | 36 +
+ block/bfq-iosched.c | 3082 +++++++++++++++++++++++++++++++++++++++++++++++++++
+ block/bfq-sched.c | 1072 ++++++++++++++++++
+ block/bfq.h | 603 ++++++++++
+ 5 files changed, 5674 insertions(+)
+ create mode 100644 block/bfq-cgroup.c
+ create mode 100644 block/bfq-ioc.c
+ create mode 100644 block/bfq-iosched.c
+ create mode 100644 block/bfq-sched.c
+ create mode 100644 block/bfq.h
+
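
To make the ioprio-to-weight relation above concrete (IOPRIO_BE_NR is 8 in
the kernel headers; a worked illustration, not part of the patch):

	/*
	 * weight = IOPRIO_BE_NR - ioprio, with IOPRIO_BE_NR == 8:
	 *
	 *   ioprio 0 (highest) -> weight 8
	 *   ioprio 4 (default) -> weight 4
	 *   ioprio 7 (lowest)  -> weight 1
	 *
	 * Two queues at ioprio 0 and ioprio 7 therefore share the disk in
	 * an 8:1 ratio, enforced in the service (sector) domain rather
	 * than the time domain.
	 */
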
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+new file mode 100644
+index 0000000..bb9b851
+--- /dev/null
++++ b/block/bfq-cgroup.c
+@@ -0,0 +1,881 @@
++/*
++ * BFQ: CGROUPS support.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file.
++ */
++
++#ifdef CONFIG_CGROUP_BFQIO
++
++static DEFINE_MUTEX(bfqio_mutex);
++
++static bool bfqio_is_removed(struct cgroup *cgroup)
++{
++ return test_bit(CGRP_DEAD, &cgroup->flags);
++}
++
++static struct bfqio_cgroup bfqio_root_cgroup = {
++ .weight = BFQ_DEFAULT_GRP_WEIGHT,
++ .ioprio = BFQ_DEFAULT_GRP_IOPRIO,
++ .ioprio_class = BFQ_DEFAULT_GRP_CLASS,
++};
++
++static inline void bfq_init_entity(struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++ entity->weight = entity->new_weight;
++ entity->orig_weight = entity->new_weight;
++ entity->ioprio = entity->new_ioprio;
++ entity->ioprio_class = entity->new_ioprio_class;
++ entity->parent = bfqg->my_entity;
++ entity->sched_data = &bfqg->sched_data;
++}
++
++static struct bfqio_cgroup *cgroup_to_bfqio(struct cgroup *cgroup)
++{
++ return container_of(cgroup_subsys_state(cgroup, bfqio_subsys_id),
++ struct bfqio_cgroup, css);
++}
++
++/*
++ * Search for the bfq_group of bfqd in the hash table (for now only a
++ * list) of bgrp. Must be called under rcu_read_lock().
++ */
++static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp,
++ struct bfq_data *bfqd)
++{
++ struct bfq_group *bfqg;
++ void *key;
++
++ hlist_for_each_entry_rcu(bfqg, &bgrp->group_data, group_node) {
++ key = rcu_dereference(bfqg->bfqd);
++ if (key == bfqd)
++ return bfqg;
++ }
++
++ return NULL;
++}
++
++static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp,
++ struct bfq_group *bfqg)
++{
++ struct bfq_entity *entity = &bfqg->entity;
++
++ /*
++ * If the weight of the entity has never been set via the sysfs
++ * interface, then bgrp->weight == 0. In this case we initialize
++ * the weight from the current ioprio value. Otherwise, the group
++ * weight, if set, has priority over the ioprio value.
++ */
++ if (bgrp->weight == 0) {
++ entity->new_weight = bfq_ioprio_to_weight(bgrp->ioprio);
++ entity->new_ioprio = bgrp->ioprio;
++ } else {
++ entity->new_weight = bgrp->weight;
++ entity->new_ioprio = bfq_weight_to_ioprio(bgrp->weight);
++ }
++ entity->orig_weight = entity->weight = entity->new_weight;
++ entity->ioprio = entity->new_ioprio;
++ entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class;
++ entity->my_sched_data = &bfqg->sched_data;
++}
++
++static inline void bfq_group_set_parent(struct bfq_group *bfqg,
++ struct bfq_group *parent)
++{
++ struct bfq_entity *entity;
++
++ BUG_ON(parent == NULL);
++ BUG_ON(bfqg == NULL);
++
++ entity = &bfqg->entity;
++ entity->parent = parent->my_entity;
++ entity->sched_data = &parent->sched_data;
++}
++
++/**
++ * bfq_group_chain_alloc - allocate a chain of groups.
++ * @bfqd: queue descriptor.
++ * @cgroup: the leaf cgroup this chain starts from.
++ *
++ * Allocate a chain of groups starting from the one belonging to
++ * @cgroup up to the root cgroup. Stop if a cgroup on the chain
++ * to the root has already an allocated group on @bfqd.
++ */
++static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd,
++ struct cgroup *cgroup)
++{
++ struct bfqio_cgroup *bgrp;
++ struct bfq_group *bfqg, *prev = NULL, *leaf = NULL;
++
++ for (; cgroup != NULL; cgroup = cgroup->parent) {
++ bgrp = cgroup_to_bfqio(cgroup);
++
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ if (bfqg != NULL) {
++ /*
++ * All the cgroups in the path from there to the
++ * root must have a bfq_group for bfqd, so we don't
++ * need any more allocations.
++ */
++ break;
++ }
++
++ bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC);
++ if (bfqg == NULL)
++ goto cleanup;
++
++ bfq_group_init_entity(bgrp, bfqg);
++ bfqg->my_entity = &bfqg->entity;
++
++ if (leaf == NULL) {
++ leaf = bfqg;
++ prev = leaf;
++ } else {
++ bfq_group_set_parent(prev, bfqg);
++ /*
++ * Build a list of allocated nodes using the bfqd
++			 * field, which is still unused and will be initialized
++			 * only after the node is connected.
++ */
++ prev->bfqd = bfqg;
++ prev = bfqg;
++ }
++ }
++
++ return leaf;
++
++cleanup:
++ while (leaf != NULL) {
++ prev = leaf;
++ leaf = leaf->bfqd;
++ kfree(prev);
++ }
++
++ return NULL;
++}
++
++/**
++ * bfq_group_chain_link - link an allocated group chain to a cgroup hierarchy.
++ * @bfqd: the queue descriptor.
++ * @cgroup: the leaf cgroup to start from.
++ * @leaf: the leaf group (to be associated to @cgroup).
++ *
++ * Try to link a chain of groups to a cgroup hierarchy, connecting the
++ * nodes bottom-up, so we can be sure that when we find a cgroup in the
++ * hierarchy that already has a group associated to @bfqd, all the nodes
++ * in the path to the root cgroup have one too.
++ *
++ * On locking: the queue lock protects the hierarchy (there is a hierarchy
++ * per device) while the bfqio_cgroup lock protects the list of groups
++ * belonging to the same cgroup.
++ */
++static void bfq_group_chain_link(struct bfq_data *bfqd, struct cgroup *cgroup,
++ struct bfq_group *leaf)
++{
++ struct bfqio_cgroup *bgrp;
++ struct bfq_group *bfqg, *next, *prev = NULL;
++ unsigned long flags;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ for (; cgroup != NULL && leaf != NULL; cgroup = cgroup->parent) {
++ bgrp = cgroup_to_bfqio(cgroup);
++ next = leaf->bfqd;
++
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ BUG_ON(bfqg != NULL);
++
++ spin_lock_irqsave(&bgrp->lock, flags);
++
++ rcu_assign_pointer(leaf->bfqd, bfqd);
++ hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data);
++ hlist_add_head(&leaf->bfqd_node, &bfqd->group_list);
++
++ spin_unlock_irqrestore(&bgrp->lock, flags);
++
++ prev = leaf;
++ leaf = next;
++ }
++
++ BUG_ON(cgroup == NULL && leaf != NULL);
++ if (cgroup != NULL && prev != NULL) {
++ bgrp = cgroup_to_bfqio(cgroup);
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ bfq_group_set_parent(prev, bfqg);
++ }
++}
++
++/**
++ * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup.
++ * @bfqd: queue descriptor.
++ * @cgroup: cgroup being searched for.
++ *
++ * Return a group associated to @bfqd in @cgroup, allocating one if
++ * necessary. When a group is returned all the cgroups in the path
++ * to the root have a group associated to @bfqd.
++ *
++ * If the allocation fails, return the root group: this breaks guarantees
++ * but is a safe fallback. If this loss becomes a problem it can be
++ * mitigated using the equivalent weight (given by the product of the
++ * weights of the groups in the path from @group to the root) in the
++ * root scheduler.
++ *
++ * We allocate all the missing nodes in the path from the leaf cgroup
++ * to the root and we connect the nodes only after all the allocations
++ * have been successful.
++ */
++static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
++ struct cgroup *cgroup)
++{
++ struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup);
++ struct bfq_group *bfqg;
++
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ if (bfqg != NULL)
++ return bfqg;
++
++ bfqg = bfq_group_chain_alloc(bfqd, cgroup);
++ if (bfqg != NULL)
++ bfq_group_chain_link(bfqd, cgroup, bfqg);
++ else
++ bfqg = bfqd->root_group;
++
++ return bfqg;
++}
++
++/**
++ * bfq_bfqq_move - migrate @bfqq to @bfqg.
++ * @bfqd: queue descriptor.
++ * @bfqq: the queue to move.
++ * @entity: @bfqq's entity.
++ * @bfqg: the group to move to.
++ *
++ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
++ * it on the new one. Avoid putting the entity on the old group idle tree.
++ *
++ * Must be called under the queue lock; the cgroup owning @bfqg must
++ * not disappear (for now this just means that we are called under
++ * rcu_read_lock()).
++ */
++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct bfq_entity *entity, struct bfq_group *bfqg)
++{
++ int busy, resume;
++
++ busy = bfq_bfqq_busy(bfqq);
++ resume = !RB_EMPTY_ROOT(&bfqq->sort_list);
++
++ BUG_ON(resume && !entity->on_st);
++ BUG_ON(busy && !resume && entity->on_st && bfqq != bfqd->active_queue);
++
++ if (busy) {
++ BUG_ON(atomic_read(&bfqq->ref) < 2);
++
++ if (!resume)
++ bfq_del_bfqq_busy(bfqd, bfqq, 0);
++ else
++ bfq_deactivate_bfqq(bfqd, bfqq, 0);
++ } else if (entity->on_st)
++ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
++
++ /*
++ * Here we use a reference to bfqg. We don't need a refcounter
++ * as the cgroup reference will not be dropped, so that its
++ * destroy() callback will not be invoked.
++ */
++ entity->parent = bfqg->my_entity;
++ entity->sched_data = &bfqg->sched_data;
++
++ if (busy && resume)
++ bfq_activate_bfqq(bfqd, bfqq);
++
++ if (bfqd->active_queue == NULL && !bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++}
++
++/**
++ * __bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bfqd: the queue descriptor.
++ * @bic: the bic to move.
++ * @cgroup: the cgroup to move to.
++ *
++ * Move bic to cgroup, assuming that bfqd->queue is locked; the caller
++ * has to make sure that the reference to cgroup is valid across the call.
++ *
++ * NOTE: an alternative approach might have been to store the current
++ * cgroup in bfqq and to get a reference to it, reducing the lookup
++ * time here, at the price of slightly more complex code.
++ */
++static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic,
++ struct cgroup *cgroup)
++{
++ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
++ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
++ struct bfq_entity *entity;
++ struct bfq_group *bfqg;
++ struct bfqio_cgroup *bgrp;
++
++ bgrp = cgroup_to_bfqio(cgroup);
++
++ bfqg = bfq_find_alloc_group(bfqd, cgroup);
++ if (async_bfqq != NULL) {
++ entity = &async_bfqq->entity;
++
++ if (entity->sched_data != &bfqg->sched_data) {
++ bic_set_bfqq(bic, NULL, 0);
++ bfq_log_bfqq(bfqd, async_bfqq,
++ "bic_change_group: %p %d",
++ async_bfqq, atomic_read(&async_bfqq->ref));
++ bfq_put_queue(async_bfqq);
++ }
++ }
++
++ if (sync_bfqq != NULL) {
++ entity = &sync_bfqq->entity;
++ if (entity->sched_data != &bfqg->sched_data)
++ bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg);
++ }
++
++ return bfqg;
++}
++
++/**
++ * bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bic: the bic being migrated.
++ * @cgroup: the destination cgroup.
++ *
++ * When the task owning @bic is moved to @cgroup, @bic is immediately
++ * moved into its new parent group.
++ */
++static void bfq_bic_change_cgroup(struct bfq_io_cq *bic,
++ struct cgroup *cgroup)
++{
++ struct bfq_data *bfqd;
++ unsigned long uninitialized_var(flags);
++
++ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data), &flags);
++ if (bfqd != NULL) {
++ __bfq_bic_change_cgroup(bfqd, bic, cgroup);
++ bfq_put_bfqd_unlock(bfqd, &flags);
++ }
++}
++
++/**
++ * bfq_bic_update_cgroup - update the cgroup of @bic.
++ * @bic: the @bic to update.
++ *
++ * Make sure that @bic is enqueued in the cgroup of the current task.
++ * We need this in addition to moving bics during the cgroup attach
++ * phase because the task owning @bic could be at its first disk
++ * access or we may end up in the root cgroup as the result of a
++ * memory allocation failure and here we try to move to the right
++ * group.
++ *
++ * Must be called under the queue lock. It is safe to use the returned
++ * value even after the rcu_read_unlock() as the migration/destruction
++ * paths act under the queue lock too. IOW it is impossible to race with
++ * group migration/destruction and end up with an invalid group as:
++ * a) here cgroup has not yet been destroyed, nor its destroy callback
++ * has started execution, as current holds a reference to it,
++ * b) if it is destroyed after rcu_read_unlock() [after current is
++ * migrated to a different cgroup] its attach() callback will have
++ *    taken care of removing all the references to the old cgroup data.
++ */
++static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ struct bfq_group *bfqg;
++ struct cgroup *cgroup;
++
++ BUG_ON(bfqd == NULL);
++
++ rcu_read_lock();
++ cgroup = task_cgroup(current, bfqio_subsys_id);
++ bfqg = __bfq_bic_change_cgroup(bfqd, bic, cgroup);
++ rcu_read_unlock();
++
++ return bfqg;
++}
++
++/**
++ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
++ * @st: the service tree being flushed.
++ */
++static inline void bfq_flush_idle_tree(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entity = st->first_idle;
++
++ for (; entity != NULL; entity = st->first_idle)
++ __bfq_deactivate_entity(entity, 0);
++}
++
++/**
++ * bfq_reparent_leaf_entity - move leaf entity to the root_group.
++ * @bfqd: the device data structure with the root group.
++ * @entity: the entity to move.
++ */
++static inline void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ BUG_ON(bfqq == NULL);
++ bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group);
++ return;
++}
++
++/**
++ * bfq_reparent_active_entities - move to the root group all active entities.
++ * @bfqd: the device data structure with the root group.
++ * @bfqg: the group to move from.
++ * @st: the service tree with the entities.
++ *
++ * Needs queue_lock to be taken and reference to be valid over the call.
++ */
++static inline void bfq_reparent_active_entities(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ struct bfq_service_tree *st)
++{
++ struct rb_root *active = &st->active;
++ struct bfq_entity *entity = NULL;
++
++ if (!RB_EMPTY_ROOT(&st->active))
++ entity = bfq_entity_of(rb_first(active));
++
++ for (; entity != NULL ; entity = bfq_entity_of(rb_first(active)))
++ bfq_reparent_leaf_entity(bfqd, entity);
++
++ if (bfqg->sched_data.active_entity != NULL)
++ bfq_reparent_leaf_entity(bfqd, bfqg->sched_data.active_entity);
++
++ return;
++}
++
++/**
++ * bfq_destroy_group - destroy @bfqg.
++ * @bgrp: the bfqio_cgroup containing @bfqg.
++ * @bfqg: the group being destroyed.
++ *
++ * Destroy @bfqg, making sure that it is not referenced from its parent.
++ */
++static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg)
++{
++ struct bfq_data *bfqd;
++ struct bfq_service_tree *st;
++ struct bfq_entity *entity = bfqg->my_entity;
++ unsigned long uninitialized_var(flags);
++ int i;
++
++ hlist_del(&bfqg->group_node);
++
++ /*
++ * Empty all service_trees belonging to this group before deactivating
++ * the group itself.
++ */
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
++ st = bfqg->sched_data.service_tree + i;
++
++ /*
++ * The idle tree may still contain bfq_queues belonging
++		 * to exited tasks because they never migrated to a different
++		 * cgroup from the one being destroyed now. No one else
++ * can access them so it's safe to act without any lock.
++ */
++ bfq_flush_idle_tree(st);
++
++ /*
++ * It may happen that some queues are still active
++ * (busy) upon group destruction (if the corresponding
++ * processes have been forced to terminate). We move
++ * all the leaf entities corresponding to these queues
++ * to the root_group.
++ * Also, it may happen that the group has an entity
++ * under service, which is disconnected from the active
++ * tree: it must be moved, too.
++ * There is no need to put the sync queues, as the
++ * scheduler has taken no reference.
++ */
++ bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++ if (bfqd != NULL) {
++ bfq_reparent_active_entities(bfqd, bfqg, st);
++ bfq_put_bfqd_unlock(bfqd, &flags);
++ }
++ BUG_ON(!RB_EMPTY_ROOT(&st->active));
++ BUG_ON(!RB_EMPTY_ROOT(&st->idle));
++ }
++ BUG_ON(bfqg->sched_data.next_active != NULL);
++ BUG_ON(bfqg->sched_data.active_entity != NULL);
++
++ /*
++ * We may race with device destruction, take extra care when
++ * dereferencing bfqg->bfqd.
++ */
++ bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++ if (bfqd != NULL) {
++ hlist_del(&bfqg->bfqd_node);
++ __bfq_deactivate_entity(entity, 0);
++ bfq_put_async_queues(bfqd, bfqg);
++ bfq_put_bfqd_unlock(bfqd, &flags);
++ }
++ BUG_ON(entity->tree != NULL);
++
++ /*
++ * No need to defer the kfree() to the end of the RCU grace
++ * period: we are called from the destroy() callback of our
++	 * cgroup, so we can be sure that no one is a) still using
++ * this cgroup or b) doing lookups in it.
++ */
++ kfree(bfqg);
++}
++
++static void bfq_end_raising_async(struct bfq_data *bfqd)
++{
++ struct hlist_node *tmp;
++ struct bfq_group *bfqg;
++
++ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node)
++ bfq_end_raising_async_queues(bfqd, bfqg);
++}
++
++/**
++ * bfq_disconnect_groups - disconnect @bfqd from all its groups.
++ * @bfqd: the device descriptor being exited.
++ *
++ * When the device exits we just make sure that no lookup can return
++ * the now unused group structures. They will be deallocated on cgroup
++ * destruction.
++ */
++static void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++ struct hlist_node *tmp;
++ struct bfq_group *bfqg;
++
++	bfq_log(bfqd, "disconnect_groups beginning");
++ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) {
++ hlist_del(&bfqg->bfqd_node);
++
++ __bfq_deactivate_entity(bfqg->my_entity, 0);
++
++ /*
++ * Don't remove from the group hash, just set an
++ * invalid key. No lookups can race with the
++ * assignment as bfqd is being destroyed; this
++ * implies also that new elements cannot be added
++ * to the list.
++ */
++ rcu_assign_pointer(bfqg->bfqd, NULL);
++
++ bfq_log(bfqd, "disconnect_groups: put async for group %p",
++			bfqg);
++ bfq_put_async_queues(bfqd, bfqg);
++ }
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++ struct bfqio_cgroup *bgrp = &bfqio_root_cgroup;
++ struct bfq_group *bfqg = bfqd->root_group;
++
++ bfq_put_async_queues(bfqd, bfqg);
++
++ spin_lock_irq(&bgrp->lock);
++ hlist_del_rcu(&bfqg->group_node);
++ spin_unlock_irq(&bgrp->lock);
++
++ /*
++ * No need to synchronize_rcu() here: since the device is gone
++ * there cannot be any read-side access to its root_group.
++ */
++ kfree(bfqg);
++}
++
++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
++{
++ struct bfq_group *bfqg;
++ struct bfqio_cgroup *bgrp;
++ int i;
++
++ bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
++ if (bfqg == NULL)
++ return NULL;
++
++ bfqg->entity.parent = NULL;
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++ bgrp = &bfqio_root_cgroup;
++ spin_lock_irq(&bgrp->lock);
++ rcu_assign_pointer(bfqg->bfqd, bfqd);
++ hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data);
++ spin_unlock_irq(&bgrp->lock);
++
++ return bfqg;
++}
++
++#define SHOW_FUNCTION(__VAR) \
++static u64 bfqio_cgroup_##__VAR##_read(struct cgroup *cgroup, \
++ struct cftype *cftype) \
++{ \
++ struct bfqio_cgroup *bgrp; \
++ u64 ret = -ENODEV; \
++ \
++ mutex_lock(&bfqio_mutex); \
++ if (bfqio_is_removed(cgroup)) \
++ goto out_unlock; \
++ \
++ bgrp = cgroup_to_bfqio(cgroup); \
++ spin_lock_irq(&bgrp->lock); \
++ ret = bgrp->__VAR; \
++ spin_unlock_irq(&bgrp->lock); \
++ \
++out_unlock: \
++ mutex_unlock(&bfqio_mutex); \
++ return ret; \
++}
++
++SHOW_FUNCTION(weight);
++SHOW_FUNCTION(ioprio);
++SHOW_FUNCTION(ioprio_class);
++#undef SHOW_FUNCTION
++
++#define STORE_FUNCTION(__VAR, __MIN, __MAX) \
++static int bfqio_cgroup_##__VAR##_write(struct cgroup *cgroup, \
++ struct cftype *cftype, \
++ u64 val) \
++{ \
++ struct bfqio_cgroup *bgrp; \
++ struct bfq_group *bfqg; \
++ int ret = -EINVAL; \
++ \
++ if (val < (__MIN) || val > (__MAX)) \
++ return ret; \
++ \
++ ret = -ENODEV; \
++ mutex_lock(&bfqio_mutex); \
++ if (bfqio_is_removed(cgroup)) \
++ goto out_unlock; \
++ ret = 0; \
++ \
++ bgrp = cgroup_to_bfqio(cgroup); \
++ \
++ spin_lock_irq(&bgrp->lock); \
++ bgrp->__VAR = (unsigned short)val; \
++ hlist_for_each_entry(bfqg, &bgrp->group_data, group_node) { \
++ /* \
++ * Setting the ioprio_changed flag of the entity \
++ * to 1 with new_##__VAR == ##__VAR would re-set \
++ * the value of the weight to its ioprio mapping. \
++ * Set the flag only if necessary. \
++ */ \
++ if ((unsigned short)val != bfqg->entity.new_##__VAR) { \
++ bfqg->entity.new_##__VAR = (unsigned short)val; \
++ smp_wmb(); \
++ bfqg->entity.ioprio_changed = 1; \
++ } \
++ } \
++ spin_unlock_irq(&bgrp->lock); \
++ \
++out_unlock: \
++ mutex_unlock(&bfqio_mutex); \
++ return ret; \
++}
++
++STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT);
++STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1);
++STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE);
++#undef STORE_FUNCTION
++
++static struct cftype bfqio_files[] = {
++ {
++ .name = "weight",
++ .read_u64 = bfqio_cgroup_weight_read,
++ .write_u64 = bfqio_cgroup_weight_write,
++ },
++ {
++ .name = "ioprio",
++ .read_u64 = bfqio_cgroup_ioprio_read,
++ .write_u64 = bfqio_cgroup_ioprio_write,
++ },
++ {
++ .name = "ioprio_class",
++ .read_u64 = bfqio_cgroup_ioprio_class_read,
++ .write_u64 = bfqio_cgroup_ioprio_class_write,
++ },
++ { }, /* terminate */
++};
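
Once the bfqio controller is mounted, these cftypes surface as per-group
attribute files. A usage sketch, assuming the conventional mount point
(which the patch itself does not dictate):

	/*
	 * With the hierarchy mounted at /sys/fs/cgroup/bfqio, each group
	 * directory carries:
	 *
	 *   bfqio.weight        in BFQ_MIN_WEIGHT..BFQ_MAX_WEIGHT
	 *   bfqio.ioprio        in 0..IOPRIO_BE_NR - 1
	 *   bfqio.ioprio_class  in IOPRIO_CLASS_RT..IOPRIO_CLASS_IDLE
	 *
	 * A write walks every bfq_group of the cgroup and sets
	 * entity->ioprio_changed, so the scheduler applies the new value
	 * on the next activation of the entity.
	 */
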
++
++static struct cgroup_subsys_state *bfqio_create(struct cgroup *cgroup)
++{
++ struct bfqio_cgroup *bgrp;
++
++ if (cgroup->parent != NULL) {
++ bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL);
++ if (bgrp == NULL)
++ return ERR_PTR(-ENOMEM);
++ } else
++ bgrp = &bfqio_root_cgroup;
++
++ spin_lock_init(&bgrp->lock);
++ INIT_HLIST_HEAD(&bgrp->group_data);
++ bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO;
++ bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS;
++
++ return &bgrp->css;
++}
++
++/*
++ * We cannot support shared io contexts, as we have no means to support
++ * two tasks with the same ioc in two different groups without major rework
++ * of the main bic/bfqq data structures. For now we allow a task to change
++ * its cgroup only if it's the only owner of its ioc; the drawback of this
++ * behavior is that a group containing a task that forked using CLONE_IO
++ * will not be destroyed until the tasks sharing the ioc die.
++ */
++static int bfqio_can_attach(struct cgroup *cgroup, struct cgroup_taskset *tset)
++{
++ struct task_struct *task;
++ struct io_context *ioc;
++ int ret = 0;
++
++ cgroup_taskset_for_each(task, cgroup, tset) {
++ /* task_lock() is needed to avoid races with exit_io_context() */
++ task_lock(task);
++ ioc = task->io_context;
++ if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1)
++ /*
++ * ioc == NULL means that the task is either too young or
++			 * exiting: if it still has no ioc the ioc can't be shared,
++ * if the task is exiting the attach will fail anyway, no
++ * matter what we return here.
++ */
++ ret = -EINVAL;
++ task_unlock(task);
++ if (ret)
++ break;
++ }
++
++ return ret;
++}
++
++static void bfqio_attach(struct cgroup *cgroup, struct cgroup_taskset *tset)
++{
++ struct task_struct *task;
++ struct io_context *ioc;
++ struct io_cq *icq;
++
++ /*
++ * IMPORTANT NOTE: The move of more than one process at a time to a
++ * new group has not yet been tested.
++ */
++ cgroup_taskset_for_each(task, cgroup, tset) {
++ ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
++ if (ioc) {
++ /*
++ * Handle cgroup change here.
++ */
++ rcu_read_lock();
++ hlist_for_each_entry_rcu(icq, &ioc->icq_list, ioc_node)
++ if (!strncmp(icq->q->elevator->type->elevator_name,
++ "bfq", ELV_NAME_MAX))
++ bfq_bic_change_cgroup(icq_to_bic(icq),
++ cgroup);
++ rcu_read_unlock();
++ put_io_context(ioc);
++ }
++ }
++}
++
++static void bfqio_destroy(struct cgroup *cgroup)
++{
++ struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup);
++ struct hlist_node *tmp;
++ struct bfq_group *bfqg;
++
++ /*
++ * Since we are destroying the cgroup, there are no more tasks
++ * referencing it, and all the RCU grace periods that may have
++ * referenced it are ended (as the destruction of the parent
++ * cgroup is RCU-safe); bgrp->group_data will not be accessed by
++ * anything else and we don't need any synchronization.
++ */
++ hlist_for_each_entry_safe(bfqg, tmp, &bgrp->group_data, group_node)
++ bfq_destroy_group(bgrp, bfqg);
++
++ BUG_ON(!hlist_empty(&bgrp->group_data));
++
++ kfree(bgrp);
++}
++
++struct cgroup_subsys bfqio_subsys = {
++ .name = "bfqio",
++ .css_alloc = bfqio_create,
++ .can_attach = bfqio_can_attach,
++ .attach = bfqio_attach,
++ .css_free = bfqio_destroy,
++ .subsys_id = bfqio_subsys_id,
++ .base_cftypes = bfqio_files,
++};
++#else
++static inline void bfq_init_entity(struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++ entity->weight = entity->new_weight;
++ entity->orig_weight = entity->new_weight;
++ entity->ioprio = entity->new_ioprio;
++ entity->ioprio_class = entity->new_ioprio_class;
++ entity->sched_data = &bfqg->sched_data;
++}
++
++static inline struct bfq_group *
++bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ return bfqd->root_group;
++}
++
++static inline void bfq_bfqq_move(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++}
++
++static void bfq_end_raising_async(struct bfq_data *bfqd)
++{
++ bfq_end_raising_async_queues(bfqd, bfqd->root_group);
++}
++
++static inline void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++ bfq_put_async_queues(bfqd, bfqd->root_group);
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++ kfree(bfqd->root_group);
++}
++
++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
++{
++ struct bfq_group *bfqg;
++ int i;
++
++ bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
++ if (bfqg == NULL)
++ return NULL;
++
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++ return bfqg;
++}
++#endif
+diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c
+new file mode 100644
+index 0000000..326e3ec
+--- /dev/null
++++ b/block/bfq-ioc.c
+@@ -0,0 +1,36 @@
++/*
++ * BFQ: I/O context handling.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++/**
++ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
++ * @icq: the iocontext queue.
++ */
++static inline struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
++{
++ /* bic->icq is the first member, %NULL will convert to %NULL */
++ return container_of(icq, struct bfq_io_cq, icq);
++}
++
++/**
++ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
++ * @bfqd: the lookup key.
++ * @ioc: the io_context of the process doing I/O.
++ *
++ * Queue lock must be held.
++ */
++static inline struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
++ struct io_context *ioc)
++{
++	if (ioc)
++ return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue));
++ return NULL;
++}
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+new file mode 100644
+index 0000000..0ed2746
+--- /dev/null
++++ b/block/bfq-iosched.c
+@@ -0,0 +1,3082 @@
++/*
++ * BFQ, or Budget Fair Queueing, disk scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file.
++ *
++ * BFQ is a proportional share disk scheduling algorithm based on the
++ * slice-by-slice service scheme of CFQ. But BFQ assigns budgets,
++ * measured in number of sectors, to tasks instead of time slices.
++ * The disk is not granted to the active task for a given time slice,
++ * but until it has exhausted its assigned budget. This change from
++ * the time to the service domain allows BFQ to distribute the disk
++ * bandwidth among tasks as desired, without any distortion due to
++ * ZBR, workload fluctuations or other factors. BFQ uses an ad hoc
++ * internal scheduler, called B-WF2Q+, to schedule tasks according to
++ * their budgets. Thanks to this accurate scheduler, BFQ can afford
++ * to assign high budgets to disk-bound non-seeky tasks (to boost the
++ * throughput), and yet guarantee low latencies to interactive and
++ * soft real-time applications.
++ *
++ * BFQ has been introduced in [1], where the interested reader can
++ * find an accurate description of the algorithm, the bandwidth
++ * distribution and latency guarantees it provides, plus formal proofs
++ * of all the properties. With respect to the algorithm presented in
++ * the paper, this implementation adds several little heuristics, and
++ * a hierarchical extension, based on H-WF2Q+.
++ *
++ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
++ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
++ * complexity derives from the one introduced with EEVDF in [3].
++ *
++ * [1] P. Valente and F. Checconi, ``High Throughput Disk Scheduling
++ * with Deterministic Guarantees on Bandwidth Distribution,'',
++ *     IEEE Transactions on Computers, May 2010.
++ *
++ * http://algo.ing.unimo.it/people/paolo/disk_sched/bfq-techreport.pdf
++ *
++ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
++ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
++ * Oct 1997.
++ *
++ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
++ *
++ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
++ * First: A Flexible and Accurate Mechanism for Proportional Share
++ * Resource Allocation,'' technical report.
++ *
++ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/cgroup.h>
++#include <linux/elevator.h>
++#include <linux/jiffies.h>
++#include <linux/rbtree.h>
++#include <linux/ioprio.h>
++#include "bfq.h"
++#include "blk.h"
++
++/* Max number of dispatches in one round of service. */
++static const int bfq_quantum = 4;
++
++/* Expiration time of sync (0) and async (1) requests, in jiffies. */
++static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
++
++/* Maximum backwards seek, in KiB. */
++static const int bfq_back_max = 16 * 1024;
++
++/* Penalty of a backwards seek, in number of sectors. */
++static const int bfq_back_penalty = 2;
++
++/* Idling period duration, in jiffies. */
++static int bfq_slice_idle = HZ / 125;
++
++/* Default maximum budget values, in sectors and number of requests. */
++static const int bfq_default_max_budget = 16 * 1024;
++static const int bfq_max_budget_async_rq = 4;
++
++/*
++ * Async to sync throughput distribution is controlled as follows:
++ * when an async request is served, the entity is charged the number
++ * of sectors of the request, multiplied by the factor below
++ */
++static const int bfq_async_charge_factor = 10;
++
++/* Default timeout values, in jiffies, approximating CFQ defaults. */
++static const int bfq_timeout_sync = HZ / 8;
++static int bfq_timeout_async = HZ / 25;
++
++struct kmem_cache *bfq_pool;
++
++/* Below this threshold (in ms), we consider thinktime immediate. */
++#define BFQ_MIN_TT 2
++
++/* hw_tag detection: parallel requests threshold and min samples needed. */
++#define BFQ_HW_QUEUE_THRESHOLD 4
++#define BFQ_HW_QUEUE_SAMPLES 32
++
++#define BFQQ_SEEK_THR (sector_t)(8 * 1024)
++#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)
++
++/* Min samples used for peak rate estimation (for autotuning). */
++#define BFQ_PEAK_RATE_SAMPLES 32
++
++/* Shift used for peak rate fixed precision calculations. */
++#define BFQ_RATE_SHIFT 16
++
++/*
++ * The duration of the weight raising for interactive applications is
++ * computed automatically (as default behaviour), using the following
++ * formula: duration = (R / r) * T, where r is the peak rate of the
++ * disk, and R and T are two reference parameters. In particular, R is
++ * the peak rate of a reference disk, and T is about the maximum time
++ * for starting popular large applications on that disk, under BFQ and
++ * while reading two files in parallel. Finally, BFQ uses two
++ * different pairs (R, T) depending on whether the disk is rotational
++ * or non-rotational.
++ */
++#define T_rot (msecs_to_jiffies(5500))
++#define T_nonrot (msecs_to_jiffies(2000))
++/* Next two quantities are in sectors/usec, left-shifted by BFQ_RATE_SHIFT */
++#define R_rot 17415
++#define R_nonrot 34791
++
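
A worked instance of the duration formula above, for illustration:

	/*
	 * duration = (R / r) * T on a rotational disk:
	 *
	 *   - if the estimated peak rate r equals the reference R_rot,
	 *     weight raising lasts T_rot = 5500 ms;
	 *   - if the disk is twice as fast (r = 2 * R_rot), the duration
	 *     halves to 2750 ms.
	 *
	 * Slower devices thus keep queues raised longer, preserving the
	 * "application start-up time" meaning of T across devices.
	 */
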
++#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
++ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
++
++#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
++
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd);
++
++#include "bfq-ioc.c"
++#include "bfq-sched.c"
++#include "bfq-cgroup.c"
++
++#define bfq_class_idle(bfqq) ((bfqq)->entity.ioprio_class ==\
++ IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq) ((bfqq)->entity.ioprio_class ==\
++ IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples) ((samples) > 80)
++
++/*
++ * We regard a request as SYNC if it is a read or if it has the SYNC bit
++ * set (in which case it could also be a direct WRITE).
++ */
++static inline int bfq_bio_sync(struct bio *bio)
++{
++ if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
++ return 1;
++
++ return 0;
++}
++
++/*
++ * Scheduler run of queue, if there are requests pending and no one in the
++ * driver that will restart queueing.
++ */
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
++{
++ if (bfqd->queued != 0) {
++ bfq_log(bfqd, "schedule dispatch");
++ kblockd_schedule_work(bfqd->queue, &bfqd->unplug_work);
++ }
++}
++
++/*
++ * Lifted from AS - choose which of rq1 and rq2 is best served now.
++ * We choose the request that is closest to the head right now. Distance
++ * behind the head is penalized and only allowed to a certain extent.
++ */
++static struct request *bfq_choose_req(struct bfq_data *bfqd,
++ struct request *rq1,
++ struct request *rq2,
++ sector_t last)
++{
++ sector_t s1, s2, d1 = 0, d2 = 0;
++ unsigned long back_max;
++#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
++#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
++ unsigned wrap = 0; /* bit mask: requests behind the disk head? */
++
++ if (rq1 == NULL || rq1 == rq2)
++ return rq2;
++ if (rq2 == NULL)
++ return rq1;
++
++ if (rq_is_sync(rq1) && !rq_is_sync(rq2))
++ return rq1;
++ else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
++ return rq2;
++ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
++ return rq1;
++ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
++ return rq2;
++
++ s1 = blk_rq_pos(rq1);
++ s2 = blk_rq_pos(rq2);
++
++ /*
++ * By definition, 1KiB is 2 sectors.
++ */
++ back_max = bfqd->bfq_back_max * 2;
++
++ /*
++ * Strict one way elevator _except_ in the case where we allow
++ * short backward seeks which are biased as twice the cost of a
++ * similar forward seek.
++ */
++ if (s1 >= last)
++ d1 = s1 - last;
++ else if (s1 + back_max >= last)
++ d1 = (last - s1) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ1_WRAP;
++
++ if (s2 >= last)
++ d2 = s2 - last;
++ else if (s2 + back_max >= last)
++ d2 = (last - s2) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ2_WRAP;
++
++ /* Found required data */
++
++ /*
++ * By doing switch() on the bit mask "wrap" we avoid having to
++ * check two variables for all permutations: --> faster!
++ */
++ switch (wrap) {
++ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
++ if (d1 < d2)
++ return rq1;
++ else if (d2 < d1)
++ return rq2;
++ else {
++ if (s1 >= s2)
++ return rq1;
++ else
++ return rq2;
++ }
++
++ case BFQ_RQ2_WRAP:
++ return rq1;
++ case BFQ_RQ1_WRAP:
++ return rq2;
++ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
++ default:
++ /*
++ * Since both rqs are wrapped,
++ * start with the one that's further behind head
++ * (--> only *one* back seek required),
++ * since back seek takes more time than forward.
++ */
++ if (s1 <= s2)
++ return rq1;
++ else
++ return rq2;
++ }
++}
++
++static struct bfq_queue *
++bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
++ sector_t sector, struct rb_node **ret_parent,
++ struct rb_node ***rb_link)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *bfqq = NULL;
++
++ parent = NULL;
++ p = &root->rb_node;
++ while (*p) {
++ struct rb_node **n;
++
++ parent = *p;
++ bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++
++ /*
++ * Sort strictly based on sector. Smallest to the left,
++ * largest to the right.
++ */
++ if (sector > blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_right;
++ else if (sector < blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_left;
++ else
++ break;
++ p = n;
++ bfqq = NULL;
++ }
++
++ *ret_parent = parent;
++ if (rb_link)
++ *rb_link = p;
++
++ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++ (long long unsigned)sector,
++ bfqq != NULL ? bfqq->pid : 0);
++
++ return bfqq;
++}
++
++static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *__bfqq;
++
++ if (bfqq->pos_root != NULL) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++
++ if (bfq_class_idle(bfqq))
++ return;
++ if (!bfqq->next_rq)
++ return;
++
++ bfqq->pos_root = &bfqd->rq_pos_tree;
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
++ blk_rq_pos(bfqq->next_rq), &parent, &p);
++ if (__bfqq == NULL) {
++ rb_link_node(&bfqq->pos_node, parent, p);
++ rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
++ } else
++ bfqq->pos_root = NULL;
++}
++
++static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct request *last)
++{
++ struct rb_node *rbnext = rb_next(&last->rb_node);
++ struct rb_node *rbprev = rb_prev(&last->rb_node);
++ struct request *next = NULL, *prev = NULL;
++
++ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
++
++ if (rbprev != NULL)
++ prev = rb_entry_rq(rbprev);
++
++ if (rbnext != NULL)
++ next = rb_entry_rq(rbnext);
++ else {
++ rbnext = rb_first(&bfqq->sort_list);
++ if (rbnext && rbnext != &last->rb_node)
++ next = rb_entry_rq(rbnext);
++ }
++
++ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
++}
++
++static void bfq_del_rq_rb(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ const int sync = rq_is_sync(rq);
++
++ BUG_ON(bfqq->queued[sync] == 0);
++ bfqq->queued[sync]--;
++ bfqd->queued--;
++
++ elv_rb_del(&bfqq->sort_list, rq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->active_queue)
++ bfq_del_bfqq_busy(bfqd, bfqq, 1);
++ /*
++ * Remove queue from request-position tree as it is empty.
++ */
++ if (bfqq->pos_root != NULL) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++ }
++}
++
++/* see the definition of bfq_async_charge_factor for details */
++static inline unsigned long bfq_serv_to_charge(struct request *rq,
++ struct bfq_queue *bfqq)
++{
++ return blk_rq_sectors(rq) *
++ (1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->raising_coeff == 1) *
++ bfq_async_charge_factor));
++}
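
Numerically, with the default bfq_async_charge_factor of 10, the expression
above charges, for a 64-sector request (a worked illustration):

	/*
	 *   sync queue:                        64 * 1        =  64 sectors
	 *   async, not weight-raised:          64 * (1 + 10) = 704 sectors
	 *   async, weight-raised (coeff > 1):  64 * 1        =  64 sectors
	 *
	 * i.e. async I/O pays an 11x budget premium unless the queue is
	 * currently being weight-raised.
	 */
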
++
++/**
++ * bfq_updated_next_req - update the queue after a new next_rq selection.
++ * @bfqd: the device data the queue belongs to.
++ * @bfqq: the queue to update.
++ *
++ * If the first request of a queue changes we make sure that the queue
++ * has enough budget to serve at least its first request (if the
++ * request has grown). We do this because if the queue has not enough
++ * budget for its first request, it has to go through two dispatch
++ * rounds to actually get it dispatched.
++ */
++static void bfq_updated_next_req(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ struct request *next_rq = bfqq->next_rq;
++ unsigned long new_budget;
++
++ if (next_rq == NULL)
++ return;
++
++ if (bfqq == bfqd->active_queue)
++ /*
++ * In order not to break guarantees, budgets cannot be
++ * changed after an entity has been selected.
++ */
++ return;
++
++ BUG_ON(entity->tree != &st->active);
++ BUG_ON(entity == entity->sched_data->active_entity);
++
++ new_budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ entity->budget = new_budget;
++ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", new_budget);
++ bfq_activate_bfqq(bfqd, bfqq);
++}
++
++static inline unsigned int bfq_wrais_duration(struct bfq_data *bfqd)
++{
++ u64 dur;
++
++ if (bfqd->bfq_raising_max_time > 0)
++ return bfqd->bfq_raising_max_time;
++
++ dur = bfqd->RT_prod;
++ do_div(dur, bfqd->peak_rate);
++
++ return dur;
++}
++
++static void bfq_add_rq_rb(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *next_rq, *prev;
++ unsigned long old_raising_coeff = bfqq->raising_coeff;
++ int idle_for_long_time = bfqq->budget_timeout +
++ bfqd->bfq_raising_min_idle_time < jiffies;
++
++ bfq_log_bfqq(bfqd, bfqq, "add_rq_rb %d", rq_is_sync(rq));
++ bfqq->queued[rq_is_sync(rq)]++;
++ bfqd->queued++;
++
++ elv_rb_add(&bfqq->sort_list, rq);
++
++ /*
++ * Check if this request is a better next-serve candidate.
++ */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
++ BUG_ON(next_rq == NULL);
++ bfqq->next_rq = next_rq;
++
++ /*
++ * Adjust priority tree position, if next_rq changes.
++ */
++ if (prev != bfqq->next_rq)
++ bfq_rq_pos_tree_add(bfqd, bfqq);
++
++ if (!bfq_bfqq_busy(bfqq)) {
++ int soft_rt = bfqd->bfq_raising_max_softrt_rate > 0 &&
++ bfqq->soft_rt_next_start < jiffies;
++ entity->budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++
++		if (!bfqd->low_latency)
++ goto add_bfqq_busy;
++
++ /*
++ * If the queue is not being boosted and has been idle
++ * for enough time, start a weight-raising period
++ */
++		if (old_raising_coeff == 1 && (idle_for_long_time || soft_rt)) {
++ bfqq->raising_coeff = bfqd->bfq_raising_coeff;
++ if (idle_for_long_time)
++ bfqq->raising_cur_max_time =
++ bfq_wrais_duration(bfqd);
++ else
++ bfqq->raising_cur_max_time =
++ bfqd->bfq_raising_rt_max_time;
++ bfq_log_bfqq(bfqd, bfqq,
++				     "wrais starting at %llu msec, "
++ "rais_max_time %u",
++ bfqq->last_rais_start_finish,
++ jiffies_to_msecs(bfqq->
++ raising_cur_max_time));
++ } else if (old_raising_coeff > 1) {
++ if (idle_for_long_time)
++ bfqq->raising_cur_max_time =
++ bfq_wrais_duration(bfqd);
++ else if (bfqq->raising_cur_max_time ==
++ bfqd->bfq_raising_rt_max_time &&
++ !soft_rt) {
++ bfqq->raising_coeff = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++					     "wrais ending at %llu msec, "
++ "rais_max_time %u",
++ bfqq->last_rais_start_finish,
++ jiffies_to_msecs(bfqq->
++ raising_cur_max_time));
++ }
++ }
++ if (old_raising_coeff != bfqq->raising_coeff)
++ entity->ioprio_changed = 1;
++add_bfqq_busy:
++ bfq_add_bfqq_busy(bfqd, bfqq);
++ } else {
++		if (bfqd->low_latency && old_raising_coeff == 1 &&
++ !rq_is_sync(rq) &&
++ bfqq->last_rais_start_finish +
++ bfqd->bfq_raising_min_inter_arr_async < jiffies) {
++ bfqq->raising_coeff = bfqd->bfq_raising_coeff;
++ bfqq->raising_cur_max_time = bfq_wrais_duration(bfqd);
++
++ entity->ioprio_changed = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++				     "non-idle wrais starting at %llu msec, "
++ "rais_max_time %u",
++ bfqq->last_rais_start_finish,
++ jiffies_to_msecs(bfqq->
++ raising_cur_max_time));
++ }
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++	if (bfqd->low_latency &&
++ (old_raising_coeff == 1 || bfqq->raising_coeff == 1 ||
++ idle_for_long_time))
++ bfqq->last_rais_start_finish = jiffies;
++}
++
++static void bfq_reposition_rq_rb(struct bfq_queue *bfqq, struct request *rq)
++{
++ elv_rb_del(&bfqq->sort_list, rq);
++ bfqq->queued[rq_is_sync(rq)]--;
++ bfqq->bfqd->queued--;
++ bfq_add_rq_rb(rq);
++}
++
++static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
++ struct bio *bio)
++{
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (bic == NULL)
++ return NULL;
++
++ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++ if (bfqq != NULL) {
++ sector_t sector = bio->bi_sector + bio_sectors(bio);
++
++ return elv_rb_find(&bfqq->sort_list, sector);
++ }
++
++ return NULL;
++}
++
++static void bfq_activate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ bfqd->rq_in_driver++;
++ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
++ bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
++ (long long unsigned)bfqd->last_position);
++}
++
++static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ WARN_ON(bfqd->rq_in_driver == 0);
++ bfqd->rq_in_driver--;
++}
++
++static void bfq_remove_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ if (bfqq->next_rq == rq) {
++ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ list_del_init(&rq->queuelist);
++ bfq_del_rq_rb(rq);
++
++ if (rq->cmd_flags & REQ_META) {
++ WARN_ON(bfqq->meta_pending == 0);
++ bfqq->meta_pending--;
++ }
++}
++
++static int bfq_merge(struct request_queue *q, struct request **req,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct request *__rq;
++
++ __rq = bfq_find_rq_fmerge(bfqd, bio);
++ if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) {
++ *req = __rq;
++ return ELEVATOR_FRONT_MERGE;
++ }
++
++ return ELEVATOR_NO_MERGE;
++}
++
++static void bfq_merged_request(struct request_queue *q, struct request *req,
++ int type)
++{
++ if (type == ELEVATOR_FRONT_MERGE) {
++ struct bfq_queue *bfqq = RQ_BFQQ(req);
++
++ bfq_reposition_rq_rb(bfqq, req);
++ }
++}
++
++static void bfq_merged_requests(struct request_queue *q, struct request *rq,
++ struct request *next)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ /*
++ * Reposition in fifo if next is older than rq.
++ */
++ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
++ time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
++ list_move(&rq->queuelist, &next->queuelist);
++ rq_set_fifo_time(rq, rq_fifo_time(next));
++ }
++
++ if (bfqq->next_rq == next)
++ bfqq->next_rq = rq;
++
++ bfq_remove_request(next);
++}
++
++/* Must be called with bfqq != NULL */
++static inline void bfq_bfqq_end_raising(struct bfq_queue *bfqq)
++{
++ BUG_ON(bfqq == NULL);
++ bfqq->raising_coeff = 1;
++ bfqq->raising_cur_max_time = 0;
++ /* Trigger a weight change on the next activation of the queue */
++ bfqq->entity.ioprio_changed = 1;
++}
++
++static void bfq_end_raising_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ if (bfqg->async_bfqq[i][j] != NULL)
++ bfq_bfqq_end_raising(bfqg->async_bfqq[i][j]);
++ if (bfqg->async_idle_bfqq != NULL)
++ bfq_bfqq_end_raising(bfqg->async_idle_bfqq);
++}
++
++static void bfq_end_raising(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
++ bfq_bfqq_end_raising(bfqq);
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
++ bfq_bfqq_end_raising(bfqq);
++ bfq_end_raising_async(bfqd);
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++}
++
++static int bfq_allow_merge(struct request_queue *q, struct request *rq,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ /*
++ * Disallow merge of a sync bio into an async request.
++ */
++ if (bfq_bio_sync(bio) && !rq_is_sync(rq))
++ return 0;
++
++ /*
++ * Lookup the bfqq that this bio will be queued with. Allow
++ * merge only if rq is queued there.
++ * Queue lock is held here.
++ */
++ bic = bfq_bic_lookup(bfqd, current->io_context);
++ if (bic == NULL)
++ return 0;
++
++ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++ return bfqq == RQ_BFQQ(rq);
++}
++
++static void __bfq_set_active_queue(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ if (bfqq != NULL) {
++ bfq_mark_bfqq_must_alloc(bfqq);
++ bfq_mark_bfqq_budget_new(bfqq);
++ bfq_clear_bfqq_fifo_expire(bfqq);
++
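++ /*
++ * Low-pass filter with alpha = 1/8: the budgets_assigned
++ * counter saturates towards 256 as budgets keep being
++ * assigned, and is used as a confidence measure for the
++ * budget autotuning (see bfq_max_budget).
++ */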
++ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++ bfq_log_bfqq(bfqd, bfqq, "set_active_queue, cur-budget = %lu",
++ bfqq->entity.budget);
++ }
++
++ bfqd->active_queue = bfqq;
++}
++
++/*
++ * Get and set a new active queue for service.
++ */
++static struct bfq_queue *bfq_set_active_queue(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ if (!bfqq)
++ bfqq = bfq_get_next_queue(bfqd);
++ else
++ bfq_get_next_queue_forced(bfqd, bfqq);
++
++ __bfq_set_active_queue(bfqd, bfqq);
++ return bfqq;
++}
++
++static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
++ struct request *rq)
++{
++ if (blk_rq_pos(rq) >= bfqd->last_position)
++ return blk_rq_pos(rq) - bfqd->last_position;
++ else
++ return bfqd->last_position - blk_rq_pos(rq);
++}
++
++/*
++ * Return true if rq is close enough to bfqd->last_position, i.e.,
++ * within the seek threshold.
++ */
++static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq)
++{
++ return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR;
++}
++
++static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
++{
++ struct rb_root *root = &bfqd->rq_pos_tree;
++ struct rb_node *parent, *node;
++ struct bfq_queue *__bfqq;
++ sector_t sector = bfqd->last_position;
++
++ if (RB_EMPTY_ROOT(root))
++ return NULL;
++
++ /*
++ * First, if we find a request starting at the end of the last
++ * request, choose it.
++ */
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
++ if (__bfqq != NULL)
++ return __bfqq;
++
++ /*
++ * If the exact sector wasn't found, the parent of the NULL leaf
++ * will contain the closest sector (rq_pos_tree sorted by next_request
++ * position).
++ */
++ __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++ if (bfq_rq_close(bfqd, __bfqq->next_rq))
++ return __bfqq;
++
++ if (blk_rq_pos(__bfqq->next_rq) < sector)
++ node = rb_next(&__bfqq->pos_node);
++ else
++ node = rb_prev(&__bfqq->pos_node);
++ if (node == NULL)
++ return NULL;
++
++ __bfqq = rb_entry(node, struct bfq_queue, pos_node);
++ if (bfq_rq_close(bfqd, __bfqq->next_rq))
++ return __bfqq;
++
++ return NULL;
++}
++
++/*
++ * bfqd - the device data.
++ * cur_bfqq - passed in so that we don't decide that the current queue
++ * is closely cooperating with itself.
++ *
++ * We are assuming that cur_bfqq has dispatched at least one request,
++ * and that bfqd->last_position reflects a position on the disk associated
++ * with the I/O issued by cur_bfqq.
++ */
++static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
++ struct bfq_queue *cur_bfqq)
++{
++ struct bfq_queue *bfqq;
++
++ if (bfq_class_idle(cur_bfqq))
++ return NULL;
++ if (!bfq_bfqq_sync(cur_bfqq))
++ return NULL;
++ if (BFQQ_SEEKY(cur_bfqq))
++ return NULL;
++
++ /* If device has only one backlogged bfq_queue, don't search. */
++ if (bfqd->busy_queues == 1)
++ return NULL;
++
++ /*
++ * We should notice if some of the queues are cooperating, e.g.
++ * working closely on the same area of the disk. In that case,
++ * we can group them together and don't waste time idling.
++ */
++ bfqq = bfqq_close(bfqd);
++ if (bfqq == NULL || bfqq == cur_bfqq)
++ return NULL;
++
++ /*
++ * Do not merge queues from different bfq_groups.
++ */
++ if (bfqq->entity.parent != cur_bfqq->entity.parent)
++ return NULL;
++
++ /*
++ * It only makes sense to merge sync queues.
++ */
++ if (!bfq_bfqq_sync(bfqq))
++ return NULL;
++ if (BFQQ_SEEKY(bfqq))
++ return NULL;
++
++ /*
++ * Do not merge queues of different priority classes.
++ */
++ if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq))
++ return NULL;
++
++ return bfqq;
++}
++
++/*
++ * If enough samples have been computed, return the current max budget
++ * stored in bfqd, which is dynamically updated according to the
++ * estimated disk peak rate; otherwise return the default max budget
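++ * (the threshold 194 is roughly 3/4 of 256, the value towards
++ * which the budgets_assigned counter saturates)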
++ */
++static inline unsigned long bfq_max_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < 194)
++ return bfq_default_max_budget;
++ else
++ return bfqd->bfq_max_budget;
++}
++
++/*
++ * Return min budget, which is a fraction of the current or default
++ * max budget (trying with 1/32)
++ */
++static inline unsigned long bfq_min_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < 194)
++ return bfq_default_max_budget / 32;
++ else
++ return bfqd->bfq_max_budget / 32;
++}
++
++/*
++ * Decides whether idling should be done for given device and
++ * given active queue.
++ */
++static inline bool bfq_queue_nonrot_noidle(struct bfq_data *bfqd,
++ struct bfq_queue *active_bfqq)
++{
++ if (active_bfqq == NULL)
++ return false;
++ /*
++ * If the device is an SSD it has no seek penalty, so idling can
++ * be disabled; but do so only if:
++ * - the device supports queuing, otherwise we would still have
++ * a problem with sync vs async workloads;
++ * - the queue is not weight-raised, to preserve guarantees.
++ */
++ return (blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag &&
++ active_bfqq->raising_coeff == 1);
++}
++
++static void bfq_arm_slice_timer(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfqd->active_queue;
++ struct bfq_io_cq *bic;
++ unsigned long sl;
++
++ WARN_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ /* Tasks have exited, don't wait. */
++ bic = bfqd->active_bic;
++ if (bic == NULL || atomic_read(&bic->icq.ioc->active_ref) == 0)
++ return;
++
++ bfq_mark_bfqq_wait_request(bfqq);
++
++ /*
++ * We don't want to idle for seeks, but we do want to allow
++ * fair distribution of slice time for a process doing back-to-back
++ * seeks. So allow a little bit of time for it to submit a new rq.
++ *
++ * To prevent processes with (partly) seeky workloads from
++ * being too ill-treated, grant them a small fraction of the
++ * assigned budget before reducing the waiting time to
++ * BFQ_MIN_TT. In practice, this helps to reduce latency.
++ */
++ sl = bfqd->bfq_slice_idle;
++ if (bfq_sample_valid(bfqq->seek_samples) && BFQQ_SEEKY(bfqq) &&
++ bfqq->entity.service > bfq_max_budget(bfqd) / 8 &&
++ bfqq->raising_coeff == 1)
++ sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
++ else if (bfqq->raising_coeff > 1)
++ sl = sl * 3;
++ bfqd->last_idling_start = ktime_get();
++ mod_timer(&bfqd->idle_slice_timer, jiffies + sl);
++ bfq_log(bfqd, "arm idle: %u/%u ms",
++ jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle));
++}
++
++/*
++ * Set the maximum time for the active queue to consume its
++ * budget. This prevents seeky processes from lowering the disk
++ * throughput (always guaranteed with a time slice scheme as in CFQ).
++ */
++static void bfq_set_budget_timeout(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfqd->active_queue;
++ unsigned int timeout_coeff;
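++
++ /*
++ * A weight-raised queue is allowed a budget timeout stretched by
++ * its raising coefficient (weight / orig_weight); soft real-time
++ * raising periods keep the default timeout instead.
++ */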
++ if (bfqq->raising_cur_max_time == bfqd->bfq_raising_rt_max_time)
++ timeout_coeff = 1;
++ else
++ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
++
++ bfqd->last_budget_start = ktime_get();
++
++ bfq_clear_bfqq_budget_new(bfqq);
++ bfqq->budget_timeout = jiffies +
++ bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
++
++ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++ jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
++ timeout_coeff));
++}
++
++/*
++ * Move request from internal lists to the request queue dispatch list.
++ */
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ bfq_remove_request(rq);
++ bfqq->dispatched++;
++ elv_dispatch_sort(q, rq);
++
++ if (bfq_bfqq_sync(bfqq))
++ bfqd->sync_flight++;
++}
++
++/*
++ * Return expired entry, or NULL to just start from scratch in rbtree.
++ */
++static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
++{
++ struct request *rq = NULL;
++
++ if (bfq_bfqq_fifo_expire(bfqq))
++ return NULL;
++
++ bfq_mark_bfqq_fifo_expire(bfqq);
++
++ if (list_empty(&bfqq->fifo))
++ return NULL;
++
++ rq = rq_entry_fifo(bfqq->fifo.next);
++
++ if (time_before(jiffies, rq_fifo_time(rq)))
++ return NULL;
++
++ return rq;
++}
++
++/*
++ * Must be called with the queue_lock held.
++ */
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++ int process_refs, io_refs;
++
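++ /*
++ * Allocated requests and the entity being queued on a service
++ * tree each hold a reference to the queue; what remains counts
++ * the processes (through their bfq_io_cq) still pointing to it.
++ */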
++ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++ process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
++ BUG_ON(process_refs < 0);
++ return process_refs;
++}
++
++static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ int process_refs, new_process_refs;
++ struct bfq_queue *__bfqq;
++
++ /*
++ * If there are no process references on the new_bfqq, then it is
++ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++ * may have dropped their last reference (not just their last process
++ * reference).
++ */
++ if (!bfqq_process_refs(new_bfqq))
++ return;
++
++ /* Avoid a circular list and skip interim queue merges. */
++ while ((__bfqq = new_bfqq->new_bfqq)) {
++ if (__bfqq == bfqq)
++ return;
++ new_bfqq = __bfqq;
++ }
++
++ process_refs = bfqq_process_refs(bfqq);
++ new_process_refs = bfqq_process_refs(new_bfqq);
++ /*
++ * If the process for the bfqq has gone away, there is no
++ * sense in merging the queues.
++ */
++ if (process_refs == 0 || new_process_refs == 0)
++ return;
++
++ /*
++ * Merge in the direction of the lesser amount of work.
++ */
++ if (new_process_refs >= process_refs) {
++ bfqq->new_bfqq = new_bfqq;
++ atomic_add(process_refs, &new_bfqq->ref);
++ } else {
++ new_bfqq->new_bfqq = bfqq;
++ atomic_add(new_process_refs, &bfqq->ref);
++ }
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++ new_bfqq->pid);
++}
++
++static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ return entity->budget - entity->service;
++}
++
++static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfqq != bfqd->active_queue);
++
++ __bfq_bfqd_reset_active(bfqd);
++
++ /*
++ * If this bfqq is shared between multiple processes, check
++ * to make sure that those processes are still issuing I/Os
++ * within the mean seek distance. If not, it may be time to
++ * break the queues apart again.
++ */
++ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
++ bfq_mark_bfqq_split_coop(bfqq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ /*
++ * overloading budget_timeout field to store when
++ * the queue remains with no backlog, used by
++ * the weight-raising mechanism
++ */
++ bfqq->budget_timeout = jiffies;
++ bfq_del_bfqq_busy(bfqd, bfqq, 1);
++ } else {
++ bfq_activate_bfqq(bfqd, bfqq);
++ /*
++ * Resort priority tree of potential close cooperators.
++ */
++ bfq_rq_pos_tree_add(bfqd, bfqq);
++ }
++}
++
++/**
++ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
++ * @bfqd: device data.
++ * @bfqq: queue to update.
++ * @reason: reason for expiration.
++ *
++ * Handle the feedback on @bfqq budget. See the body for detailed
++ * comments.
++ */
++static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ enum bfqq_expiration reason)
++{
++ struct request *next_rq;
++ unsigned long budget, min_budget;
++
++ budget = bfqq->max_budget;
++ min_budget = bfq_min_budget(bfqd);
++
++ BUG_ON(bfqq != bfqd->active_queue);
++
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu",
++ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu",
++ budget, bfq_min_budget(bfqd));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->active_queue));
++
++ if (bfq_bfqq_sync(bfqq)) {
++ switch (reason) {
++ /*
++ * Caveat: in all the following cases we trade latency
++ * for throughput.
++ */
++ case BFQ_BFQQ_TOO_IDLE:
++ /*
++ * This is the only case where we may reduce
++ * the budget: if there is no request of the
++ * process still waiting for completion, then
++ * we assume (tentatively) that the timer has
++ * expired because the batch of requests of
++ * the process could have been served with a
++ * smaller budget. Hence, betting that
++ * the process will behave in the same way when it
++ * becomes backlogged again, we reduce its
++ * next budget. As long as we guess right,
++ * this budget cut reduces the latency
++ * experienced by the process.
++ *
++ * However, if there are still outstanding
++ * requests, then the process may have not yet
++ * issued its next request just because it is
++ * still waiting for the completion of some of
++ * the still outstanding ones. So in this
++ * subcase we do not reduce its budget, on the
++ * contrary we increase it to possibly boost
++ * the throughput, as discussed in the
++ * comments to the BUDGET_TIMEOUT case.
++ */
++ if (bfqq->dispatched > 0) /* still outstanding reqs */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ else {
++ if (budget > 5 * min_budget)
++ budget -= 4 * min_budget;
++ else
++ budget = min_budget;
++ }
++ break;
++ case BFQ_BFQQ_BUDGET_TIMEOUT:
++ /*
++ * We double the budget here because: 1) it
++ * gives the chance to boost the throughput if
++ * this is not a seeky process (which may have
++ * bumped into this timeout because of, e.g.,
++ * ZBR), 2) together with charge_full_budget
++ * it helps give seeky processes higher
++ * timestamps, and hence be served less
++ * frequently.
++ */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_BUDGET_EXHAUSTED:
++ /*
++ * The process still has backlog, and did not
++ * let either the budget timeout or the disk
++ * idling timeout expire. Hence it is not
++ * seeky, has a short thinktime and may be
++ * happy with a higher budget too. So
++ * definitely increase the budget of this good
++ * candidate to boost the disk throughput.
++ */
++ budget = min(budget * 4, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_NO_MORE_REQUESTS:
++ /*
++ * Leave the budget unchanged.
++ */
++ default:
++ return;
++ }
++ } else /* async queue */
++ /* async queues always get the maximum possible budget
++ * (their ability to dispatch is limited by
++ * @bfqd->bfq_max_budget_async_rq).
++ */
++ budget = bfqd->bfq_max_budget;
++
++ bfqq->max_budget = budget;
++
++ if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 &&
++ bfqq->max_budget > bfqd->bfq_max_budget)
++ bfqq->max_budget = bfqd->bfq_max_budget;
++
++ /*
++ * Make sure that we have enough budget for the next request.
++ * Since the finish time of the bfqq must be kept in sync with
++ * the budget, be sure to call __bfq_bfqq_expire() after the
++ * update.
++ */
++ next_rq = bfqq->next_rq;
++ if (next_rq != NULL)
++ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ else
++ bfqq->entity.budget = bfqq->max_budget;
++
++ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu",
++ next_rq != NULL ? blk_rq_sectors(next_rq) : 0,
++ bfqq->entity.budget);
++}
++
++static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
++{
++ unsigned long max_budget;
++
++ /*
++ * The max_budget calculated when autotuning is equal to the
++ * amount of sectors transferred in timeout_sync at the
++ * estimated peak rate.
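++ *
++ * peak_rate is measured in sectors/usec, left-shifted by
++ * BFQ_RATE_SHIFT for fixed-point precision, while timeout is in
++ * msec: multiplying by 1000 converts it to usec, and the final
++ * right shift drops the fixed-point scaling.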
++ */
++ max_budget = (unsigned long)(peak_rate * 1000 *
++ timeout >> BFQ_RATE_SHIFT);
++
++ return max_budget;
++}
++
++/*
++ * In addition to updating the peak rate, checks whether the process
++ * is "slow", and returns 1 if so. This slow flag is used, in addition
++ * to the budget timeout, to reduce the amount of service provided to
++ * seeky processes, and hence reduce their chances to lower the
++ * throughput. See the code for more details.
++ */
++static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ int compensate, enum bfqq_expiration reason)
++{
++ u64 bw, usecs, expected, timeout;
++ ktime_t delta;
++ int update = 0;
++
++ if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
++ return 0;
++
++ if (compensate)
++ delta = bfqd->last_idling_start;
++ else
++ delta = ktime_get();
++ delta = ktime_sub(delta, bfqd->last_budget_start);
++ usecs = ktime_to_us(delta);
++
++ /* Don't trust short/unrealistic values. */
++ if (usecs < 100 || usecs >= LONG_MAX)
++ return 0;
++
++ /*
++ * Calculate the bandwidth for the last slice. We use a 64 bit
++ * value to store the peak rate, in sectors per usec in fixed
++ * point math. We do so to have enough precision in the estimate
++ * and to avoid overflows.
++ */
++ bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
++ do_div(bw, (unsigned long)usecs);
++
++ timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++ /*
++ * Use only long (> 20ms) intervals to filter out spikes for
++ * the peak rate estimation.
++ */
++ if (usecs > 20000) {
++ if (bw > bfqd->peak_rate ||
++ (!BFQQ_SEEKY(bfqq) &&
++ reason == BFQ_BFQQ_BUDGET_TIMEOUT)) {
++ bfq_log(bfqd, "measured bw =%llu", bw);
++ /*
++ * To smooth oscillations use a low-pass filter with
++ * alpha=7/8, i.e.,
++ * new_rate = (7/8) * old_rate + (1/8) * bw
++ */
++ do_div(bw, 8);
++ if (bw == 0)
++ return 0;
++ bfqd->peak_rate *= 7;
++ do_div(bfqd->peak_rate, 8);
++ bfqd->peak_rate += bw;
++ update = 1;
++ bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate);
++ }
++
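++ /*
++ * Force the max_budget update below as soon as the number
++ * of samples is about to reach BFQ_PEAK_RATE_SAMPLES, so
++ * that an autotuned budget is available once enough
++ * samples have been collected.
++ */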
++ update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
++
++ if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
++ bfqd->peak_rate_samples++;
++
++ if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
++ update && bfqd->bfq_user_max_budget == 0) {
++ bfqd->bfq_max_budget =
++ bfq_calc_max_budget(bfqd->peak_rate, timeout);
++ bfq_log(bfqd, "new max_budget=%lu",
++ bfqd->bfq_max_budget);
++ }
++ }
++
++ /*
++ * If the process has been served for too short a time
++ * interval to let its possible sequential accesses prevail over
++ * the initial seek time needed to move the disk head on the
++ * first sector it requested, then give the process a chance
++ * and for the moment return false.
++ */
++ if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8)
++ return 0;
++
++ /*
++ * A process is considered ``slow'' (i.e., seeky, so that we
++ * cannot treat it fairly in the service domain, as it would
++ * slow down too much the other processes) if, when a slice
++ * ends for whatever reason, it has received service at a
++ * rate that would not be high enough to complete the budget
++ * before the budget timeout expiration.
++ */
++ expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
++
++ /*
++ * Caveat: processes doing IO in the slower disk zones will
++ * tend to be slow(er) even if not seeky. And the estimated
++ * peak rate will actually be an average over the disk
++ * surface. Hence, to not be too harsh with unlucky processes,
++ * we keep a budget/3 margin of safety before declaring a
++ * process slow.
++ */
++ return expected > (4 * bfqq->entity.budget) / 3;
++}
++
++/**
++ * bfq_bfqq_expire - expire a queue.
++ * @bfqd: device owning the queue.
++ * @bfqq: the queue to expire.
++ * @compensate: if true, compensate for the time spent idling.
++ * @reason: the reason causing the expiration.
++ *
++ *
++ * If the process associated to the queue is slow (i.e., seeky), or in
++ * case of budget timeout, or, finally, if it is async, we
++ * artificially charge it an entire budget (independently of the
++ * actual service it received). As a consequence, the queue will get
++ * higher timestamps than the correct ones upon reactivation, and
++ * hence it will be rescheduled as if it had received more service
++ * than what it actually received. In the end, this class of processes
++ * will receive less service in proportion to how slowly they consume
++ * their budgets (and hence how seriously they tend to lower the
++ * throughput).
++ *
++ * In contrast, when a queue expires because it has been idling for
++ * too long or because it exhausted its budget, we do not touch the
++ * amount of service it has received. Hence when the queue will be
++ * reactivated and its timestamps updated, the latter will be in sync
++ * with the actual service received by the queue until expiration.
++ *
++ * Charging a full budget to the first type of queues and the exact
++ * service to the others has the effect of using the WF2Q+ policy to
++ * schedule the former on a timeslice basis, without violating the
++ * service domain guarantees of the latter.
++ */
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ int compensate,
++ enum bfqq_expiration reason)
++{
++ int slow;
++ BUG_ON(bfqq != bfqd->active_queue);
++
++ /* Update disk peak rate for autotuning and check whether the
++ * process is slow (see bfq_update_peak_rate).
++ */
++ slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason);
++
++ /*
++ * As above explained, 'punish' slow (i.e., seeky), timed-out
++ * and async queues, to favor sequential sync workloads.
++ *
++ * Processes doing IO in the slower disk zones will tend to be
++ * slow(er) even if not seeky. Hence, since the estimated peak
++ * rate is actually an average over the disk surface, these
++ * processes may timeout just for bad luck. To avoid punishing
++ * them we do not charge a full budget to a process that
++ * succeeded in consuming at least 2/3 of its budget.
++ */
++ if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3))
++ bfq_bfqq_charge_full_budget(bfqq);
++
++ if (bfqd->low_latency && bfqq->raising_coeff == 1)
++ bfqq->last_rais_start_finish = jiffies;
++
++ if (bfqd->low_latency && bfqd->bfq_raising_max_softrt_rate > 0) {
++ if (reason != BFQ_BFQQ_BUDGET_TIMEOUT)
++ bfqq->soft_rt_next_start =
++ jiffies +
++ HZ * bfqq->entity.service /
++ bfqd->bfq_raising_max_softrt_rate;
++ else
++ bfqq->soft_rt_next_start = -1; /* infinity */
++ }
++ bfq_log_bfqq(bfqd, bfqq,
++ "expire (%d, slow %d, num_disp %d, idle_win %d)", reason, slow,
++ bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
++
++ /* Increase, decrease or leave budget unchanged according to reason */
++ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
++ __bfq_bfqq_expire(bfqd, bfqq);
++}
++
++/*
++ * Budget timeout is not implemented through a dedicated timer, but
++ * just checked on request arrivals and completions, as well as on
++ * idle timer expirations.
++ */
++static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_budget_new(bfqq))
++ return 0;
++
++ if (time_before(jiffies, bfqq->budget_timeout))
++ return 0;
++
++ return 1;
++}
++
++/*
++ * If we expire a queue that is waiting for the arrival of a new
++ * request, we may prevent the fictitious timestamp backshifting that
++ * allows the guarantees of the queue to be preserved (see [1] for
++ * this tricky aspect). Hence we return true only if this condition
++ * does not hold, or if the queue is slow enough to deserve only to be
++ * kicked off for preserving a high throughput.
++ */
++static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "may_budget_timeout: wr %d left %d timeout %d",
++ bfq_bfqq_wait_request(bfqq),
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
++ bfq_bfqq_budget_timeout(bfqq));
++
++ return (!bfq_bfqq_wait_request(bfqq) ||
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) &&
++ bfq_bfqq_budget_timeout(bfqq);
++}
++
++/*
++ * If the active queue is empty, but it is sync and either of the following
++ * conditions holds, then: 1) the queue must remain active and cannot be
++ * expired, and 2) the disk must be idled to wait for the possible arrival
++ * of a new request for the queue. The conditions are:
++ * - the device is rotational and not performing NCQ, and the queue has its
++ * idle window set (in this case, waiting for a new request for the queue
++ * is likely to boost the disk throughput);
++ * - the queue is weight-raised (waiting for the request is necessary for
++ * providing the queue with fairness and latency guarantees).
++ */
++static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq,
++ int budg_timeout)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ return (bfq_bfqq_sync(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
++ bfqd->bfq_slice_idle != 0 &&
++ ((bfq_bfqq_idle_window(bfqq) && !bfqd->hw_tag &&
++ !blk_queue_nonrot(bfqd->queue))
++ || bfqq->raising_coeff > 1) &&
++ (bfqd->rq_in_driver == 0 ||
++ budg_timeout ||
++ bfqq->raising_coeff > 1) &&
++ !bfq_close_cooperator(bfqd, bfqq) &&
++ (!bfq_bfqq_coop(bfqq) ||
++ !bfq_bfqq_some_coop_idle(bfqq)) &&
++ !bfq_queue_nonrot_noidle(bfqd, bfqq));
++}
++
++/*
++ * Select a queue for service. If we have a current active queue,
++ * check whether to continue servicing it, or retrieve and set a new one.
++ */
++static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq, *new_bfqq = NULL;
++ struct request *next_rq;
++ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++ int budg_timeout;
++
++ bfqq = bfqd->active_queue;
++ if (bfqq == NULL)
++ goto new_queue;
++
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: already active queue");
++
++ /*
++ * If another queue has a request waiting within our mean seek
++ * distance, let it run. The expire code will check for close
++ * cooperators and put the close queue at the front of the
++ * service tree. If possible, merge the expiring queue with the
++ * new bfqq.
++ */
++ new_bfqq = bfq_close_cooperator(bfqd, bfqq);
++ if (new_bfqq != NULL && bfqq->new_bfqq == NULL)
++ bfq_setup_merge(bfqq, new_bfqq);
++
++ budg_timeout = bfq_may_expire_for_budg_timeout(bfqq);
++ if (budg_timeout &&
++ !bfq_bfqq_must_idle(bfqq, budg_timeout))
++ goto expire;
++
++ next_rq = bfqq->next_rq;
++ /*
++ * If bfqq has requests queued and it has enough budget left to
++ * serve them, keep the queue, otherwise expire it.
++ */
++ if (next_rq != NULL) {
++ if (bfq_serv_to_charge(next_rq, bfqq) >
++ bfq_bfqq_budget_left(bfqq)) {
++ reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
++ goto expire;
++ } else {
++ /*
++ * The idle timer may be pending because we may not
++ * disable disk idling even when a new request arrives
++ */
++ if (timer_pending(&bfqd->idle_slice_timer)) {
++ /*
++ * If we get here: 1) at least a new request
++ * has arrived but we have not disabled the
++ * timer because the request was too small,
++ * 2) then the block layer has unplugged the
++ * device, causing the dispatch to be invoked.
++ *
++ * Since the device is unplugged, now the
++ * requests are probably large enough to
++ * provide a reasonable throughput.
++ * So we disable idling.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ del_timer(&bfqd->idle_slice_timer);
++ }
++ if (new_bfqq == NULL)
++ goto keep_queue;
++ else
++ goto expire;
++ }
++ }
++
++ /*
++ * No requests pending. If there is no cooperator, and the active
++ * queue still has requests in flight or is idling for a new request,
++ * then keep it.
++ */
++ if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
++ (bfqq->dispatched != 0 &&
++ (bfq_bfqq_idle_window(bfqq) || bfqq->raising_coeff > 1) &&
++ !bfq_queue_nonrot_noidle(bfqd, bfqq)))) {
++ bfqq = NULL;
++ goto keep_queue;
++ } else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
++ /*
++ * Expiring the queue because there is a close cooperator,
++ * cancel timer.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ del_timer(&bfqd->idle_slice_timer);
++ }
++
++ reason = BFQ_BFQQ_NO_MORE_REQUESTS;
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, 0, reason);
++new_queue:
++ bfqq = bfq_set_active_queue(bfqd, new_bfqq);
++ bfq_log(bfqd, "select_queue: new queue %d returned",
++ bfqq != NULL ? bfqq->pid : 0);
++keep_queue:
++ return bfqq;
++}
++
++static void update_raising_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ if (bfqq->raising_coeff > 1) { /* queue is being boosted */
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "raising period dur %u/%u msec, "
++ "old raising coeff %u, w %d(%d)",
++ jiffies_to_msecs(jiffies -
++ bfqq->last_rais_start_finish),
++ jiffies_to_msecs(bfqq->raising_cur_max_time),
++ bfqq->raising_coeff,
++ bfqq->entity.weight, bfqq->entity.orig_weight);
++
++ BUG_ON(bfqq != bfqd->active_queue && entity->weight !=
++ entity->orig_weight * bfqq->raising_coeff);
++ if (entity->ioprio_changed)
++ bfq_log_bfqq(bfqd, bfqq,
++ "WARN: pending prio change");
++ /*
++ * If too much time has elapsed from the beginning
++ * of this weight-raising period and the process is not soft
++ * real-time, stop it
++ */
++ if (jiffies - bfqq->last_rais_start_finish >
++ bfqq->raising_cur_max_time) {
++ int soft_rt = bfqd->bfq_raising_max_softrt_rate > 0 &&
++ bfqq->soft_rt_next_start < jiffies;
++
++ bfqq->last_rais_start_finish = jiffies;
++ if (soft_rt)
++ bfqq->raising_cur_max_time =
++ bfqd->bfq_raising_rt_max_time;
++ else {
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais ending at %llu msec,"
++ "rais_max_time %u",
++ bfqq->last_rais_start_finish,
++ jiffies_to_msecs(bfqq->
++ raising_cur_max_time));
++ bfq_bfqq_end_raising(bfqq);
++ __bfq_entity_update_weight_prio(
++ bfq_entity_service_tree(entity),
++ entity);
++ }
++ }
++ }
++}
++
++/*
++ * Dispatch one request from bfqq, moving it to the request queue
++ * dispatch list.
++ */
++static int bfq_dispatch_request(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++ struct request *rq;
++ unsigned long service_to_charge;
++
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ /* Follow expired path, else get first next available. */
++ rq = bfq_check_fifo(bfqq);
++ if (rq == NULL)
++ rq = bfqq->next_rq;
++ service_to_charge = bfq_serv_to_charge(rq, bfqq);
++
++ if (service_to_charge > bfq_bfqq_budget_left(bfqq)) {
++ /*
++ * This may happen if the next rq is chosen
++ * in fifo order instead of sector order.
++ * The budget is properly dimensioned
++ * to be always sufficient to serve the next request
++ * only if it is chosen in sector order. The reason is
++ * that it would be quite inefficient, and of little use,
++ * to always make sure that the budget is large enough
++ * to serve even the possible next rq in fifo order.
++ * In fact, requests are seldom served in fifo order.
++ *
++ * Expire the queue for budget exhaustion, and
++ * make sure that the next act_budget is enough
++ * to serve the next request, even if it comes
++ * from the fifo expired path.
++ */
++ bfqq->next_rq = rq;
++ /*
++ * Since this dispatch has failed, make sure that
++ * a new one will be performed
++ */
++ if (!bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++ goto expire;
++ }
++
++ /* Finally, insert request into driver dispatch list. */
++ bfq_bfqq_served(bfqq, service_to_charge);
++ bfq_dispatch_insert(bfqd->queue, rq);
++
++ update_raising_data(bfqd, bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq, "dispatched %u sec req (%llu), "
++ "budg left %lu",
++ blk_rq_sectors(rq),
++ (long long unsigned)blk_rq_pos(rq),
++ bfq_bfqq_budget_left(bfqq));
++
++ dispatched++;
++
++ if (bfqd->active_bic == NULL) {
++ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
++ bfqd->active_bic = RQ_BIC(rq);
++ }
++
++ if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) &&
++ dispatched >= bfqd->bfq_max_budget_async_rq) ||
++ bfq_class_idle(bfqq)))
++ goto expire;
++
++ return dispatched;
++
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED);
++ return dispatched;
++}
++
++static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++
++ while (bfqq->next_rq != NULL) {
++ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
++ dispatched++;
++ }
++
++ BUG_ON(!list_empty(&bfqq->fifo));
++ return dispatched;
++}
++
++/*
++ * Drain our current requests. Used for barriers and when switching
++ * io schedulers on-the-fly.
++ */
++static int bfq_forced_dispatch(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq, *n;
++ struct bfq_service_tree *st;
++ int dispatched = 0;
++
++ bfqq = bfqd->active_queue;
++ if (bfqq != NULL)
++ __bfq_bfqq_expire(bfqd, bfqq);
++
++ /*
++ * Loop through classes, and be careful to leave the scheduler
++ * in a consistent state, as feedback mechanisms and vtime
++ * updates cannot be disabled during the process.
++ */
++ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
++ st = bfq_entity_service_tree(&bfqq->entity);
++
++ dispatched += __bfq_forced_dispatch_bfqq(bfqq);
++ bfqq->max_budget = bfq_max_budget(bfqd);
++
++ bfq_forget_idle(st);
++ }
++
++ BUG_ON(bfqd->busy_queues != 0);
++
++ return dispatched;
++}
++
++static int bfq_dispatch_requests(struct request_queue *q, int force)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq;
++ int max_dispatch;
++
++ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++ if (bfqd->busy_queues == 0)
++ return 0;
++
++ if (unlikely(force))
++ return bfq_forced_dispatch(bfqd);
++
++ if ((bfqq = bfq_select_queue(bfqd)) == NULL)
++ return 0;
++
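++ /*
++ * Cap the number of requests dispatched in one round:
++ * bfq_quantum by default, a single request for idle-class
++ * queues, and bfq_max_budget_async_rq for async queues.
++ */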
++ max_dispatch = bfqd->bfq_quantum;
++ if (bfq_class_idle(bfqq))
++ max_dispatch = 1;
++
++ if (!bfq_bfqq_sync(bfqq))
++ max_dispatch = bfqd->bfq_max_budget_async_rq;
++
++ if (bfqq->dispatched >= max_dispatch) {
++ if (bfqd->busy_queues > 1)
++ return 0;
++ if (bfqq->dispatched >= 4 * max_dispatch)
++ return 0;
++ }
++
++ if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq))
++ return 0;
++
++ bfq_clear_bfqq_wait_request(bfqq);
++ BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++ if (!bfq_dispatch_request(bfqd, bfqq))
++ return 0;
++
++ bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d"
++ "(max_disp %d)", bfqq->pid, max_dispatch);
++
++ return 1;
++}
++
++/*
++ * Task holds one reference to the queue, dropped when task exits. Each rq
++ * in-flight on this queue also holds a reference, dropped when rq is freed.
++ *
++ * Queue lock must be held here.
++ */
++static void bfq_put_queue(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ BUG_ON(atomic_read(&bfqq->ref) <= 0);
++
++ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq,
++ atomic_read(&bfqq->ref));
++ if (!atomic_dec_and_test(&bfqq->ref))
++ return;
++
++ BUG_ON(rb_first(&bfqq->sort_list) != NULL);
++ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
++ BUG_ON(bfqq->entity.tree != NULL);
++ BUG_ON(bfq_bfqq_busy(bfqq));
++ BUG_ON(bfqd->active_queue == bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq);
++
++ kmem_cache_free(bfq_pool, bfqq);
++}
++
++static void bfq_put_cooperator(struct bfq_queue *bfqq)
++{
++ struct bfq_queue *__bfqq, *next;
++
++ /*
++ * If this queue was scheduled to merge with another queue, be
++ * sure to drop the reference taken on that queue (and others in
++ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
++ */
++ __bfqq = bfqq->new_bfqq;
++ while (__bfqq) {
++ if (__bfqq == bfqq) {
++ WARN(1, "bfqq->new_bfqq loop detected.\n");
++ break;
++ }
++ next = __bfqq->new_bfqq;
++ bfq_put_queue(__bfqq);
++ __bfqq = next;
++ }
++}
++
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ if (bfqq == bfqd->active_queue) {
++ __bfq_bfqq_expire(bfqd, bfqq);
++ bfq_schedule_dispatch(bfqd);
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
++ atomic_read(&bfqq->ref));
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq);
++}
++
++static void bfq_init_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++
++ bic->ttime.last_end_request = jiffies;
++}
++
++static void bfq_exit_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++
++ if (bic->bfqq[BLK_RW_ASYNC]) {
++ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_ASYNC]);
++ bic->bfqq[BLK_RW_ASYNC] = NULL;
++ }
++
++ if (bic->bfqq[BLK_RW_SYNC]) {
++ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
++ bic->bfqq[BLK_RW_SYNC] = NULL;
++ }
++}
++
++/*
++ * Update the entity prio values; note that the new values will not
++ * be used until the next (re)activation.
++ */
++static void bfq_init_prio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++{
++ struct task_struct *tsk = current;
++ int ioprio_class;
++
++ if (!bfq_bfqq_prio_changed(bfqq))
++ return;
++
++ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ switch (ioprio_class) {
++ default:
++ printk(KERN_ERR "bfq: bad prio %x\n", ioprio_class);
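++ /* fall through */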
++ case IOPRIO_CLASS_NONE:
++ /*
++ * No prio set, inherit CPU scheduling settings.
++ */
++ bfqq->entity.new_ioprio = task_nice_ioprio(tsk);
++ bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk);
++ break;
++ case IOPRIO_CLASS_RT:
++ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT;
++ break;
++ case IOPRIO_CLASS_BE:
++ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE;
++ break;
++ case IOPRIO_CLASS_IDLE:
++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE;
++ bfqq->entity.new_ioprio = 7;
++ bfq_clear_bfqq_idle_window(bfqq);
++ break;
++ }
++
++ bfqq->entity.ioprio_changed = 1;
++
++ /*
++ * Keep track of original prio settings in case we have to temporarily
++ * elevate the priority of this queue.
++ */
++ bfqq->org_ioprio = bfqq->entity.new_ioprio;
++ bfq_clear_bfqq_prio_changed(bfqq);
++}
++
++static void bfq_changed_ioprio(struct bfq_io_cq *bic)
++{
++ struct bfq_data *bfqd;
++ struct bfq_queue *bfqq, *new_bfqq;
++ struct bfq_group *bfqg;
++ unsigned long uninitialized_var(flags);
++ int ioprio = bic->icq.ioc->ioprio;
++
++ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
++ &flags);
++ /*
++ * This condition may trigger on a newly created bic; be sure
++ * to drop the lock before returning.
++ */
++ if (unlikely(bfqd == NULL) || likely(bic->ioprio == ioprio))
++ goto out;
++
++ bfqq = bic->bfqq[BLK_RW_ASYNC];
++ if (bfqq != NULL) {
++ bfqg = container_of(bfqq->entity.sched_data, struct bfq_group,
++ sched_data);
++ new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, bic,
++ GFP_ATOMIC);
++ if (new_bfqq != NULL) {
++ bic->bfqq[BLK_RW_ASYNC] = new_bfqq;
++ bfq_log_bfqq(bfqd, bfqq,
++ "changed_ioprio: bfqq %p %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ }
++ }
++
++ bfqq = bic->bfqq[BLK_RW_SYNC];
++ if (bfqq != NULL)
++ bfq_mark_bfqq_prio_changed(bfqq);
++
++ bic->ioprio = ioprio;
++
++out:
++ bfq_put_bfqd_unlock(bfqd, &flags);
++}
++
++static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ pid_t pid, int is_sync)
++{
++ RB_CLEAR_NODE(&bfqq->entity.rb_node);
++ INIT_LIST_HEAD(&bfqq->fifo);
++
++ atomic_set(&bfqq->ref, 0);
++ bfqq->bfqd = bfqd;
++
++ bfq_mark_bfqq_prio_changed(bfqq);
++
++ if (is_sync) {
++ if (!bfq_class_idle(bfqq))
++ bfq_mark_bfqq_idle_window(bfqq);
++ bfq_mark_bfqq_sync(bfqq);
++ }
++
++ /* Tentative initial value trading off throughput and latency */
++ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
++ bfqq->pid = pid;
++
++ bfqq->raising_coeff = 1;
++ bfqq->last_rais_start_finish = 0;
++ bfqq->soft_rt_next_start = -1;
++}
++
++static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ int is_sync,
++ struct bfq_io_cq *bic,
++ gfp_t gfp_mask)
++{
++ struct bfq_queue *bfqq, *new_bfqq = NULL;
++
++retry:
++ /* bic always exists here */
++ bfqq = bic_to_bfqq(bic, is_sync);
++
++ /*
++ * Always retry a new allocation if we earlier fell back to the
++ * OOM bfqq, since that should just be a temporary situation.
++ */
++ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
++ bfqq = NULL;
++ if (new_bfqq != NULL) {
++ bfqq = new_bfqq;
++ new_bfqq = NULL;
++ } else if (gfp_mask & __GFP_WAIT) {
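++ /*
++ * Drop the queue lock around the possibly sleeping
++ * allocation, then retry the lookup, as the state may
++ * have changed while the lock was not held.
++ */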
++ spin_unlock_irq(bfqd->queue->queue_lock);
++ new_bfqq = kmem_cache_alloc_node(bfq_pool,
++ gfp_mask | __GFP_ZERO,
++ bfqd->queue->node);
++ spin_lock_irq(bfqd->queue->queue_lock);
++ if (new_bfqq != NULL)
++ goto retry;
++ } else {
++ bfqq = kmem_cache_alloc_node(bfq_pool,
++ gfp_mask | __GFP_ZERO,
++ bfqd->queue->node);
++ }
++
++ if (bfqq != NULL) {
++ bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync);
++ bfq_log_bfqq(bfqd, bfqq, "allocated");
++ } else {
++ bfqq = &bfqd->oom_bfqq;
++ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
++ }
++
++ bfq_init_prio_data(bfqq, bic);
++ bfq_init_entity(&bfqq->entity, bfqg);
++ }
++
++ if (new_bfqq != NULL)
++ kmem_cache_free(bfq_pool, new_bfqq);
++
++ return bfqq;
++}
++
++static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ int ioprio_class, int ioprio)
++{
++ switch (ioprio_class) {
++ case IOPRIO_CLASS_RT:
++ return &bfqg->async_bfqq[0][ioprio];
++ case IOPRIO_CLASS_NONE:
++ ioprio = IOPRIO_NORM;
++ /* fall through */
++ case IOPRIO_CLASS_BE:
++ return &bfqg->async_bfqq[1][ioprio];
++ case IOPRIO_CLASS_IDLE:
++ return &bfqg->async_idle_bfqq;
++ default:
++ BUG();
++ }
++}
++
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bfq_group *bfqg, int is_sync,
++ struct bfq_io_cq *bic, gfp_t gfp_mask)
++{
++ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ struct bfq_queue **async_bfqq = NULL;
++ struct bfq_queue *bfqq = NULL;
++
++ if (!is_sync) {
++ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
++ ioprio);
++ bfqq = *async_bfqq;
++ }
++
++ if (bfqq == NULL)
++ bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
++
++ /*
++ * Pin the queue now that it's allocated; scheduler exit will prune it.
++ */
++ if (!is_sync && *async_bfqq == NULL) {
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++ bfqq, atomic_read(&bfqq->ref));
++ *async_bfqq = bfqq;
++ }
++
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq,
++ atomic_read(&bfqq->ref));
++ return bfqq;
++}
++
++static void bfq_update_io_thinktime(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic)
++{
++ unsigned long elapsed = jiffies - bic->ttime.last_end_request;
++ unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle);
++
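++ /*
++ * Exponentially weighted moving averages, with alpha = 1/8, of
++ * the sample count (saturating towards 256) and of the total
++ * think time; the mean is their rounded ratio.
++ */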
++ bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
++ bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8;
++ bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) /
++ bic->ttime.ttime_samples;
++}
++
++static void bfq_update_io_seektime(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ sector_t sdist;
++ u64 total;
++
++ if (bfqq->last_request_pos < blk_rq_pos(rq))
++ sdist = blk_rq_pos(rq) - bfqq->last_request_pos;
++ else
++ sdist = bfqq->last_request_pos - blk_rq_pos(rq);
++
++ /*
++ * Don't allow the seek distance to get too large from the
++ * odd fragment, pagein, etc.
++ */
++ if (bfqq->seek_samples == 0) /* first request, not really a seek */
++ sdist = 0;
++ else if (bfqq->seek_samples <= 60) /* second & third seek */
++ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024);
++ else
++ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64);
++
++ bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8;
++ bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8;
++ total = bfqq->seek_total + (bfqq->seek_samples/2);
++ do_div(total, bfqq->seek_samples);
++ if (bfq_bfqq_coop(bfqq)) {
++ /*
++ * If the mean seektime increases for a (non-seeky) shared
++ * queue, some cooperator is likely to be idling too much.
++ * On the contrary, if it decreases, some cooperator has
++ * probably woken up.
++ */
++ if ((sector_t)total < bfqq->seek_mean)
++ bfq_mark_bfqq_some_coop_idle(bfqq);
++ else if ((sector_t)total > bfqq->seek_mean)
++ bfq_clear_bfqq_some_coop_idle(bfqq);
++ }
++ bfqq->seek_mean = (sector_t)total;
++
++ bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist,
++ (u64)bfqq->seek_mean);
++}
++
++/*
++ * Disable idle window if the process thinks too long or seeks so much that
++ * it doesn't matter.
++ */
++static void bfq_update_idle_window(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
++{
++ int enable_idle;
++
++ /* Don't idle for async or idle io prio class. */
++ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
++ return;
++
++ enable_idle = bfq_bfqq_idle_window(bfqq);
++
++ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
++ bfqd->bfq_slice_idle == 0 ||
++ (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
++ bfqq->raising_coeff == 1))
++ enable_idle = 0;
++ else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
++ if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
++ bfqq->raising_coeff == 1)
++ enable_idle = 0;
++ else
++ enable_idle = 1;
++ }
++ bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
++ enable_idle);
++
++ if (enable_idle)
++ bfq_mark_bfqq_idle_window(bfqq);
++ else
++ bfq_clear_bfqq_idle_window(bfqq);
++}
++
++/*
++ * Called when a new fs request (rq) is added to bfqq. Check if there's
++ * something we should do about it.
++ */
++static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ struct bfq_io_cq *bic = RQ_BIC(rq);
++
++ if (rq->cmd_flags & REQ_META)
++ bfqq->meta_pending++;
++
++ bfq_update_io_thinktime(bfqd, bic);
++ bfq_update_io_seektime(bfqd, bfqq, rq);
++ if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
++ !BFQQ_SEEKY(bfqq))
++ bfq_update_idle_window(bfqd, bfqq, bic);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
++ bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
++ (long long unsigned)bfqq->seek_mean);
++
++ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
++
++ if (bfqq == bfqd->active_queue) {
++ /*
++ * If there is just this request queued and the request
++ * is small, just exit.
++ * In this way, if the disk is being idled to wait for a new
++ * request from the active queue, we avoid unplugging the
++ * device now.
++ *
++ * By doing so, we spare the disk from being committed
++ * to serving just a small request. On the contrary, we wait for
++ * the block layer to decide when to unplug the device:
++ * hopefully, new requests will be merged to this
++ * one quickly, then the device will be unplugged
++ * and larger requests will be dispatched.
++ */
++ if (bfqq->queued[rq_is_sync(rq)] == 1 &&
++ blk_rq_sectors(rq) < 32) {
++ return;
++ }
++ if (bfq_bfqq_wait_request(bfqq)) {
++ /*
++ * If we are waiting for a request for this queue, let
++ * it rip immediately and flag that we must not expire
++ * this queue just now.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ del_timer(&bfqd->idle_slice_timer);
++ /*
++ * Here we can safely expire the queue, in
++ * case of budget timeout, without wasting
++ * guarantees
++ */
++ if (bfq_bfqq_budget_timeout(bfqq))
++ bfq_bfqq_expire(bfqd, bfqq, 0,
++ BFQ_BFQQ_BUDGET_TIMEOUT);
++ __blk_run_queue(bfqd->queue);
++ }
++ }
++}
++
++static void bfq_insert_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++ bfq_init_prio_data(bfqq, RQ_BIC(rq));
++
++ bfq_add_rq_rb(rq);
++
++ rq_set_fifo_time(rq, jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]);
++ list_add_tail(&rq->queuelist, &bfqq->fifo);
++
++ bfq_rq_enqueued(bfqd, bfqq, rq);
++}
++
++static void bfq_update_hw_tag(struct bfq_data *bfqd)
++{
++ bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver,
++ bfqd->rq_in_driver);
++
++ if (bfqd->hw_tag == 1)
++ return;
++
++ /*
++ * This sample is valid if the number of outstanding requests
++ * is large enough to allow a queueing behavior. Note that the
++ * sum is not exact, as it's not taking into account deactivated
++ * requests.
++ */
++ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
++ return;
++
++ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
++ return;
++
++ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
++ bfqd->max_rq_in_driver = 0;
++ bfqd->hw_tag_samples = 0;
++}
++
++static void bfq_completed_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ const int sync = rq_is_sync(rq);
++
++ bfq_log_bfqq(bfqd, bfqq, "completed %u sects req (%d)",
++ blk_rq_sectors(rq), sync);
++
++ bfq_update_hw_tag(bfqd);
++
++ WARN_ON(!bfqd->rq_in_driver);
++ WARN_ON(!bfqq->dispatched);
++ bfqd->rq_in_driver--;
++ bfqq->dispatched--;
++
++ if (bfq_bfqq_sync(bfqq))
++ bfqd->sync_flight--;
++
++ if (sync)
++ RQ_BIC(rq)->ttime.last_end_request = jiffies;
++
++ /*
++ * If this is the active queue, check if it needs to be expired,
++ * or if we want to idle in case it has no pending requests.
++ */
++ if (bfqd->active_queue == bfqq) {
++ int budg_timeout = bfq_may_expire_for_budg_timeout(bfqq);
++ if (bfq_bfqq_budget_new(bfqq))
++ bfq_set_budget_timeout(bfqd);
++
++ /* Idling is disabled also for cooperation issues:
++ * 1) there is a close cooperator for the queue, or
++ * 2) the queue is shared and some cooperator is likely
++ * to be idle (in this case, by not arming the idle timer,
++ * we try to slow down the queue, to prevent the zones
++ * of the disk accessed by the active cooperators from becoming
++ * too distant from the zone that will be accessed by the
++ * currently idle cooperators)
++ */
++ if (bfq_bfqq_must_idle(bfqq, budg_timeout))
++ bfq_arm_slice_timer(bfqd);
++ else if (budg_timeout)
++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
++ }
++
++ if (!bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++}
++
++static inline int __bfq_may_queue(struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
++ bfq_clear_bfqq_must_alloc(bfqq);
++ return ELV_MQUEUE_MUST;
++ }
++
++ return ELV_MQUEUE_MAY;
++}
++
++static int bfq_may_queue(struct request_queue *q, int rw)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ /*
++ * Don't force setup of a queue from here, as a call to may_queue
++ * does not necessarily imply that a request actually will be queued.
++ * So just lookup a possibly existing queue, or return 'may queue'
++ * if that fails.
++ */
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (bic == NULL)
++ return ELV_MQUEUE_MAY;
++
++ bfqq = bic_to_bfqq(bic, rw_is_sync(rw));
++ if (bfqq != NULL) {
++ bfq_init_prio_data(bfqq, bic);
++
++ return __bfq_may_queue(bfqq);
++ }
++
++ return ELV_MQUEUE_MAY;
++}
++
++/*
++ * Queue lock held here.
++ */
++static void bfq_put_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ if (bfqq != NULL) {
++ const int rw = rq_data_dir(rq);
++
++ BUG_ON(!bfqq->allocated[rw]);
++ bfqq->allocated[rw]--;
++
++ rq->elv.priv[0] = NULL;
++ rq->elv.priv[1] = NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ }
++}
++
++static struct bfq_queue *
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++ (long unsigned)bfqq->new_bfqq->pid);
++ bic_set_bfqq(bic, bfqq->new_bfqq, 1);
++ bfq_mark_bfqq_coop(bfqq->new_bfqq);
++ bfq_put_queue(bfqq);
++ return bic_to_bfqq(bic, 1);
++}
++
++/*
++ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
++ * was the last process referring to said bfqq.
++ */
++static struct bfq_queue *
++bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++ if (bfqq_process_refs(bfqq) == 1) {
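++ /*
++ * Only one process still references this queue: keep it,
++ * just resetting its cooperation state.
++ */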
++ bfqq->pid = current->pid;
++ bfq_clear_bfqq_some_coop_idle(bfqq);
++ bfq_clear_bfqq_coop(bfqq);
++ bfq_clear_bfqq_split_coop(bfqq);
++ return bfqq;
++ }
++
++ bic_set_bfqq(bic, NULL, 1);
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq);
++ return NULL;
++}
++
++/*
++ * Allocate bfq data structures associated with this request.
++ */
++static int bfq_set_request(struct request_queue *q, struct request *rq,
++ struct bio *bio, gfp_t gfp_mask)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
++ const int rw = rq_data_dir(rq);
++ const int is_sync = rq_is_sync(rq);
++ struct bfq_queue *bfqq;
++ struct bfq_group *bfqg;
++ unsigned long flags;
++
++ might_sleep_if(gfp_mask & __GFP_WAIT);
++
++ bfq_changed_ioprio(bic);
++
++ spin_lock_irqsave(q->queue_lock, flags);
++
++ if (bic == NULL)
++ goto queue_fail;
++
++ bfqg = bfq_bic_update_cgroup(bic);
++
++new_queue:
++ bfqq = bic_to_bfqq(bic, is_sync);
++ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
++ bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
++ bic_set_bfqq(bic, bfqq, is_sync);
++ } else {
++ /*
++ * If the queue was seeky for too long, break it apart.
++ */
++ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
++ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
++ bfqq = bfq_split_bfqq(bic, bfqq);
++ if (!bfqq)
++ goto new_queue;
++ }
++
++ /*
++ * Check to see if this queue is scheduled to merge with
++ * another closely cooperating queue. The merging of queues
++ * happens here as it must be done in process context.
++ * The reference on new_bfqq was taken in merge_bfqqs.
++ */
++ if (bfqq->new_bfqq != NULL)
++ bfqq = bfq_merge_bfqqs(bfqd, bic, bfqq);
++ }
++
++ bfqq->allocated[rw]++;
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq,
++ atomic_read(&bfqq->ref));
++
++ rq->elv.priv[0] = bic;
++ rq->elv.priv[1] = bfqq;
++
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 0;
++
++queue_fail:
++ bfq_schedule_dispatch(bfqd);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 1;
++}
++
++static void bfq_kick_queue(struct work_struct *work)
++{
++ struct bfq_data *bfqd =
++ container_of(work, struct bfq_data, unplug_work);
++ struct request_queue *q = bfqd->queue;
++
++ spin_lock_irq(q->queue_lock);
++ __blk_run_queue(q);
++ spin_unlock_irq(q->queue_lock);
++}
++
++/*
++ * Handler for the expiration of the timer that runs while the
++ * active_queue is idling inside its time slice.
++ */
++static void bfq_idle_slice_timer(unsigned long data)
++{
++ struct bfq_data *bfqd = (struct bfq_data *)data;
++ struct bfq_queue *bfqq;
++ unsigned long flags;
++ enum bfqq_expiration reason;
++
++ spin_lock_irqsave(bfqd->queue->queue_lock, flags);
++
++ bfqq = bfqd->active_queue;
++ /*
++ * Theoretical race here: active_queue can be NULL or different
++ * from the queue that was idling if the timer handler spins on
++ * the queue_lock and a new request arrives for the current
++ * queue and there is a full dispatch cycle that changes the
++	 * active_queue. This is unlikely to happen, but in the worst case
++ * we just expire a queue too early.
++ */
++ if (bfqq != NULL) {
++ bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++ if (bfq_bfqq_budget_timeout(bfqq))
++ /*
++ * Also here the queue can be safely expired
++ * for budget timeout without wasting
++ * guarantees
++ */
++ reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++ /*
++ * The queue may not be empty upon timer expiration,
++ * because we may not disable the timer when the first
++ * request of the active queue arrives during
++ * disk idling
++ */
++ reason = BFQ_BFQQ_TOO_IDLE;
++ else
++ goto schedule_dispatch;
++
++ bfq_bfqq_expire(bfqd, bfqq, 1, reason);
++ }
++
++schedule_dispatch:
++ bfq_schedule_dispatch(bfqd);
++
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
++}
++
++static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
++{
++ del_timer_sync(&bfqd->idle_slice_timer);
++ cancel_work_sync(&bfqd->unplug_work);
++}
++
++static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd,
++ struct bfq_queue **bfqq_ptr)
++{
++ struct bfq_group *root_group = bfqd->root_group;
++ struct bfq_queue *bfqq = *bfqq_ptr;
++
++ bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++ if (bfqq != NULL) {
++ bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group);
++ bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ *bfqq_ptr = NULL;
++ }
++}
++
++/*
++ * Release all the bfqg references to its async queues. If we are
++ * deallocating the group these queues may still contain requests, so
++ * we reparent them to the root cgroup (i.e., the only one that will
++ * exist for sure until all the requests on a device are gone).
++ */
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
++
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
++}
++
++static void bfq_exit_queue(struct elevator_queue *e)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ struct request_queue *q = bfqd->queue;
++ struct bfq_queue *bfqq, *n;
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ spin_lock_irq(q->queue_lock);
++
++ BUG_ON(bfqd->active_queue != NULL);
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
++ bfq_deactivate_bfqq(bfqd, bfqq, 0);
++
++ bfq_disconnect_groups(bfqd);
++ spin_unlock_irq(q->queue_lock);
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ synchronize_rcu();
++
++ BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++ bfq_free_root_group(bfqd);
++ kfree(bfqd);
++}
++
++static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
++{
++ struct bfq_group *bfqg;
++ struct bfq_data *bfqd;
++ struct elevator_queue *eq;
++
++ eq = elevator_alloc(q, e);
++ if (eq == NULL)
++ return -ENOMEM;
++
++ bfqd = kmalloc_node(sizeof(*bfqd), GFP_KERNEL | __GFP_ZERO, q->node);
++ if (bfqd == NULL) {
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++ }
++ eq->elevator_data = bfqd;
++
++ /*
++ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
++ * Grab a permanent reference to it, so that the normal code flow
++ * will not attempt to free it.
++ */
++ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0);
++ atomic_inc(&bfqd->oom_bfqq.ref);
++
++ bfqd->queue = q;
++
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
++
++ bfqg = bfq_alloc_root_group(bfqd, q->node);
++ if (bfqg == NULL) {
++ kfree(bfqd);
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++ }
++
++ bfqd->root_group = bfqg;
++
++ init_timer(&bfqd->idle_slice_timer);
++ bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
++ bfqd->idle_slice_timer.data = (unsigned long)bfqd;
++
++ bfqd->rq_pos_tree = RB_ROOT;
++
++ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
++
++ INIT_LIST_HEAD(&bfqd->active_list);
++ INIT_LIST_HEAD(&bfqd->idle_list);
++
++ bfqd->hw_tag = -1;
++
++ bfqd->bfq_max_budget = bfq_default_max_budget;
++
++ bfqd->bfq_quantum = bfq_quantum;
++ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
++ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
++ bfqd->bfq_back_max = bfq_back_max;
++ bfqd->bfq_back_penalty = bfq_back_penalty;
++ bfqd->bfq_slice_idle = bfq_slice_idle;
++ bfqd->bfq_class_idle_last_service = 0;
++ bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq;
++ bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
++ bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
++
++ bfqd->low_latency = true;
++
++ bfqd->bfq_raising_coeff = 20;
++ bfqd->bfq_raising_rt_max_time = msecs_to_jiffies(300);
++ bfqd->bfq_raising_max_time = 0;
++ bfqd->bfq_raising_min_idle_time = msecs_to_jiffies(2000);
++ bfqd->bfq_raising_min_inter_arr_async = msecs_to_jiffies(500);
++ bfqd->bfq_raising_max_softrt_rate = 7000;
++
++ /* Initially estimate the device's peak rate as the reference rate */
++ if (blk_queue_nonrot(bfqd->queue)) {
++ bfqd->RT_prod = R_nonrot * T_nonrot;
++ bfqd->peak_rate = R_nonrot;
++ } else {
++ bfqd->RT_prod = R_rot * T_rot;
++ bfqd->peak_rate = R_rot;
++ }
++
++ return 0;
++}
++
++static void bfq_slab_kill(void)
++{
++ if (bfq_pool != NULL)
++ kmem_cache_destroy(bfq_pool);
++}
++
++static int __init bfq_slab_setup(void)
++{
++ bfq_pool = KMEM_CACHE(bfq_queue, 0);
++ if (bfq_pool == NULL)
++ return -ENOMEM;
++ return 0;
++}
++
++static ssize_t bfq_var_show(unsigned int var, char *page)
++{
++ return sprintf(page, "%d\n", var);
++}
++
++static ssize_t bfq_var_store(unsigned long *var, const char *page, size_t count)
++{
++ unsigned long new_val;
++ int ret = strict_strtoul(page, 10, &new_val);
++
++ if (ret == 0)
++ *var = new_val;
++
++ return count;
++}
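++
++/*
++ * Note that input that fails to parse is silently ignored: *var is
++ * left untouched, and the full @count is returned so the writer sees
++ * success either way.
++ */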
++
++static ssize_t bfq_raising_max_time_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ return sprintf(page, "%d\n", bfqd->bfq_raising_max_time > 0 ?
++ jiffies_to_msecs(bfqd->bfq_raising_max_time) :
++ jiffies_to_msecs(bfq_wrais_duration(bfqd)));
++}
++
++static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_queue *bfqq;
++ struct bfq_data *bfqd = e->elevator_data;
++ ssize_t num_char = 0;
++
++ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
++ bfqd->queued);
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ num_char += sprintf(page + num_char, "Active:\n");
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, nr_queued %d %d,"
++ " dur %d/%u\n",
++ bfqq->pid,
++ bfqq->entity.weight,
++ bfqq->queued[0],
++ bfqq->queued[1],
++ jiffies_to_msecs(jiffies -
++ bfqq->last_rais_start_finish),
++ jiffies_to_msecs(bfqq->raising_cur_max_time));
++ }
++
++ num_char += sprintf(page + num_char, "Idle:\n");
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, dur %d/%u\n",
++ bfqq->pid,
++ bfqq->entity.weight,
++ jiffies_to_msecs(jiffies -
++ bfqq->last_rais_start_finish),
++ jiffies_to_msecs(bfqq->raising_cur_max_time));
++ }
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++
++ return num_char;
++}
++
++#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
++static ssize_t __FUNC(struct elevator_queue *e, char *page) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned int __data = __VAR; \
++ if (__CONV) \
++ __data = jiffies_to_msecs(__data); \
++ return bfq_var_show(__data, (page)); \
++}
++SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0);
++SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1);
++SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1);
++SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
++SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
++SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1);
++SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
++SHOW_FUNCTION(bfq_max_budget_async_rq_show, bfqd->bfq_max_budget_async_rq, 0);
++SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1);
++SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1);
++SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
++SHOW_FUNCTION(bfq_raising_coeff_show, bfqd->bfq_raising_coeff, 0);
++SHOW_FUNCTION(bfq_raising_rt_max_time_show, bfqd->bfq_raising_rt_max_time, 1);
++SHOW_FUNCTION(bfq_raising_min_idle_time_show, bfqd->bfq_raising_min_idle_time,
++ 1);
++SHOW_FUNCTION(bfq_raising_min_inter_arr_async_show,
++ bfqd->bfq_raising_min_inter_arr_async,
++ 1);
++SHOW_FUNCTION(bfq_raising_max_softrt_rate_show,
++ bfqd->bfq_raising_max_softrt_rate, 0);
++#undef SHOW_FUNCTION
++
++#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
++static ssize_t \
++__FUNC(struct elevator_queue *e, const char *page, size_t count) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned long uninitialized_var(__data); \
++ int ret = bfq_var_store(&__data, (page), count); \
++ if (__data < (MIN)) \
++ __data = (MIN); \
++ else if (__data > (MAX)) \
++ __data = (MAX); \
++ if (__CONV) \
++ *(__PTR) = msecs_to_jiffies(__data); \
++ else \
++ *(__PTR) = __data; \
++ return ret; \
++}
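++
++/*
++ * As a worked example of the macro above: writing 50 to an attribute
++ * generated with __CONV == 1, such as slice_idle, first clamps the
++ * value into [MIN, MAX] and then stores msecs_to_jiffies(50), i.e.
++ * 50 jiffies with HZ == 1000 or 5 jiffies with HZ == 100.
++ */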
++STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
++STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
++ INT_MAX, 0);
++STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq,
++ 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_raising_coeff_store, &bfqd->bfq_raising_coeff, 1,
++ INT_MAX, 0);
++STORE_FUNCTION(bfq_raising_max_time_store, &bfqd->bfq_raising_max_time, 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_raising_rt_max_time_store, &bfqd->bfq_raising_rt_max_time, 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_raising_min_idle_time_store,
++ &bfqd->bfq_raising_min_idle_time, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_raising_min_inter_arr_async_store,
++ &bfqd->bfq_raising_min_inter_arr_async, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_raising_max_softrt_rate_store,
++ &bfqd->bfq_raising_max_softrt_rate, 0, INT_MAX, 0);
++#undef STORE_FUNCTION
++
++/* do nothing for the moment */
++static ssize_t bfq_weights_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ return count;
++}
++
++static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
++{
++ u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++ if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES)
++ return bfq_calc_max_budget(bfqd->peak_rate, timeout);
++ else
++ return bfq_default_max_budget;
++}
++
++static ssize_t bfq_max_budget_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data == 0)
++ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
++ else {
++ if (__data > INT_MAX)
++ __data = INT_MAX;
++ bfqd->bfq_max_budget = __data;
++ }
++
++ bfqd->bfq_user_max_budget = __data;
++
++ return ret;
++}
++
++static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data < 1)
++ __data = 1;
++ else if (__data > INT_MAX)
++ __data = INT_MAX;
++
++ bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data);
++ if (bfqd->bfq_user_max_budget == 0)
++ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
++
++ return ret;
++}
++
++static ssize_t bfq_low_latency_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data > 1)
++ __data = 1;
++ if (__data == 0 && bfqd->low_latency != 0)
++ bfq_end_raising(bfqd);
++ bfqd->low_latency = __data;
++
++ return ret;
++}
++
++#define BFQ_ATTR(name) \
++ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
++
++static struct elv_fs_entry bfq_attrs[] = {
++ BFQ_ATTR(quantum),
++ BFQ_ATTR(fifo_expire_sync),
++ BFQ_ATTR(fifo_expire_async),
++ BFQ_ATTR(back_seek_max),
++ BFQ_ATTR(back_seek_penalty),
++ BFQ_ATTR(slice_idle),
++ BFQ_ATTR(max_budget),
++ BFQ_ATTR(max_budget_async_rq),
++ BFQ_ATTR(timeout_sync),
++ BFQ_ATTR(timeout_async),
++ BFQ_ATTR(low_latency),
++ BFQ_ATTR(raising_coeff),
++ BFQ_ATTR(raising_max_time),
++ BFQ_ATTR(raising_rt_max_time),
++ BFQ_ATTR(raising_min_idle_time),
++ BFQ_ATTR(raising_min_inter_arr_async),
++ BFQ_ATTR(raising_max_softrt_rate),
++ BFQ_ATTR(weights),
++ __ATTR_NULL
++};
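++
++/*
++ * These attributes appear under the elevator directory in sysfs once
++ * bfq is selected on a queue; e.g., on a hypothetical device sda:
++ *
++ *   cat /sys/block/sda/queue/iosched/max_budget
++ *   echo 0 > /sys/block/sda/queue/iosched/max_budget
++ *
++ * where, as bfq_max_budget_store() above shows, writing 0 re-enables
++ * the automatic budget estimation.
++ */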
++
++static struct elevator_type iosched_bfq = {
++ .ops = {
++ .elevator_merge_fn = bfq_merge,
++ .elevator_merged_fn = bfq_merged_request,
++ .elevator_merge_req_fn = bfq_merged_requests,
++ .elevator_allow_merge_fn = bfq_allow_merge,
++ .elevator_dispatch_fn = bfq_dispatch_requests,
++ .elevator_add_req_fn = bfq_insert_request,
++ .elevator_activate_req_fn = bfq_activate_request,
++ .elevator_deactivate_req_fn = bfq_deactivate_request,
++ .elevator_completed_req_fn = bfq_completed_request,
++ .elevator_former_req_fn = elv_rb_former_request,
++ .elevator_latter_req_fn = elv_rb_latter_request,
++ .elevator_init_icq_fn = bfq_init_icq,
++ .elevator_exit_icq_fn = bfq_exit_icq,
++ .elevator_set_req_fn = bfq_set_request,
++ .elevator_put_req_fn = bfq_put_request,
++ .elevator_may_queue_fn = bfq_may_queue,
++ .elevator_init_fn = bfq_init_queue,
++ .elevator_exit_fn = bfq_exit_queue,
++ },
++ .icq_size = sizeof(struct bfq_io_cq),
++ .icq_align = __alignof__(struct bfq_io_cq),
++ .elevator_attrs = bfq_attrs,
++ .elevator_name = "bfq",
++ .elevator_owner = THIS_MODULE,
++};
++
++static int __init bfq_init(void)
++{
++ /*
++ * Can be 0 on HZ < 1000 setups.
++ */
++ if (bfq_slice_idle == 0)
++ bfq_slice_idle = 1;
++
++ if (bfq_timeout_async == 0)
++ bfq_timeout_async = 1;
++
++ if (bfq_slab_setup())
++ return -ENOMEM;
++
++ elv_register(&iosched_bfq);
++
++ return 0;
++}
++
++static void __exit bfq_exit(void)
++{
++ elv_unregister(&iosched_bfq);
++ bfq_slab_kill();
++}
++
++module_init(bfq_init);
++module_exit(bfq_exit);
++
++MODULE_AUTHOR("Fabio Checconi, Paolo Valente");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Budget Fair Queueing IO scheduler");
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+new file mode 100644
+index 0000000..03f8061
+--- /dev/null
++++ b/block/bfq-sched.c
+@@ -0,0 +1,1072 @@
++/*
++ * BFQ: Hierarchical B-WF2Q+ scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++#ifdef CONFIG_CGROUP_BFQIO
++#define for_each_entity(entity) \
++ for (; entity != NULL; entity = entity->parent)
++
++#define for_each_entity_safe(entity, parent) \
++ for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
++
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++ int extract,
++ struct bfq_data *bfqd);
++
++static inline void bfq_update_budget(struct bfq_entity *next_active)
++{
++ struct bfq_entity *bfqg_entity;
++ struct bfq_group *bfqg;
++ struct bfq_sched_data *group_sd;
++
++ BUG_ON(next_active == NULL);
++
++ group_sd = next_active->sched_data;
++
++ bfqg = container_of(group_sd, struct bfq_group, sched_data);
++ /*
++ * bfq_group's my_entity field is not NULL only if the group
++ * is not the root group. We must not touch the root entity
++ * as it must never become an active entity.
++ */
++ bfqg_entity = bfqg->my_entity;
++ if (bfqg_entity != NULL)
++ bfqg_entity->budget = next_active->budget;
++}
++
++static int bfq_update_next_active(struct bfq_sched_data *sd)
++{
++ struct bfq_entity *next_active;
++
++ if (sd->active_entity != NULL)
++ /* will update/requeue at the end of service */
++ return 0;
++
++ /*
++ * NOTE: this can be improved in many ways, such as returning
++ * 1 (and thus propagating upwards the update) only when the
++ * budget changes, or caching the bfqq that will be scheduled
++	 * next from this subtree. For now we worry more about
++ * correctness than about performance...
++ */
++ next_active = bfq_lookup_next_entity(sd, 0, NULL);
++ sd->next_active = next_active;
++
++ if (next_active != NULL)
++ bfq_update_budget(next_active);
++
++ return 1;
++}
++
++static inline void bfq_check_next_active(struct bfq_sched_data *sd,
++ struct bfq_entity *entity)
++{
++ BUG_ON(sd->next_active != entity);
++}
++#else
++#define for_each_entity(entity) \
++ for (; entity != NULL; entity = NULL)
++
++#define for_each_entity_safe(entity, parent) \
++ for (parent = NULL; entity != NULL; entity = parent)
++
++static inline int bfq_update_next_active(struct bfq_sched_data *sd)
++{
++ return 0;
++}
++
++static inline void bfq_check_next_active(struct bfq_sched_data *sd,
++ struct bfq_entity *entity)
++{
++}
++
++static inline void bfq_update_budget(struct bfq_entity *next_active)
++{
++}
++#endif
++
++/*
++ * Shift for timestamp calculations. This actually limits the maximum
++ * service allowed in one timestamp delta (small shift values increase it),
++ * the maximum total weight that can be used for the queues in the system
++ * (big shift values increase it), and the period of virtual time wraparounds.
++ */
++#define WFQ_SERVICE_SHIFT 22
++
++/**
++ * bfq_gt - compare two timestamps.
++ * @a: first ts.
++ * @b: second ts.
++ *
++ * Return @a > @b, dealing with wrapping correctly.
++ */
++static inline int bfq_gt(u64 a, u64 b)
++{
++ return (s64)(a - b) > 0;
++}
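++
++/*
++ * For instance, right after a wraparound a == 1 and b == (u64)-2 give
++ * a - b == 3, which is positive as an s64, so the numerically smaller
++ * @a is still correctly reported as the later timestamp.
++ */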
++
++static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = NULL;
++
++ BUG_ON(entity == NULL);
++
++ if (entity->my_sched_data == NULL)
++ bfqq = container_of(entity, struct bfq_queue, entity);
++
++ return bfqq;
++}
++
++
++/**
++ * bfq_delta - map service into the virtual time domain.
++ * @service: amount of service.
++ * @weight: scale factor (weight of an entity or weight sum).
++ */
++static inline u64 bfq_delta(unsigned long service,
++ unsigned long weight)
++{
++ u64 d = (u64)service << WFQ_SERVICE_SHIFT;
++
++ do_div(d, weight);
++ return d;
++}
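++
++/*
++ * Worked example: 1024 units of service at weight 4 map to a virtual
++ * time delta of (1024 << 22) / 4 == 2^30, while the same service at
++ * weight 8 advances virtual time by only half as much; heavier
++ * entities therefore accumulate virtual time more slowly and are
++ * scheduled proportionally more often.
++ */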
++
++/**
++ * bfq_calc_finish - assign the finish time to an entity.
++ * @entity: the entity to act upon.
++ * @service: the service to be charged to the entity.
++ */
++static inline void bfq_calc_finish(struct bfq_entity *entity,
++ unsigned long service)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ BUG_ON(entity->weight == 0);
++
++ entity->finish = entity->start +
++ bfq_delta(service, entity->weight);
++
++ if (bfqq != NULL) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_finish: serv %lu, w %d",
++ service, entity->weight);
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_finish: start %llu, finish %llu, delta %llu",
++ entity->start, entity->finish,
++ bfq_delta(service, entity->weight));
++ }
++}
++
++/**
++ * bfq_entity_of - get an entity from a node.
++ * @node: the node field of the entity.
++ *
++ * Convert a node pointer to the relative entity. This is used only
++ * to simplify the logic of some functions and not as the generic
++ * conversion mechanism because, e.g., in the tree walking functions,
++ * the check for a %NULL value would be redundant.
++ */
++static inline struct bfq_entity *bfq_entity_of(struct rb_node *node)
++{
++ struct bfq_entity *entity = NULL;
++
++ if (node != NULL)
++ entity = rb_entry(node, struct bfq_entity, rb_node);
++
++ return entity;
++}
++
++/**
++ * bfq_extract - remove an entity from a tree.
++ * @root: the tree root.
++ * @entity: the entity to remove.
++ */
++static inline void bfq_extract(struct rb_root *root,
++ struct bfq_entity *entity)
++{
++ BUG_ON(entity->tree != root);
++
++ entity->tree = NULL;
++ rb_erase(&entity->rb_node, root);
++}
++
++/**
++ * bfq_idle_extract - extract an entity from the idle tree.
++ * @st: the service tree of the owning @entity.
++ * @entity: the entity being removed.
++ */
++static void bfq_idle_extract(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *next;
++
++ BUG_ON(entity->tree != &st->idle);
++
++ if (entity == st->first_idle) {
++ next = rb_next(&entity->rb_node);
++ st->first_idle = bfq_entity_of(next);
++ }
++
++ if (entity == st->last_idle) {
++ next = rb_prev(&entity->rb_node);
++ st->last_idle = bfq_entity_of(next);
++ }
++
++ bfq_extract(&st->idle, entity);
++
++ if (bfqq != NULL)
++ list_del(&bfqq->bfqq_list);
++}
++
++/**
++ * bfq_insert - generic tree insertion.
++ * @root: tree root.
++ * @entity: entity to insert.
++ *
++ * This is used for the idle and the active tree, since they are both
++ * ordered by finish time.
++ */
++static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
++{
++ struct bfq_entity *entry;
++ struct rb_node **node = &root->rb_node;
++ struct rb_node *parent = NULL;
++
++ BUG_ON(entity->tree != NULL);
++
++ while (*node != NULL) {
++ parent = *node;
++ entry = rb_entry(parent, struct bfq_entity, rb_node);
++
++ if (bfq_gt(entry->finish, entity->finish))
++ node = &parent->rb_left;
++ else
++ node = &parent->rb_right;
++ }
++
++ rb_link_node(&entity->rb_node, parent, node);
++ rb_insert_color(&entity->rb_node, root);
++
++ entity->tree = root;
++}
++
++/**
++ * bfq_update_min - update the min_start field of an entity.
++ * @entity: the entity to update.
++ * @node: one of its children.
++ *
++ * This function is called when @entity may store an invalid value for
++ * min_start due to updates to the active tree. The function assumes
++ * that the subtree rooted at @node (which may be its left or its right
++ * child) has a valid min_start value.
++ */
++static inline void bfq_update_min(struct bfq_entity *entity,
++ struct rb_node *node)
++{
++ struct bfq_entity *child;
++
++ if (node != NULL) {
++ child = rb_entry(node, struct bfq_entity, rb_node);
++ if (bfq_gt(entity->min_start, child->min_start))
++ entity->min_start = child->min_start;
++ }
++}
++
++/**
++ * bfq_update_active_node - recalculate min_start.
++ * @node: the node to update.
++ *
++ * @node may have changed position or one of its children may have moved;
++ * this function updates its min_start value. The left and right subtrees
++ * are assumed to hold a correct min_start value.
++ */
++static inline void bfq_update_active_node(struct rb_node *node)
++{
++ struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
++
++ entity->min_start = entity->start;
++ bfq_update_min(entity, node->rb_right);
++ bfq_update_min(entity, node->rb_left);
++}
++
++/**
++ * bfq_update_active_tree - update min_start for the whole active tree.
++ * @node: the starting node.
++ *
++ * @node must be the deepest modified node after an update. This function
++ * updates its min_start using the values held by its children, assuming
++ * that they did not change, and then updates all the nodes that may have
++ * changed in the path to the root. The only nodes that may have changed
++ * are the ones in the path or their siblings.
++ */
++static void bfq_update_active_tree(struct rb_node *node)
++{
++ struct rb_node *parent;
++
++up:
++ bfq_update_active_node(node);
++
++ parent = rb_parent(node);
++ if (parent == NULL)
++ return;
++
++ if (node == parent->rb_left && parent->rb_right != NULL)
++ bfq_update_active_node(parent->rb_right);
++ else if (parent->rb_left != NULL)
++ bfq_update_active_node(parent->rb_left);
++
++ node = parent;
++ goto up;
++}
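++
++/*
++ * Small example of the invariant being maintained: a node with start
++ * 12 whose left and right subtrees advertise min_start 5 and 9 ends up
++ * with min_start 5; after a rotation only the nodes on the walked path
++ * and their siblings can hold stale values, which is why the loop
++ * above recomputes exactly those.
++ */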
++
++/**
++ * bfq_active_insert - insert an entity in the active tree of its group/device.
++ * @st: the service tree of the entity.
++ * @entity: the entity being inserted.
++ *
++ * The active tree is ordered by finish time, but an extra key is kept
++ * per each node, containing the minimum value for the start times of
++ * its children (and the node itself), so it's possible to search for
++ * the eligible node with the lowest finish time in logarithmic time.
++ */
++static void bfq_active_insert(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *node = &entity->rb_node;
++
++ bfq_insert(&st->active, entity);
++
++ if (node->rb_left != NULL)
++ node = node->rb_left;
++ else if (node->rb_right != NULL)
++ node = node->rb_right;
++
++ bfq_update_active_tree(node);
++
++ if (bfqq != NULL)
++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
++}
++
++/**
++ * bfq_ioprio_to_weight - calc a weight from an ioprio.
++ * @ioprio: the ioprio value to convert.
++ */
++static unsigned short bfq_ioprio_to_weight(int ioprio)
++{
++ WARN_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR);
++ return IOPRIO_BE_NR - ioprio;
++}
++
++/**
++ * bfq_weight_to_ioprio - calc an ioprio from a weight.
++ * @weight: the weight value to convert.
++ *
++ * To preserve as much as possible the old ioprio-only user interface,
++ * 0 is used as an escape ioprio value for weights (numerically) equal to
++ * or larger than IOPRIO_BE_NR.
++ */
++static unsigned short bfq_weight_to_ioprio(int weight)
++{
++ WARN_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT);
++ return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight;
++}
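++
++/*
++ * With IOPRIO_BE_NR == 8 these two mappings give: ioprio 0 (highest
++ * priority) <-> weight 8, down to ioprio 7 <-> weight 1; any weight of
++ * 8 or more set directly (e.g., via cgroups) maps back to the escape
++ * ioprio 0.
++ */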
++
++static inline void bfq_get_entity(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_sched_data *sd;
++
++ if (bfqq != NULL) {
++ sd = entity->sched_data;
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
++ bfqq, atomic_read(&bfqq->ref));
++ }
++}
++
++/**
++ * bfq_find_deepest - find the deepest node that an extraction can modify.
++ * @node: the node being removed.
++ *
++ * Do the first step of an extraction in an rb tree, looking for the
++ * node that will replace @node, and returning the deepest node that
++ * the following modifications to the tree can touch. If @node is the
++ * last node in the tree return %NULL.
++ */
++static struct rb_node *bfq_find_deepest(struct rb_node *node)
++{
++ struct rb_node *deepest;
++
++ if (node->rb_right == NULL && node->rb_left == NULL)
++ deepest = rb_parent(node);
++ else if (node->rb_right == NULL)
++ deepest = node->rb_left;
++ else if (node->rb_left == NULL)
++ deepest = node->rb_right;
++ else {
++ deepest = rb_next(node);
++ if (deepest->rb_right != NULL)
++ deepest = deepest->rb_right;
++ else if (rb_parent(deepest) != node)
++ deepest = rb_parent(deepest);
++ }
++
++ return deepest;
++}
++
++/**
++ * bfq_active_extract - remove an entity from the active tree.
++ * @st: the service_tree containing the tree.
++ * @entity: the entity being removed.
++ */
++static void bfq_active_extract(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *node;
++
++ node = bfq_find_deepest(&entity->rb_node);
++ bfq_extract(&st->active, entity);
++
++ if (node != NULL)
++ bfq_update_active_tree(node);
++
++ if (bfqq != NULL)
++ list_del(&bfqq->bfqq_list);
++}
++
++/**
++ * bfq_idle_insert - insert an entity into the idle tree.
++ * @st: the service tree containing the tree.
++ * @entity: the entity to insert.
++ */
++static void bfq_idle_insert(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_entity *first_idle = st->first_idle;
++ struct bfq_entity *last_idle = st->last_idle;
++
++ if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish))
++ st->first_idle = entity;
++ if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish))
++ st->last_idle = entity;
++
++ bfq_insert(&st->idle, entity);
++
++ if (bfqq != NULL)
++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
++}
++
++/**
++ * bfq_forget_entity - remove an entity from the wfq trees.
++ * @st: the service tree.
++ * @entity: the entity being removed.
++ *
++ * Update the device status and forget everything about @entity, putting
++ * the device reference to it, if it is a queue. Entities belonging to
++ * groups are not refcounted.
++ */
++static void bfq_forget_entity(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_sched_data *sd;
++
++ BUG_ON(!entity->on_st);
++
++ entity->on_st = 0;
++ st->wsum -= entity->weight;
++ if (bfqq != NULL) {
++ sd = entity->sched_data;
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ }
++}
++
++/**
++ * bfq_put_idle_entity - release the idle tree ref of an entity.
++ * @st: service tree for the entity.
++ * @entity: the entity being released.
++ */
++static void bfq_put_idle_entity(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ bfq_idle_extract(st, entity);
++ bfq_forget_entity(st, entity);
++}
++
++/**
++ * bfq_forget_idle - update the idle tree if necessary.
++ * @st: the service tree to act upon.
++ *
++ * To preserve the global O(log N) complexity we only remove one entry here;
++ * as the idle tree will not grow indefinitely this can be done safely.
++ */
++static void bfq_forget_idle(struct bfq_service_tree *st)
++{
++ struct bfq_entity *first_idle = st->first_idle;
++ struct bfq_entity *last_idle = st->last_idle;
++
++ if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL &&
++ !bfq_gt(last_idle->finish, st->vtime)) {
++ /*
++ * Forget the whole idle tree, increasing the vtime past
++ * the last finish time of idle entities.
++ */
++ st->vtime = last_idle->finish;
++ }
++
++ if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime))
++ bfq_put_idle_entity(st, first_idle);
++}
++
++static struct bfq_service_tree *
++__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
++ struct bfq_entity *entity)
++{
++ struct bfq_service_tree *new_st = old_st;
++
++ if (entity->ioprio_changed) {
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ BUG_ON(old_st->wsum < entity->weight);
++ old_st->wsum -= entity->weight;
++
++ if (entity->new_weight != entity->orig_weight) {
++ entity->orig_weight = entity->new_weight;
++ entity->ioprio =
++ bfq_weight_to_ioprio(entity->orig_weight);
++ } else if (entity->new_ioprio != entity->ioprio) {
++ entity->ioprio = entity->new_ioprio;
++ entity->orig_weight =
++ bfq_ioprio_to_weight(entity->ioprio);
++ } else
++ entity->new_weight = entity->orig_weight =
++ bfq_ioprio_to_weight(entity->ioprio);
++
++ entity->ioprio_class = entity->new_ioprio_class;
++ entity->ioprio_changed = 0;
++
++ /*
++		 * NOTE: here we may be changing the weight too early;
++ * this will cause unfairness. The correct approach
++ * would have required additional complexity to defer
++ * weight changes to the proper time instants (i.e.,
++ * when entity->finish <= old_st->vtime).
++ */
++ new_st = bfq_entity_service_tree(entity);
++ entity->weight = entity->orig_weight *
++ (bfqq != NULL ? bfqq->raising_coeff : 1);
++ new_st->wsum += entity->weight;
++
++ if (new_st != old_st)
++ entity->start = new_st->vtime;
++ }
++
++ return new_st;
++}
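++
++/*
++ * Note the raising factor above: e.g., a queue with orig_weight 4 that
++ * is currently weight-raised with raising_coeff 20 competes with an
++ * effective weight of 80, and falls back to 4 once the raising period
++ * ends and the coefficient returns to 1.
++ */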
++
++/**
++ * bfq_bfqq_served - update the scheduler status after selection for service.
++ * @bfqq: the queue being served.
++ * @served: amount of service received (in sectors).
++ *
++ * NOTE: this can be optimized, as the timestamps of upper level entities
++ * are synchronized every time a new bfqq is selected for service. For now,
++ * we keep the full update to better check consistency.
++ */
++static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st;
++
++ for_each_entity(entity) {
++ st = bfq_entity_service_tree(entity);
++
++ entity->service += served;
++ BUG_ON(entity->service > entity->budget);
++ BUG_ON(st->wsum == 0);
++
++ st->vtime += bfq_delta(served, st->wsum);
++ bfq_forget_idle(st);
++ }
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served);
++}
++
++/**
++ * bfq_bfqq_charge_full_budget - set the service to the entity budget.
++ * @bfqq: the queue that needs a service update.
++ *
++ * When it's not possible to be fair in the service domain, because
++ * a queue is not consuming its budget fast enough (the meaning of
++ * fast depends on the timeout parameter), we charge it a full
++ * budget. In this way we should obtain a sort of time-domain
++ * fairness among all the seeky/slow queues.
++ */
++static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget");
++
++ bfq_bfqq_served(bfqq, entity->budget - entity->service);
++}
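++
++/*
++ * E.g., a queue expired after consuming only 100 units of its
++ * 8192-unit budget is charged the remaining 8092 units as well, so its
++ * next timestamps are computed as if the whole budget had been used;
++ * in the time domain it thus pays for the slice it occupied, not for
++ * the little service it extracted from it.
++ */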
++
++/**
++ * __bfq_activate_entity - activate an entity.
++ * @entity: the entity being activated.
++ *
++ * Called whenever an entity is activated, i.e., it is not active and one
++ * of its children receives a new request, or has to be reactivated due to
++ * budget exhaustion. It uses the entity's current budget (and the
++ * service already received, if @entity is active) to calculate its
++ * timestamps.
++ */
++static void __bfq_activate_entity(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++
++ if (entity == sd->active_entity) {
++ BUG_ON(entity->tree != NULL);
++ /*
++ * If we are requeueing the current entity we have
++ * to take care of not charging to it service it has
++ * not received.
++ */
++ bfq_calc_finish(entity, entity->service);
++ entity->start = entity->finish;
++ sd->active_entity = NULL;
++ } else if (entity->tree == &st->active) {
++ /*
++ * Requeueing an entity due to a change of some
++ * next_active entity below it. We reuse the old
++ * start time.
++ */
++ bfq_active_extract(st, entity);
++ } else if (entity->tree == &st->idle) {
++ /*
++ * Must be on the idle tree, bfq_idle_extract() will
++ * check for that.
++ */
++ bfq_idle_extract(st, entity);
++ entity->start = bfq_gt(st->vtime, entity->finish) ?
++ st->vtime : entity->finish;
++ } else {
++ /*
++ * The finish time of the entity may be invalid, and
++		 * it is certainly in the past; otherwise the queue
++ * would have been on the idle tree.
++ */
++ entity->start = st->vtime;
++ st->wsum += entity->weight;
++ bfq_get_entity(entity);
++
++ BUG_ON(entity->on_st);
++ entity->on_st = 1;
++ }
++
++ st = __bfq_entity_update_weight_prio(st, entity);
++ bfq_calc_finish(entity, entity->budget);
++ bfq_active_insert(st, entity);
++}
++
++/**
++ * bfq_activate_entity - activate an entity and its ancestors if necessary.
++ * @entity: the entity to activate.
++ *
++ * Activate @entity and all the entities on the path from it to the root.
++ */
++static void bfq_activate_entity(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sd;
++
++ for_each_entity(entity) {
++ __bfq_activate_entity(entity);
++
++ sd = entity->sched_data;
++ if (!bfq_update_next_active(sd))
++ /*
++ * No need to propagate the activation to the
++ * upper entities, as they will be updated when
++ * the active entity is rescheduled.
++ */
++ break;
++ }
++}
++
++/**
++ * __bfq_deactivate_entity - deactivate an entity from its service tree.
++ * @entity: the entity to deactivate.
++ * @requeue: if false, the entity will not be put into the idle tree.
++ *
++ * Deactivate an entity, independently from its previous state. If the
++ * entity was not on a service tree just return, otherwise if it is on
++ * any scheduler tree, extract it from that tree, and if necessary
++ * and if the caller specified @requeue, put it on the idle tree.
++ *
++ * Return %1 if the caller should update the entity hierarchy, i.e.,
++ * if the entity was under service or if it was the next_active for
++ * its sched_data; return %0 otherwise.
++ */
++static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ int was_active = entity == sd->active_entity;
++ int ret = 0;
++
++ if (!entity->on_st)
++ return 0;
++
++ BUG_ON(was_active && entity->tree != NULL);
++
++ if (was_active) {
++ bfq_calc_finish(entity, entity->service);
++ sd->active_entity = NULL;
++ } else if (entity->tree == &st->active)
++ bfq_active_extract(st, entity);
++ else if (entity->tree == &st->idle)
++ bfq_idle_extract(st, entity);
++ else if (entity->tree != NULL)
++ BUG();
++
++ if (was_active || sd->next_active == entity)
++ ret = bfq_update_next_active(sd);
++
++ if (!requeue || !bfq_gt(entity->finish, st->vtime))
++ bfq_forget_entity(st, entity);
++ else
++ bfq_idle_insert(st, entity);
++
++ BUG_ON(sd->active_entity == entity);
++ BUG_ON(sd->next_active == entity);
++
++ return ret;
++}
++
++/**
++ * bfq_deactivate_entity - deactivate an entity.
++ * @entity: the entity to deactivate.
++ * @requeue: true if the entity can be put on the idle tree
++ */
++static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
++{
++ struct bfq_sched_data *sd;
++ struct bfq_entity *parent;
++
++ for_each_entity_safe(entity, parent) {
++ sd = entity->sched_data;
++
++ if (!__bfq_deactivate_entity(entity, requeue))
++ /*
++ * The parent entity is still backlogged, and
++ * we don't need to update it as it is still
++ * under service.
++ */
++ break;
++
++ if (sd->next_active != NULL)
++ /*
++ * The parent entity is still backlogged and
++ * the budgets on the path towards the root
++ * need to be updated.
++ */
++ goto update;
++
++ /*
++		 * If we reach this point the parent is no longer backlogged and
++ * we want to propagate the dequeue upwards.
++ */
++ requeue = 1;
++ }
++
++ return;
++
++update:
++ entity = parent;
++ for_each_entity(entity) {
++ __bfq_activate_entity(entity);
++
++ sd = entity->sched_data;
++ if (!bfq_update_next_active(sd))
++ break;
++ }
++}
++
++/**
++ * bfq_update_vtime - update vtime if necessary.
++ * @st: the service tree to act upon.
++ *
++ * If necessary update the service tree vtime to have at least one
++ * eligible entity, skipping to its start time. Assumes that the
++ * active tree of the device is not empty.
++ *
++ * NOTE: this hierarchical implementation updates vtimes quite often;
++ * we may end up with reactivated tasks getting timestamps after a
++ * vtime skip done because we needed a ->first_active entity on some
++ * intermediate node.
++ */
++static void bfq_update_vtime(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entry;
++ struct rb_node *node = st->active.rb_node;
++
++ entry = rb_entry(node, struct bfq_entity, rb_node);
++ if (bfq_gt(entry->min_start, st->vtime)) {
++ st->vtime = entry->min_start;
++ bfq_forget_idle(st);
++ }
++}
++
++/**
++ * bfq_first_active_entity - find the eligible entity with the smallest finish time.
++ * @st: the service tree to select from.
++ *
++ * This function searches for the first schedulable entity, starting from
++ * the root of the tree and going left whenever the left subtree contains
++ * at least one eligible (start <= vtime) entity. The path
++ * on the right is followed only if a) the left subtree contains no eligible
++ * entities and b) no eligible entity has been found yet.
++ */
++static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entry, *first = NULL;
++ struct rb_node *node = st->active.rb_node;
++
++ while (node != NULL) {
++ entry = rb_entry(node, struct bfq_entity, rb_node);
++left:
++ if (!bfq_gt(entry->start, st->vtime))
++ first = entry;
++
++ BUG_ON(bfq_gt(entry->min_start, st->vtime));
++
++ if (node->rb_left != NULL) {
++ entry = rb_entry(node->rb_left,
++ struct bfq_entity, rb_node);
++ if (!bfq_gt(entry->min_start, st->vtime)) {
++ node = node->rb_left;
++ goto left;
++ }
++ }
++ if (first != NULL)
++ break;
++ node = node->rb_right;
++ }
++
++ BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active));
++ return first;
++}
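++
++/*
++ * Example run with vtime == 10: a root with start 12 is not eligible,
++ * but a left child subtree advertising min_start 7 must contain an
++ * eligible entity, and any entity there also has a smaller finish time
++ * than the root, so the walk descends left; the right subtree is tried
++ * only when the left one offers no eligible entity and none has been
++ * found yet.
++ */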
++
++/**
++ * __bfq_lookup_next_entity - return the first eligible entity in @st.
++ * @st: the service tree.
++ * @force: if true, propagate the budget update upwards when the chosen
++ * entity is not the cached next_active (used when forcibly serving the
++ * IDLE class).
++ *
++ * Update the virtual time in @st and return the first eligible entity
++ * it contains.
++ */
++static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st,
++ bool force)
++{
++ struct bfq_entity *entity, *new_next_active = NULL;
++
++ if (RB_EMPTY_ROOT(&st->active))
++ return NULL;
++
++ bfq_update_vtime(st);
++ entity = bfq_first_active_entity(st);
++ BUG_ON(bfq_gt(entity->start, st->vtime));
++
++ /*
++	 * If the chosen entity does not match the sched_data's
++	 * next_active and we are forcibly serving the IDLE priority
++	 * class tree, bubble the budget update upwards.
++ */
++ if (unlikely(force && entity != entity->sched_data->next_active)) {
++ new_next_active = entity;
++ for_each_entity(new_next_active)
++ bfq_update_budget(new_next_active);
++ }
++
++ return entity;
++}
++
++/**
++ * bfq_lookup_next_entity - return the first eligible entity in @sd.
++ * @sd: the sched_data.
++ * @extract: if true the returned entity will also be extracted from @sd.
++ * @bfqd: the device data; if not %NULL, it is used to check whether the
++ * idle class has been starved for more than BFQ_CL_IDLE_TIMEOUT.
++ *
++ * NOTE: since we cache the next_active entity at each level of the
++ * hierarchy, the complexity of the lookup can be decreased with
++ * absolutely no effort by just returning the cached next_active value;
++ * for now we prefer to do full lookups to test the consistency of the
++ * data structures.
++ */
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++ int extract,
++ struct bfq_data *bfqd)
++{
++ struct bfq_service_tree *st = sd->service_tree;
++ struct bfq_entity *entity;
++	int i = 0;
++
++ BUG_ON(sd->active_entity != NULL);
++
++ if (bfqd != NULL &&
++ jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) {
++ entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1, true);
++ if (entity != NULL) {
++ i = BFQ_IOPRIO_CLASSES - 1;
++ bfqd->bfq_class_idle_last_service = jiffies;
++ sd->next_active = entity;
++ }
++ }
++ for (; i < BFQ_IOPRIO_CLASSES; i++) {
++ entity = __bfq_lookup_next_entity(st + i, false);
++ if (entity != NULL) {
++ if (extract) {
++ bfq_check_next_active(sd, entity);
++ bfq_active_extract(st + i, entity);
++ sd->active_entity = entity;
++ sd->next_active = NULL;
++ }
++ break;
++ }
++ }
++
++ return entity;
++}
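++
++/*
++ * The initial lookup above implements a simple anti-starvation rule
++ * for the idle class: with BFQ_CL_IDLE_TIMEOUT == HZ/5, a backlogged
++ * IOPRIO_CLASS_IDLE entity is force-served roughly once every 200 ms
++ * at most, no matter how busy the RT and BE classes are.
++ */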
++
++/*
++ * Get next queue for service.
++ */
++static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
++{
++ struct bfq_entity *entity = NULL;
++ struct bfq_sched_data *sd;
++ struct bfq_queue *bfqq;
++
++ BUG_ON(bfqd->active_queue != NULL);
++
++ if (bfqd->busy_queues == 0)
++ return NULL;
++
++ sd = &bfqd->root_group->sched_data;
++ for (; sd != NULL; sd = entity->my_sched_data) {
++ entity = bfq_lookup_next_entity(sd, 1, bfqd);
++ BUG_ON(entity == NULL);
++ entity->service = 0;
++ }
++
++ bfqq = bfq_entity_to_bfqq(entity);
++ BUG_ON(bfqq == NULL);
++
++ return bfqq;
++}
++
++/*
++ * Forced extraction of the given queue.
++ */
++static void bfq_get_next_queue_forced(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity;
++ struct bfq_sched_data *sd;
++
++ BUG_ON(bfqd->active_queue != NULL);
++
++ entity = &bfqq->entity;
++ /*
++ * Bubble up extraction/update from the leaf to the root.
++ */
++ for_each_entity(entity) {
++ sd = entity->sched_data;
++ bfq_update_budget(entity);
++ bfq_update_vtime(bfq_entity_service_tree(entity));
++ bfq_active_extract(bfq_entity_service_tree(entity), entity);
++ sd->active_entity = entity;
++ sd->next_active = NULL;
++ entity->service = 0;
++ }
++}
++
++static void __bfq_bfqd_reset_active(struct bfq_data *bfqd)
++{
++ if (bfqd->active_bic != NULL) {
++ put_io_context(bfqd->active_bic->icq.ioc);
++ bfqd->active_bic = NULL;
++ }
++
++ bfqd->active_queue = NULL;
++ del_timer(&bfqd->idle_slice_timer);
++}
++
++static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ int requeue)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ if (bfqq == bfqd->active_queue)
++ __bfq_bfqd_reset_active(bfqd);
++
++ bfq_deactivate_entity(entity, requeue);
++}
++
++static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_activate_entity(entity);
++}
++
++/*
++ * Called when the bfqq no longer has requests pending; remove it from
++ * the service tree.
++ */
++static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ int requeue)
++{
++ BUG_ON(!bfq_bfqq_busy(bfqq));
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ bfq_log_bfqq(bfqd, bfqq, "del from busy");
++
++ bfq_clear_bfqq_busy(bfqq);
++
++ BUG_ON(bfqd->busy_queues == 0);
++ bfqd->busy_queues--;
++
++ bfq_deactivate_bfqq(bfqd, bfqq, requeue);
++}
++
++/*
++ * Called when an inactive queue receives a new request.
++ */
++static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfq_bfqq_busy(bfqq));
++ BUG_ON(bfqq == bfqd->active_queue);
++
++ bfq_log_bfqq(bfqd, bfqq, "add to busy");
++
++ bfq_activate_bfqq(bfqd, bfqq);
++
++ bfq_mark_bfqq_busy(bfqq);
++ bfqd->busy_queues++;
++}
+diff --git a/block/bfq.h b/block/bfq.h
+new file mode 100644
+index 0000000..48ecde9
+--- /dev/null
++++ b/block/bfq.h
+@@ -0,0 +1,603 @@
++/*
++ * BFQ-v6r2 for 3.10.0: data structures and common functions prototypes.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++#ifndef _BFQ_H
++#define _BFQ_H
++
++#include <linux/blktrace_api.h>
++#include <linux/hrtimer.h>
++#include <linux/ioprio.h>
++#include <linux/rbtree.h>
++
++#define BFQ_IOPRIO_CLASSES 3
++#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
++
++#define BFQ_MIN_WEIGHT 1
++#define BFQ_MAX_WEIGHT 1000
++
++#define BFQ_DEFAULT_GRP_WEIGHT 10
++#define BFQ_DEFAULT_GRP_IOPRIO 0
++#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
++
++struct bfq_entity;
++
++/**
++ * struct bfq_service_tree - per ioprio_class service tree.
++ * @active: tree for active entities (i.e., those backlogged).
++ * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i).
++ * @first_idle: idle entity with minimum F_i.
++ * @last_idle: idle entity with maximum F_i.
++ * @vtime: scheduler virtual time.
++ * @wsum: scheduler weight sum; active and idle entities contribute to it.
++ *
++ * Each service tree represents a B-WF2Q+ scheduler on its own. Each
++ * ioprio_class has its own independent scheduler, and so its own
++ * bfq_service_tree. All the fields are protected by the queue lock
++ * of the containing bfqd.
++ */
++struct bfq_service_tree {
++ struct rb_root active;
++ struct rb_root idle;
++
++ struct bfq_entity *first_idle;
++ struct bfq_entity *last_idle;
++
++ u64 vtime;
++ unsigned long wsum;
++};
++
++/**
++ * struct bfq_sched_data - multi-class scheduler.
++ * @active_entity: entity under service.
++ * @next_active: head-of-the-line entity in the scheduler.
++ * @service_tree: array of service trees, one per ioprio_class.
++ *
++ * bfq_sched_data is the basic scheduler queue. It supports three
++ * ioprio_classes, and can be used either as a toplevel queue or as
++ * an intermediate queue on a hierarchical setup.
++ * @next_active points to the active entity of the sched_data service
++ * trees that will be scheduled next.
++ *
++ * The supported ioprio_classes are the same as in CFQ, in descending
++ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
++ * Requests from higher priority queues are served before all the
++ * requests from lower priority queues; among requests of the same
++ * class, requests are served according to B-WF2Q+.
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_sched_data {
++ struct bfq_entity *active_entity;
++ struct bfq_entity *next_active;
++ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
++};
++
++/**
++ * struct bfq_entity - schedulable entity.
++ * @rb_node: service_tree member.
++ * @on_st: flag, true if the entity is on a tree (either the active or
++ * the idle one of its service_tree).
++ * @finish: B-WF2Q+ finish timestamp (aka F_i).
++ * @start: B-WF2Q+ start timestamp (aka S_i).
++ * @tree: tree the entity is enqueued into; %NULL if not on a tree.
++ * @min_start: minimum start time of the (active) subtree rooted at
++ * this entity; used for O(log N) lookups into active trees.
++ * @service: service received during the last round of service.
++ * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight.
++ * @weight: weight of the queue
++ * @parent: parent entity, for hierarchical scheduling.
++ * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the
++ * associated scheduler queue, %NULL on leaf nodes.
++ * @sched_data: the scheduler queue this entity belongs to.
++ * @ioprio: the ioprio in use.
++ * @new_weight: when a weight change is requested, the new weight value.
++ * @orig_weight: original weight, used to implement weight boosting
++ * @new_ioprio: when an ioprio change is requested, the new ioprio value.
++ * @ioprio_class: the ioprio_class in use.
++ * @new_ioprio_class: when an ioprio_class change is requested, the new
++ * ioprio_class value.
++ * @ioprio_changed: flag, true when the user requested a weight, ioprio or
++ * ioprio_class change.
++ *
++ * A bfq_entity is used to represent either a bfq_queue (leaf node in the
++ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
++ * entity belongs to the sched_data of the parent group in the cgroup
++ * hierarchy. Non-leaf entities have also their own sched_data, stored
++ * in @my_sched_data.
++ *
++ * Each entity stores independently its priority values; this would
++ * allow different weights on different devices, but this
++ * functionality is not exported to userspace for now. Priorities and
++ * weights are updated lazily, first storing the new values into the
++ * new_* fields, then setting the @ioprio_changed flag. As soon as
++ * there is a transition in the entity state that allows the priority
++ * update to take place the effective and the requested priority
++ * values are synchronized.
++ *
++ * Unless cgroups are used, the weight value is calculated from the
++ * ioprio to export the same interface as CFQ. When dealing with
++ * ``well-behaved'' queues (i.e., queues that do not spend too much
++ * time consuming their budget and have truly sequential behavior, and
++ * when there are no external factors breaking anticipation) the
++ * relative weights at each level of the cgroups hierarchy should be
++ * guaranteed. All the fields are protected by the queue lock of the
++ * containing bfqd.
++ */
++struct bfq_entity {
++ struct rb_node rb_node;
++
++ int on_st;
++
++ u64 finish;
++ u64 start;
++
++ struct rb_root *tree;
++
++ u64 min_start;
++
++ unsigned long service, budget;
++ unsigned short weight, new_weight;
++ unsigned short orig_weight;
++
++ struct bfq_entity *parent;
++
++ struct bfq_sched_data *my_sched_data;
++ struct bfq_sched_data *sched_data;
++
++ unsigned short ioprio, new_ioprio;
++ unsigned short ioprio_class, new_ioprio_class;
++
++ int ioprio_changed;
++};
++
++struct bfq_group;
++
++/**
++ * struct bfq_queue - leaf schedulable entity.
++ * @ref: reference counter.
++ * @bfqd: parent bfq_data.
++ * @new_bfqq: shared bfq_queue if queue is cooperating with
++ * one or more other queues.
++ * @pos_node: request-position tree member (see bfq_data's @rq_pos_tree).
++ * @pos_root: request-position tree root (see bfq_data's @rq_pos_tree).
++ * @sort_list: sorted list of pending requests.
++ * @next_rq: if fifo isn't expired, next request to serve.
++ * @queued: nr of requests queued in @sort_list.
++ * @allocated: currently allocated requests.
++ * @meta_pending: pending metadata requests.
++ * @fifo: fifo list of requests in sort_list.
++ * @entity: entity representing this queue in the scheduler.
++ * @max_budget: maximum budget allowed from the feedback mechanism.
++ * @budget_timeout: budget expiration (in jiffies).
++ * @dispatched: number of requests on the dispatch list or inside driver.
++ * @org_ioprio: saved ioprio during boosted periods.
++ * @flags: status flags.
++ * @bfqq_list: node for active/idle bfqq list inside our bfqd.
++ * @seek_samples: number of seeks sampled
++ * @seek_total: sum of the distances of the seeks sampled
++ * @seek_mean: mean seek distance
++ * @last_request_pos: position of the last request enqueued
++ * @pid: pid of the process owning the queue, used for logging purposes.
++ * @last_rais_start_time: last (idle -> weight-raised) transition attempt
++ * @raising_cur_max_time: current max raising time for this queue
++ *
++ * A bfq_queue is a leaf request queue; it can be associated with one
++ * io_context or more (if it is an async one). @cgroup holds a reference to the
++ * cgroup, to be sure that it does not disappear while a bfqq still
++ * references it (mostly to avoid races between request issuing and task
++ * migration followed by cgroup destruction).
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_queue {
++ atomic_t ref;
++ struct bfq_data *bfqd;
++
++ /* fields for cooperating queues handling */
++ struct bfq_queue *new_bfqq;
++ struct rb_node pos_node;
++ struct rb_root *pos_root;
++
++ struct rb_root sort_list;
++ struct request *next_rq;
++ int queued[2];
++ int allocated[2];
++ int meta_pending;
++ struct list_head fifo;
++
++ struct bfq_entity entity;
++
++ unsigned long max_budget;
++ unsigned long budget_timeout;
++
++ int dispatched;
++
++ unsigned short org_ioprio;
++
++ unsigned int flags;
++
++ struct list_head bfqq_list;
++
++ unsigned int seek_samples;
++ u64 seek_total;
++ sector_t seek_mean;
++ sector_t last_request_pos;
++
++ pid_t pid;
++
++ /* weight-raising fields */
++ unsigned int raising_cur_max_time;
++ u64 last_rais_start_finish, soft_rt_next_start;
++ unsigned int raising_coeff;
++};
++
++/**
++ * struct bfq_ttime - per process thinktime stats.
++ * @ttime_total: total process thinktime
++ * @ttime_samples: number of thinktime samples
++ * @ttime_mean: average process thinktime
++ */
++struct bfq_ttime {
++ unsigned long last_end_request;
++
++ unsigned long ttime_total;
++ unsigned long ttime_samples;
++ unsigned long ttime_mean;
++};
++
++/**
++ * struct bfq_io_cq - per (request_queue, io_context) structure.
++ * @icq: associated io_cq structure
++ * @bfqq: array of two process queues, the sync and the async
++ * @ttime: associated @bfq_ttime struct
++ */
++struct bfq_io_cq {
++ struct io_cq icq; /* must be the first member */
++ struct bfq_queue *bfqq[2];
++ struct bfq_ttime ttime;
++ int ioprio;
++};
++
++/**
++ * struct bfq_data - per device data structure.
++ * @queue: request queue for the managed device.
++ * @root_group: root bfq_group for the device.
++ * @rq_pos_tree: rbtree sorted by next_request position,
++ * used when determining if two or more queues
++ * have interleaving requests (see bfq_close_cooperator).
++ * @busy_queues: number of bfq_queues containing requests (including the
++ * queue under service, even if it is idling).
++ * @queued: number of queued requests.
++ * @rq_in_driver: number of requests dispatched and waiting for completion.
++ * @sync_flight: number of sync requests in the driver.
++ * @max_rq_in_driver: max number of reqs in driver in the last @hw_tag_samples
++ * completed requests.
++ * @hw_tag_samples: nr of samples used to calculate hw_tag.
++ * @hw_tag: flag set to one if the driver is showing a queueing behavior.
++ * @budgets_assigned: number of budgets assigned.
++ * @idle_slice_timer: timer set when idling for the next sequential request
++ * from the queue under service.
++ * @unplug_work: delayed work to restart dispatching on the request queue.
++ * @active_queue: bfq_queue under service.
++ * @active_bic: bfq_io_cq (bic) associated with the @active_queue.
++ * @last_position: on-disk position of the last served request.
++ * @last_budget_start: beginning of the last budget.
++ * @last_idling_start: beginning of the last idle slice.
++ * @peak_rate: peak transfer rate observed for a budget.
++ * @peak_rate_samples: number of samples used to calculate @peak_rate.
++ * @bfq_max_budget: maximum budget allotted to a bfq_queue before rescheduling.
++ * @group_list: list of all the bfq_groups active on the device.
++ * @active_list: list of all the bfq_queues active on the device.
++ * @idle_list: list of all the bfq_queues idle on the device.
++ * @bfq_quantum: max number of requests dispatched per dispatch round.
++ * @bfq_fifo_expire: timeout for async/sync requests; when it expires
++ * requests are served in fifo order.
++ * @bfq_back_penalty: weight of backward seeks wrt forward ones.
++ * @bfq_back_max: maximum allowed backward seek.
++ * @bfq_slice_idle: maximum idling time.
++ * @bfq_user_max_budget: user-configured max budget value (0 for auto-tuning).
++ * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to
++ * async queues.
++ * @bfq_timeout: timeout for bfq_queues to consume their budget; used
++ * to prevent seeky queues from imposing long latencies on
++ * well-behaved ones (this also implies that seeky queues cannot
++ * receive guarantees in the service domain; after a timeout
++ * they are charged for the whole allocated budget, to try
++ * to preserve a behavior reasonably fair among them, but
++ * without service-domain guarantees).
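++ * @low_latency: if set, the low-latency heuristics below are enabled.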
++ * @bfq_raising_coeff: Maximum factor by which the weight of a boosted
++ * queue is multiplied
++ * @bfq_raising_max_time: maximum duration of a weight-raising period (jiffies)
++ * @bfq_raising_rt_max_time: maximum duration for soft real-time processes
++ * @bfq_raising_min_idle_time: minimum idle period after which weight-raising
++ * may be reactivated for a queue (in jiffies)
++ * @bfq_raising_min_inter_arr_async: minimum period between request arrivals
++ * after which weight-raising may be
++ * reactivated for an already busy queue
++ * (in jiffies)
++ * @bfq_raising_max_softrt_rate: max service-rate for a soft real-time queue,
++ * sectors per second
++ * @RT_prod: cached value of the product R*T, used for automatically
++ * computing the maximum duration of the weight raising
++ * @oom_bfqq: fallback dummy bfqq for extreme OOM conditions
++ *
++ * All the fields are protected by the @queue lock.
++ */
++struct bfq_data {
++ struct request_queue *queue;
++
++ struct bfq_group *root_group;
++
++ struct rb_root rq_pos_tree;
++
++ int busy_queues;
++ int queued;
++ int rq_in_driver;
++ int sync_flight;
++
++ int max_rq_in_driver;
++ int hw_tag_samples;
++ int hw_tag;
++
++ int budgets_assigned;
++
++ struct timer_list idle_slice_timer;
++ struct work_struct unplug_work;
++
++ struct bfq_queue *active_queue;
++ struct bfq_io_cq *active_bic;
++
++ sector_t last_position;
++
++ ktime_t last_budget_start;
++ ktime_t last_idling_start;
++ int peak_rate_samples;
++ u64 peak_rate;
++ unsigned long bfq_max_budget;
++
++ struct hlist_head group_list;
++ struct list_head active_list;
++ struct list_head idle_list;
++
++ unsigned int bfq_quantum;
++ unsigned int bfq_fifo_expire[2];
++ unsigned int bfq_back_penalty;
++ unsigned int bfq_back_max;
++ unsigned int bfq_slice_idle;
++ u64 bfq_class_idle_last_service;
++
++ unsigned int bfq_user_max_budget;
++ unsigned int bfq_max_budget_async_rq;
++ unsigned int bfq_timeout[2];
++
++ bool low_latency;
++
++ /* parameters of the low_latency heuristics */
++ unsigned int bfq_raising_coeff;
++ unsigned int bfq_raising_max_time;
++ unsigned int bfq_raising_rt_max_time;
++ unsigned int bfq_raising_min_idle_time;
++ unsigned int bfq_raising_min_inter_arr_async;
++ unsigned int bfq_raising_max_softrt_rate;
++ u64 RT_prod;
++
++ struct bfq_queue oom_bfqq;
++};
++
++enum bfqq_state_flags {
++ BFQ_BFQQ_FLAG_busy = 0, /* has requests or is under service */
++ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */
++ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
++ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
++ BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */
++ BFQ_BFQQ_FLAG_prio_changed, /* task priority has changed */
++ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
++ BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */
++ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
++ BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be split */
++ BFQ_BFQQ_FLAG_some_coop_idle, /* some cooperator is inactive */
++};
++
++#define BFQ_BFQQ_FNS(name) \
++static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
++{ \
++ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \
++}
++
++BFQ_BFQQ_FNS(busy);
++BFQ_BFQQ_FNS(wait_request);
++BFQ_BFQQ_FNS(must_alloc);
++BFQ_BFQQ_FNS(fifo_expire);
++BFQ_BFQQ_FNS(idle_window);
++BFQ_BFQQ_FNS(prio_changed);
++BFQ_BFQQ_FNS(sync);
++BFQ_BFQQ_FNS(budget_new);
++BFQ_BFQQ_FNS(coop);
++BFQ_BFQQ_FNS(split_coop);
++BFQ_BFQQ_FNS(some_coop_idle);
++#undef BFQ_BFQQ_FNS
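++
++/*
++ * For example, BFQ_BFQQ_FNS(busy) expands to bfq_mark_bfqq_busy(),
++ * bfq_clear_bfqq_busy() and bfq_bfqq_busy(), which respectively set,
++ * clear and test BFQ_BFQQ_FLAG_busy in bfqq->flags.
++ */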
++
++/* Logging facilities. */
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args)
++
++#define bfq_log(bfqd, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
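++
++/*
++ * For example, bfq_log_bfqq(bfqd, bfqq, "budget exhausted") adds the
++ * message "bfq<pid> budget exhausted" to the blktrace stream of the
++ * device, where <pid> is the pid of the process owning bfqq.
++ */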
++
++/* Expiration reasons. */
++enum bfqq_expiration {
++ BFQ_BFQQ_TOO_IDLE = 0, /* queue has been idling for too long */
++ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */
++ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */
++ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */
++};
++
++#ifdef CONFIG_CGROUP_BFQIO
++/**
++ * struct bfq_group - per (device, cgroup) data structure.
++ * @entity: schedulable entity to insert into the parent group sched_data.
++ * @sched_data: own sched_data, to contain child entities (they may be
++ * both bfq_queues and bfq_groups).
++ * @group_node: node to be inserted into the bfqio_cgroup->group_data
++ * list of the containing cgroup's bfqio_cgroup.
++ * @bfqd_node: node to be inserted into the @bfqd->group_list list
++ * of the groups active on the same device; used for cleanup.
++ * @bfqd: the bfq_data for the device this group acts upon.
++ * @async_bfqq: array of async queues for all the tasks belonging to
++ * the group, one queue per ioprio value per ioprio_class,
++ * except for the idle class, which has only one queue.
++ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
++ * @my_entity: pointer to @entity, %NULL for the toplevel group; used
++ * to avoid too many special cases during group creation/migration.
++ *
++ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
++ * there is a set of bfq_groups, each one collecting the lower-level
++ * entities belonging to the group that are acting on the same device.
++ *
++ * Locking works as follows:
++ * o @group_node is protected by the bfqio_cgroup lock, and is accessed
++ * via RCU from its readers.
++ * o @bfqd is protected by the queue lock, RCU is used to access it
++ * from the readers.
++ * o All the other fields are protected by the @bfqd queue lock.
++ */
++struct bfq_group {
++ struct bfq_entity entity;
++ struct bfq_sched_data sched_data;
++
++ struct hlist_node group_node;
++ struct hlist_node bfqd_node;
++
++ void *bfqd;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++
++ struct bfq_entity *my_entity;
++};
++
++/**
++ * struct bfqio_cgroup - bfq cgroup data structure.
++ * @css: subsystem state for bfq in the containing cgroup.
++ * @weight: cgroup weight.
++ * @ioprio: cgroup ioprio.
++ * @ioprio_class: cgroup ioprio_class.
++ * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data.
++ * @group_data: list containing the bfq_group belonging to this cgroup.
++ *
++ * @group_data is accessed using RCU, with @lock protecting the updates,
++ * @ioprio and @ioprio_class are protected by @lock.
++ */
++struct bfqio_cgroup {
++ struct cgroup_subsys_state css;
++
++ unsigned short weight, ioprio, ioprio_class;
++
++ spinlock_t lock;
++ struct hlist_head group_data;
++};
++#else
++struct bfq_group {
++ struct bfq_sched_data sched_data;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++};
++#endif
++
++static inline struct bfq_service_tree *
++bfq_entity_service_tree(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sched_data = entity->sched_data;
++ unsigned int idx = entity->ioprio_class - 1;
++
++ BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
++ BUG_ON(sched_data == NULL);
++
++ return sched_data->service_tree + idx;
++}
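++
++/*
++ * The ioprio classes are 1-based (RT = 1, BE = 2, IDLE = 3), hence the
++ * "- 1" above when indexing the BFQ_IOPRIO_CLASSES service trees.
++ */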
++
++static inline struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic,
++ int is_sync)
++{
++ return bic->bfqq[!!is_sync];
++}
++
++static inline void bic_set_bfqq(struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq, int is_sync)
++{
++ bic->bfqq[!!is_sync] = bfqq;
++}
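++
++/*
++ * In the two helpers above, the double negation turns any non-zero
++ * is_sync value into 1, so that bfqq[] is always indexed with 0 (the
++ * async queue) or 1 (the sync queue).
++ */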
++
++static inline struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
++{
++ return bic->icq.q->elevator->elevator_data;
++}
++
++/**
++ * bfq_get_bfqd_locked - get a lock on a bfqd using an RCU-protected pointer.
++ * @ptr: a pointer to a bfqd.
++ * @flags: storage for the flags to be saved.
++ *
++ * This function allows bfqg->bfqd to be protected by the
++ * queue lock of the bfqd it references; the pointer is dereferenced
++ * under RCU, so the storage for bfqd is guaranteed to be safe as long
++ * as the RCU read side critical section does not end. After the
++ * bfqd->queue->queue_lock is taken the pointer is rechecked, to be
++ * sure that no other writer accessed it. If we raced with a writer,
++ * the function returns NULL, with the queue unlocked, otherwise it
++ * returns the dereferenced pointer, with the queue locked.
++ */
++static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr,
++ unsigned long *flags)
++{
++ struct bfq_data *bfqd;
++
++ rcu_read_lock();
++ bfqd = rcu_dereference(*(struct bfq_data **)ptr);
++
++ if (bfqd != NULL) {
++ spin_lock_irqsave(bfqd->queue->queue_lock, *flags);
++ if (*ptr == bfqd)
++ goto out;
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
++ }
++
++ bfqd = NULL;
++out:
++ rcu_read_unlock();
++ return bfqd;
++}
++
++static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd,
++ unsigned long *flags)
++{
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
++}
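++
++/*
++ * A typical caller pairs the two helpers above; e.g., assuming a
++ * struct bfq_group *bfqg at hand:
++ *
++ *	unsigned long flags;
++ *	struct bfq_data *bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++ *
++ *	if (bfqd != NULL) {
++ *		(work under bfqd->queue->queue_lock)
++ *		bfq_put_bfqd_unlock(bfqd, &flags);
++ *	}
++ */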
++
++static void bfq_changed_ioprio(struct bfq_io_cq *bic);
++static void bfq_put_queue(struct bfq_queue *bfqq);
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bfq_group *bfqg, int is_sync,
++ struct bfq_io_cq *bic, gfp_t gfp_mask);
++static void bfq_end_raising_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg);
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
++#endif
+--
+1.8.1.4
+
diff --git a/5000_BFQ-3-block-add-Early-Queue-Merge-EQM-v6r2-for-3.11.0.patch1 b/5000_BFQ-3-block-add-Early-Queue-Merge-EQM-v6r2-for-3.11.0.patch1
new file mode 100644
index 00000000..17d787c6
--- /dev/null
+++ b/5000_BFQ-3-block-add-Early-Queue-Merge-EQM-v6r2-for-3.11.0.patch1
@@ -0,0 +1,1049 @@
+From 9acaa783ecab69925d38c6aca7252ff565a093d0 Mon Sep 17 00:00:00 2001
+From: Mauro Andreolini <mauro.andreolini@unimore.it>
+Date: Fri, 14 Jun 2013 13:46:47 +0200
+Subject: [PATCH 3/3] block, bfq: add Early Queue Merge (EQM) to BFQ-v6r2 for
+ 3.11.0
+
+A set of processes may happen to perform interleaved reads, i.e., requests
+whose union would give rise to a sequential read pattern. There are two
+typical cases: in the first case, processes read fixed-size chunks of
+data at a fixed distance from each other, while in the second case processes
+may read variable-size chunks at variable distances. The latter case occurs
+for example with KVM, which splits the I/O generated by the guest into
+multiple chunks, and lets these chunks be served by a pool of cooperating
+processes, iteratively assigning the next chunk of I/O to the first
+available process. CFQ uses actual queue merging for the first type of
+processes, whereas it uses preemption to get a sequential read pattern out
+of the read requests performed by the second type of processes. In the end
+it uses two different mechanisms to achieve the same goal: boosting the
+throughput with interleaved I/O.
+
+This patch introduces Early Queue Merge (EQM), a unified mechanism to get a
+sequential read pattern with both types of processes. The main idea is
+checking newly arrived requests against the next request of the active queue
+both in case of actual request insert and in case of request merge. By doing
+so, both types of processes can be handled by just merging their queues.
+EQM is then simpler and more compact than the pair of mechanisms used in
+CFQ.
+
+Finally, EQM also preserves the typical low-latency properties of BFQ, by
+properly restoring the weight-raising state of a queue when it gets back to
+a non-merged state.
+
+Signed-off-by: Mauro Andreolini <mauro.andreolini@unimore.it>
+Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
+Reviewed-by: Paolo Valente <paolo.valente@unimore.it>
+---
+ block/bfq-iosched.c | 653 ++++++++++++++++++++++++++++++++++++----------------
+ block/bfq-sched.c | 28 ---
+ block/bfq.h | 16 ++
+ 3 files changed, 466 insertions(+), 231 deletions(-)
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 0ed2746..bbe79fb 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -444,6 +444,43 @@ static inline unsigned int bfq_wrais_duration(struct bfq_data *bfqd)
+ return dur;
+ }
+
++static inline void
++bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++{
++ if (bic->saved_idle_window)
++ bfq_mark_bfqq_idle_window(bfqq);
++ else
++ bfq_clear_bfqq_idle_window(bfqq);
++ if (bic->raising_time_left && bfqq->bfqd->low_latency) {
++ /*
++ * Start a weight raising period with the duration given by
++ * the raising_time_left snapshot.
++ */
++ bfqq->raising_coeff = bfqq->bfqd->bfq_raising_coeff;
++ bfqq->raising_cur_max_time = bic->raising_time_left;
++ bfqq->last_rais_start_finish = jiffies;
++ }
++ /*
++ * Clear raising_time_left to prevent bfq_bfqq_save_state() from
++ * getting confused about the queue's need of a weight-raising
++ * period.
++ */
++ bic->raising_time_left = 0;
++}
++
++/*
++ * Must be called with the queue_lock held.
++ */
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++ int process_refs, io_refs;
++
++ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++ process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
++ BUG_ON(process_refs < 0);
++ return process_refs;
++}
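++
++/*
++ * For example, a bfqq with ref == 5, three allocated requests and
++ * entity.on_st == 1 has 5 - 3 - 1 == 1 process reference left.
++ */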
++
+ static void bfq_add_rq_rb(struct request *rq)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+@@ -483,11 +520,20 @@ static void bfq_add_rq_rb(struct request *rq)
+ if (! bfqd->low_latency)
+ goto add_bfqq_busy;
+
++ if (bfq_bfqq_just_split(bfqq))
++ goto set_ioprio_changed;
++
+ /*
+- * If the queue is not being boosted and has been idle
+- * for enough time, start a weight-raising period
++ * If the queue:
++ * - is not being boosted,
++ * - has been idle for enough time,
++ * - is not a sync queue or is linked to a bfq_io_cq (it is
++ * shared "for its nature" or it is not shared and its
++ * requests have not been redirected to a shared queue)
++ * start a weight-raising period.
+ */
+- if(old_raising_coeff == 1 && (idle_for_long_time || soft_rt)) {
++ if (old_raising_coeff == 1 && (idle_for_long_time || soft_rt) &&
++ (!bfq_bfqq_sync(bfqq) || bfqq->bic != NULL)) {
+ bfqq->raising_coeff = bfqd->bfq_raising_coeff;
+ if (idle_for_long_time)
+ bfqq->raising_cur_max_time =
+@@ -517,6 +563,7 @@ static void bfq_add_rq_rb(struct request *rq)
+ raising_cur_max_time));
+ }
+ }
++set_ioprio_changed:
+ if (old_raising_coeff != bfqq->raising_coeff)
+ entity->ioprio_changed = 1;
+ add_bfqq_busy:
+@@ -695,89 +742,35 @@ static void bfq_end_raising(struct bfq_data *bfqd)
+ spin_unlock_irq(bfqd->queue->queue_lock);
+ }
+
+-static int bfq_allow_merge(struct request_queue *q, struct request *rq,
+- struct bio *bio)
++static inline sector_t bfq_io_struct_pos(void *io_struct, bool request)
+ {
+- struct bfq_data *bfqd = q->elevator->elevator_data;
+- struct bfq_io_cq *bic;
+- struct bfq_queue *bfqq;
+-
+- /*
+- * Disallow merge of a sync bio into an async request.
+- */
+- if (bfq_bio_sync(bio) && !rq_is_sync(rq))
+- return 0;
+-
+- /*
+- * Lookup the bfqq that this bio will be queued with. Allow
+- * merge only if rq is queued there.
+- * Queue lock is held here.
+- */
+- bic = bfq_bic_lookup(bfqd, current->io_context);
+- if (bic == NULL)
+- return 0;
+-
+- bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
+- return bfqq == RQ_BFQQ(rq);
+-}
+-
+-static void __bfq_set_active_queue(struct bfq_data *bfqd,
+- struct bfq_queue *bfqq)
+-{
+- if (bfqq != NULL) {
+- bfq_mark_bfqq_must_alloc(bfqq);
+- bfq_mark_bfqq_budget_new(bfqq);
+- bfq_clear_bfqq_fifo_expire(bfqq);
+-
+- bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
+-
+- bfq_log_bfqq(bfqd, bfqq, "set_active_queue, cur-budget = %lu",
+- bfqq->entity.budget);
+- }
+-
+- bfqd->active_queue = bfqq;
+-}
+-
+-/*
+- * Get and set a new active queue for service.
+- */
+-static struct bfq_queue *bfq_set_active_queue(struct bfq_data *bfqd,
+- struct bfq_queue *bfqq)
+-{
+- if (!bfqq)
+- bfqq = bfq_get_next_queue(bfqd);
++ if (request)
++ return blk_rq_pos(io_struct);
+ else
+- bfq_get_next_queue_forced(bfqd, bfqq);
+-
+- __bfq_set_active_queue(bfqd, bfqq);
+- return bfqq;
++ return ((struct bio *)io_struct)->bi_sector;
+ }
+
+-static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
+- struct request *rq)
++static inline sector_t bfq_dist_from(sector_t pos1,
++ sector_t pos2)
+ {
+- if (blk_rq_pos(rq) >= bfqd->last_position)
+- return blk_rq_pos(rq) - bfqd->last_position;
++ if (pos1 >= pos2)
++ return pos1 - pos2;
+ else
+- return bfqd->last_position - blk_rq_pos(rq);
++ return pos2 - pos1;
+ }
+
+-/*
+- * Return true if bfqq has no request pending and rq is close enough to
+- * bfqd->last_position, or if rq is closer to bfqd->last_position than
+- * bfqq->next_rq
+- */
+-static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq)
++static inline int bfq_rq_close_to_sector(void *io_struct, bool request,
++ sector_t sector)
+ {
+- return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR;
++ return bfq_dist_from(bfq_io_struct_pos(io_struct, request), sector) <=
++ BFQQ_SEEK_THR;
+ }
+
+-static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
++static struct bfq_queue *bfqq_close(struct bfq_data *bfqd, sector_t sector)
+ {
+ struct rb_root *root = &bfqd->rq_pos_tree;
+ struct rb_node *parent, *node;
+ struct bfq_queue *__bfqq;
+- sector_t sector = bfqd->last_position;
+
+ if (RB_EMPTY_ROOT(root))
+ return NULL;
+@@ -796,7 +789,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
+ * position).
+ */
+ __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
+- if (bfq_rq_close(bfqd, __bfqq->next_rq))
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
+ return __bfqq;
+
+ if (blk_rq_pos(__bfqq->next_rq) < sector)
+@@ -807,7 +800,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
+ return NULL;
+
+ __bfqq = rb_entry(node, struct bfq_queue, pos_node);
+- if (bfq_rq_close(bfqd, __bfqq->next_rq))
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
+ return __bfqq;
+
+ return NULL;
+@@ -816,14 +809,12 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
+ /*
+ * bfqd - obvious
+ * cur_bfqq - passed in so that we don't decide that the current queue
+- * is closely cooperating with itself.
+- *
+- * We are assuming that cur_bfqq has dispatched at least one request,
+- * and that bfqd->last_position reflects a position on the disk associated
+- * with the I/O issued by cur_bfqq.
++ * is closely cooperating with itself
++ * sector - used as a reference point to search for a close queue
+ */
+ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
+- struct bfq_queue *cur_bfqq)
++ struct bfq_queue *cur_bfqq,
++ sector_t sector)
+ {
+ struct bfq_queue *bfqq;
+
+@@ -843,7 +834,7 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
+ * working closely on the same area of the disk. In that case,
+ * we can group them together and don't waste time idling.
+ */
+- bfqq = bfqq_close(bfqd);
++ bfqq = bfqq_close(bfqd, sector);
+ if (bfqq == NULL || bfqq == cur_bfqq)
+ return NULL;
+
+@@ -870,6 +861,275 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
+ return bfqq;
+ }
+
++static struct bfq_queue *
++bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ int process_refs, new_process_refs;
++ struct bfq_queue *__bfqq;
++
++ /*
++ * If there are no process references on the new_bfqq, then it is
++ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++ * may have dropped their last reference (not just their last process
++ * reference).
++ */
++ if (!bfqq_process_refs(new_bfqq))
++ return NULL;
++
++ /* Avoid a circular list and skip interim queue merges. */
++ while ((__bfqq = new_bfqq->new_bfqq)) {
++ if (__bfqq == bfqq)
++ return NULL;
++ new_bfqq = __bfqq;
++ }
++
++ process_refs = bfqq_process_refs(bfqq);
++ new_process_refs = bfqq_process_refs(new_bfqq);
++ /*
++ * If the process for the bfqq has gone away, there is no
++ * sense in merging the queues.
++ */
++ if (process_refs == 0 || new_process_refs == 0)
++ return NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++ new_bfqq->pid);
++
++ /*
++ * Merging is just a redirection: the requests of the process owning
++ * one of the two queues are redirected to the other queue. The latter
++ * queue, in its turn, is set as shared if this is the first time that
++ * the requests of some process are redirected to it.
++ *
++ * We redirect bfqq to new_bfqq and not the opposite, because we
++ * are in the context of the process owning bfqq, hence we have the
++ * io_cq of this process. So we can immediately configure this io_cq
++ * to redirect the requests of the process to new_bfqq.
++ *
++ * NOTE, even if new_bfqq coincides with the active queue, the io_cq of
++ * new_bfqq is not available, because, if the active queue is shared,
++ * bfqd->active_bic may not point to the io_cq of the active queue.
++ * Redirecting the requests of the process owning bfqq to the currently
++ * active queue is in any case the best option, as we feed the active queue
++ * with new requests close to the last request served and, by doing so,
++ * hopefully increase the throughput.
++ */
++ bfqq->new_bfqq = new_bfqq;
++ atomic_add(process_refs, &new_bfqq->ref);
++ return new_bfqq;
++}
++
++/*
++ * Attempt to schedule a merge of bfqq with the currently active queue or
++ * with a close queue among the scheduled queues.
++ * Return NULL if no merge was scheduled, a pointer to the shared bfq_queue
++ * structure otherwise.
++ */
++static struct bfq_queue *
++bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ void *io_struct, bool request)
++{
++ struct bfq_queue *active_bfqq, *new_bfqq;
++
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
++ if (!io_struct)
++ return NULL;
++
++ active_bfqq = bfqd->active_queue;
++
++ if (active_bfqq == NULL || active_bfqq == bfqq || !bfqd->active_bic)
++ goto check_scheduled;
++
++ if (bfq_class_idle(active_bfqq) || bfq_class_idle(bfqq))
++ goto check_scheduled;
++
++ if (bfq_class_rt(active_bfqq) != bfq_class_rt(bfqq))
++ goto check_scheduled;
++
++ if (active_bfqq->entity.parent != bfqq->entity.parent)
++ goto check_scheduled;
++
++ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++ bfq_bfqq_sync(active_bfqq) && bfq_bfqq_sync(bfqq))
++ if ((new_bfqq = bfq_setup_merge(bfqq, active_bfqq)))
++ return new_bfqq; /* Merge with the active queue */
++
++ /*
++ * Check whether there is a cooperator among currently scheduled
++ * queues. The only thing we need is that the bio/request is not
++ * NULL, as we need it to establish whether a cooperator exists.
++ */
++check_scheduled:
++ new_bfqq = bfq_close_cooperator(bfqd, bfqq,
++ bfq_io_struct_pos(io_struct, request));
++ if (new_bfqq)
++ return bfq_setup_merge(bfqq, new_bfqq);
++
++ return NULL;
++}
++
++static inline void
++bfq_bfqq_save_state(struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq->bic == NULL, the queue is already shared or its requests
++ * have already been redirected to a shared queue; both idle window
++ * and weight raising state have already been saved. Do nothing.
++ */
++ if (bfqq->bic == NULL)
++ return;
++ if (bfqq->bic->raising_time_left)
++ /*
++ * This is the queue of a just-started process, and would
++ * deserve weight raising: we set raising_time_left to the full
++ * weight-raising duration to trigger weight-raising when and
++ * if the queue is split and the first request of the queue
++ * is enqueued.
++ */
++ bfqq->bic->raising_time_left = bfq_wrais_duration(bfqq->bfqd);
++ else if (bfqq->raising_coeff > 1) {
++ unsigned long wrais_duration =
++ jiffies - bfqq->last_rais_start_finish;
++ /*
++ * It may happen that a queue's weight raising period lasts
++ * longer than its raising_cur_max_time, as weight raising is
++ * handled only when a request is enqueued or dispatched (it
++ * does not use any timer). If the weight raising period is
++ * about to end, don't save it.
++ */
++ if (bfqq->raising_cur_max_time <= wrais_duration)
++ bfqq->bic->raising_time_left = 0;
++ else
++ bfqq->bic->raising_time_left =
++ bfqq->raising_cur_max_time - wrais_duration;
++ /*
++ * The bfq_queue is becoming shared or the requests of the
++ * process owning the queue are being redirected to a shared
++ * queue. Stop the weight raising period of the queue, as in
++ * both cases it should not be owned by an interactive or soft
++ * real-time application.
++ */
++ bfq_bfqq_end_raising(bfqq);
++ } else
++ bfqq->bic->raising_time_left = 0;
++ bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
++}
++
++static inline void
++bfq_get_bic_reference(struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq->bic has a non-NULL value, the bic to which it belongs
++ * is about to begin using a shared bfq_queue.
++ */
++ if (bfqq->bic)
++ atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
++}
++
++static void
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++ (long unsigned)new_bfqq->pid);
++ /* Save weight raising and idle window of the merged queues */
++ bfq_bfqq_save_state(bfqq);
++ bfq_bfqq_save_state(new_bfqq);
++ /*
++ * Grab a reference to the bic, to prevent it from being destroyed
++ * before being possibly touched by a bfq_split_bfqq().
++ */
++ bfq_get_bic_reference(bfqq);
++ bfq_get_bic_reference(new_bfqq);
++ /* Merge queues (that is, let bic redirect its requests to new_bfqq) */
++ bic_set_bfqq(bic, new_bfqq, 1);
++ bfq_mark_bfqq_coop(new_bfqq);
++ /*
++ * new_bfqq now belongs to at least two bics (it is a shared queue): set
++ * new_bfqq->bic to NULL. bfqq either:
++ * - does not belong to any bic any more, and hence bfqq->bic must
++ * be set to NULL, or
++ * - is a queue whose owning bics have already been redirected to a
++ * different queue, hence the queue is destined to not belong to any
++ * bic soon and bfqq->bic is already NULL (therefore the next
++ * assignment causes no harm).
++ */
++ new_bfqq->bic = NULL;
++ bfqq->bic = NULL;
++ bfq_put_queue(bfqq);
++}
++
++static int bfq_allow_merge(struct request_queue *q, struct request *rq,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq, *new_bfqq;
++
++ /*
++ * Disallow merge of a sync bio into an async request.
++ */
++ if (bfq_bio_sync(bio) && !rq_is_sync(rq))
++ return 0;
++
++ /*
++ * Lookup the bfqq that this bio will be queued with. Allow
++ * merge only if rq is queued there.
++ * Queue lock is held here.
++ */
++ bic = bfq_bic_lookup(bfqd, current->io_context);
++ if (bic == NULL)
++ return 0;
++
++ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++ /*
++ * We take advantage of this function to perform an early merge
++ * of the queues of possible cooperating processes.
++ */
++ if (bfqq != NULL &&
++ (new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false))) {
++ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
++ /*
++ * If we get here, the bio will be queued in the shared queue,
++ * i.e., new_bfqq, so use new_bfqq to decide whether bio and
++ * rq can be merged.
++ */
++ bfqq = new_bfqq;
++ }
++
++ return bfqq == RQ_BFQQ(rq);
++}
++
++static void __bfq_set_active_queue(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ if (bfqq != NULL) {
++ bfq_mark_bfqq_must_alloc(bfqq);
++ bfq_mark_bfqq_budget_new(bfqq);
++ bfq_clear_bfqq_fifo_expire(bfqq);
++
++ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++ bfq_log_bfqq(bfqd, bfqq, "set_active_queue, cur-budget = %lu",
++ bfqq->entity.budget);
++ }
++
++ bfqd->active_queue = bfqq;
++}
++
++/*
++ * Get and set a new active queue for service.
++ */
++static struct bfq_queue *bfq_set_active_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
++
++ __bfq_set_active_queue(bfqd, bfqq);
++ return bfqq;
++}
++
+ /*
+ * If enough samples have been computed, return the current max budget
+ * stored in bfqd, which is dynamically updated according to the
+@@ -1017,63 +1277,6 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
+ return rq;
+ }
+
+-/*
+- * Must be called with the queue_lock held.
+- */
+-static int bfqq_process_refs(struct bfq_queue *bfqq)
+-{
+- int process_refs, io_refs;
+-
+- io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
+- process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
+- BUG_ON(process_refs < 0);
+- return process_refs;
+-}
+-
+-static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+-{
+- int process_refs, new_process_refs;
+- struct bfq_queue *__bfqq;
+-
+- /*
+- * If there are no process references on the new_bfqq, then it is
+- * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
+- * may have dropped their last reference (not just their last process
+- * reference).
+- */
+- if (!bfqq_process_refs(new_bfqq))
+- return;
+-
+- /* Avoid a circular list and skip interim queue merges. */
+- while ((__bfqq = new_bfqq->new_bfqq)) {
+- if (__bfqq == bfqq)
+- return;
+- new_bfqq = __bfqq;
+- }
+-
+- process_refs = bfqq_process_refs(bfqq);
+- new_process_refs = bfqq_process_refs(new_bfqq);
+- /*
+- * If the process for the bfqq has gone away, there is no
+- * sense in merging the queues.
+- */
+- if (process_refs == 0 || new_process_refs == 0)
+- return;
+-
+- /*
+- * Merge in the direction of the lesser amount of work.
+- */
+- if (new_process_refs >= process_refs) {
+- bfqq->new_bfqq = new_bfqq;
+- atomic_add(process_refs, &new_bfqq->ref);
+- } else {
+- new_bfqq->new_bfqq = bfqq;
+- atomic_add(new_process_refs, &bfqq->ref);
+- }
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
+- new_bfqq->pid);
+-}
+-
+ static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
+ {
+ struct bfq_entity *entity = &bfqq->entity;
+@@ -1493,6 +1696,14 @@ static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
+ * is likely to boost the disk throughput);
+ * - the queue is weight-raised (waiting for the request is necessary for
+ * providing the queue with fairness and latency guarantees).
++ *
++ * In any case, idling can be disabled for cooperation issues, if
++ * 1) there is a close cooperator for the queue, or
++ * 2) the queue is shared and some cooperator is likely to be idle (in this
++ * case, by not arming the idle timer, we try to slow down the queue, to
++ * prevent the zones of the disk accessed by the active cooperators to
++ * become too distant from the zone that will be accessed by the currently
++ * idle cooperators).
+ */
+ static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq,
+ int budg_timeout)
+@@ -1507,7 +1718,7 @@ static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq,
+ (bfqd->rq_in_driver == 0 ||
+ budg_timeout ||
+ bfqq->raising_coeff > 1) &&
+- !bfq_close_cooperator(bfqd, bfqq) &&
++ !bfq_close_cooperator(bfqd, bfqq, bfqd->last_position) &&
+ (!bfq_bfqq_coop(bfqq) ||
+ !bfq_bfqq_some_coop_idle(bfqq)) &&
+ !bfq_queue_nonrot_noidle(bfqd, bfqq));
+@@ -1519,7 +1730,7 @@ static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq,
+ */
+ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ {
+- struct bfq_queue *bfqq, *new_bfqq = NULL;
++ struct bfq_queue *bfqq;
+ struct request *next_rq;
+ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
+ int budg_timeout;
+@@ -1530,17 +1741,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+
+ bfq_log_bfqq(bfqd, bfqq, "select_queue: already active queue");
+
+- /*
+- * If another queue has a request waiting within our mean seek
+- * distance, let it run. The expire code will check for close
+- * cooperators and put the close queue at the front of the
+- * service tree. If possible, merge the expiring queue with the
+- * new bfqq.
+- */
+- new_bfqq = bfq_close_cooperator(bfqd, bfqq);
+- if (new_bfqq != NULL && bfqq->new_bfqq == NULL)
+- bfq_setup_merge(bfqq, new_bfqq);
+-
+ budg_timeout = bfq_may_expire_for_budg_timeout(bfqq);
+ if (budg_timeout &&
+ !bfq_bfqq_must_idle(bfqq, budg_timeout))
+@@ -1577,10 +1777,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ bfq_clear_bfqq_wait_request(bfqq);
+ del_timer(&bfqd->idle_slice_timer);
+ }
+- if (new_bfqq == NULL)
+- goto keep_queue;
+- else
+- goto expire;
++ goto keep_queue;
+ }
+ }
+
+@@ -1589,26 +1786,19 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ * queue still has requests in flight or is idling for a new request,
+ * then keep it.
+ */
+- if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
++ if (timer_pending(&bfqd->idle_slice_timer) ||
+ (bfqq->dispatched != 0 &&
+ (bfq_bfqq_idle_window(bfqq) || bfqq->raising_coeff > 1) &&
+- !bfq_queue_nonrot_noidle(bfqd, bfqq)))) {
++ !bfq_queue_nonrot_noidle(bfqd, bfqq))) {
+ bfqq = NULL;
+ goto keep_queue;
+- } else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
+- /*
+- * Expiring the queue because there is a close cooperator,
+- * cancel timer.
+- */
+- bfq_clear_bfqq_wait_request(bfqq);
+- del_timer(&bfqd->idle_slice_timer);
+ }
+
+ reason = BFQ_BFQQ_NO_MORE_REQUESTS;
+ expire:
+ bfq_bfqq_expire(bfqd, bfqq, 0, reason);
+ new_queue:
+- bfqq = bfq_set_active_queue(bfqd, new_bfqq);
++ bfqq = bfq_set_active_queue(bfqd);
+ bfq_log(bfqd, "select_queue: new queue %d returned",
+ bfqq != NULL ? bfqq->pid : 0);
+ keep_queue:
+@@ -1617,9 +1807,8 @@ keep_queue:
+
+ static void update_raising_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ {
++ struct bfq_entity *entity = &bfqq->entity;
+ if (bfqq->raising_coeff > 1) { /* queue is being boosted */
+- struct bfq_entity *entity = &bfqq->entity;
+-
+ bfq_log_bfqq(bfqd, bfqq,
+ "raising period dur %u/%u msec, "
+ "old raising coeff %u, w %d(%d)",
+@@ -1656,12 +1845,14 @@ static void update_raising_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ jiffies_to_msecs(bfqq->
+ raising_cur_max_time));
+ bfq_bfqq_end_raising(bfqq);
+- __bfq_entity_update_weight_prio(
+- bfq_entity_service_tree(entity),
+- entity);
+ }
+ }
+ }
++ /* Update weight both if it must be raised and if it must be lowered */
++ if ((entity->weight > entity->orig_weight) != (bfqq->raising_coeff > 1))
++ __bfq_entity_update_weight_prio(
++ bfq_entity_service_tree(entity),
++ entity);
+ }
+
+ /*
+@@ -1901,6 +2092,25 @@ static void bfq_init_icq(struct io_cq *icq)
+ struct bfq_io_cq *bic = icq_to_bic(icq);
+
+ bic->ttime.last_end_request = jiffies;
++ /*
++ * A newly created bic indicates that the process has just
++ * started doing I/O, and is probably mapping into memory its
++ * executable and libraries: it definitely needs weight raising.
++ * There is however the possibility that the process performs,
++ * for a while, I/O close to some other process. EQM intercepts
++ * this behavior and may merge the queue corresponding to the
++ * process with some other queue, BEFORE the weight of the queue
++ * is raised. Merged queues are not weight-raised (they are assumed
++ * to belong to processes that benefit only from high throughput).
++ * If the merge is basically the consequence of an accident, then
++ * the queue will be split soon and will get back its old weight.
++ * It is then important to write down somewhere that this queue
++ * does need weight raising, even if it did not make it to get its
++ * weight raised before being merged. For this purpose, we overload
++ * the field raising_time_left and assign 1 to it, to mark the queue
++ * as needing weight raising.
++ */
++ bic->raising_time_left = 1;
+ }
+
+ static void bfq_exit_icq(struct io_cq *icq)
+@@ -1914,6 +2124,13 @@ static void bfq_exit_icq(struct io_cq *icq)
+ }
+
+ if (bic->bfqq[BLK_RW_SYNC]) {
++ /*
++ * If the bic is using a shared queue, put the reference
++ * taken on the io_context when the bic started using a
++ * shared bfq_queue.
++ */
++ if (bfq_bfqq_coop(bic->bfqq[BLK_RW_SYNC]))
++ put_io_context(icq->ioc);
+ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
+ bic->bfqq[BLK_RW_SYNC] = NULL;
+ }
+@@ -2211,6 +2428,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
+ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
+ return;
+
++ /* Idle window just restored, statistics are meaningless. */
++ if (bfq_bfqq_just_split(bfqq))
++ return;
++
+ enable_idle = bfq_bfqq_idle_window(bfqq);
+
+ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
+@@ -2251,6 +2472,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
+ !BFQQ_SEEKY(bfqq))
+ bfq_update_idle_window(bfqd, bfqq, bic);
++ bfq_clear_bfqq_just_split(bfqq);
+
+ bfq_log_bfqq(bfqd, bfqq,
+ "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
+@@ -2302,13 +2524,45 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ static void bfq_insert_request(struct request_queue *q, struct request *rq)
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+- struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
+
+ assert_spin_locked(bfqd->queue->queue_lock);
++
++ /*
++ * An unplug may trigger a requeue of a request from the device
++ * driver: make sure we are in process context while trying to
++ * merge two bfq_queues.
++ */
++ if (!in_interrupt() &&
++ (new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true))) {
++ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
++ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
++ /*
++ * Release the request's reference to the old bfqq
++ * and make sure one is taken to the shared queue.
++ */
++ new_bfqq->allocated[rq_data_dir(rq)]++;
++ bfqq->allocated[rq_data_dir(rq)]--;
++ atomic_inc(&new_bfqq->ref);
++ bfq_put_queue(bfqq);
++ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
++ bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq, new_bfqq);
++ rq->elv.priv[1] = new_bfqq;
++ bfqq = new_bfqq;
++ }
++
+ bfq_init_prio_data(bfqq, RQ_BIC(rq));
+
+ bfq_add_rq_rb(rq);
+
++ /*
++ * Here a newly-created bfq_queue has already started a weight-raising
++ * period: clear raising_time_left to prevent bfq_bfqq_save_state()
++ * from assigning it a full weight-raising period. See the detailed
++ * comments about this field in bfq_init_icq().
++ */
++ if (bfqq->bic != NULL)
++ bfqq->bic->raising_time_left = 0;
+ rq_set_fifo_time(rq, jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]);
+ list_add_tail(&rq->queuelist, &bfqq->fifo);
+
+@@ -2371,15 +2625,6 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+ if (bfq_bfqq_budget_new(bfqq))
+ bfq_set_budget_timeout(bfqd);
+
+- /* Idling is disabled also for cooperation issues:
+- * 1) there is a close cooperator for the queue, or
+- * 2) the queue is shared and some cooperator is likely
+- * to be idle (in this case, by not arming the idle timer,
+- * we try to slow down the queue, to prevent the zones
+- * of the disk accessed by the active cooperators to become
+- * too distant from the zone that will be accessed by the
+- * currently idle cooperators)
+- */
+ if (bfq_bfqq_must_idle(bfqq, budg_timeout))
+ bfq_arm_slice_timer(bfqd);
+ else if (budg_timeout)
+@@ -2449,18 +2694,6 @@ static void bfq_put_request(struct request *rq)
+ }
+ }
+
+-static struct bfq_queue *
+-bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+- struct bfq_queue *bfqq)
+-{
+- bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
+- (long unsigned)bfqq->new_bfqq->pid);
+- bic_set_bfqq(bic, bfqq->new_bfqq, 1);
+- bfq_mark_bfqq_coop(bfqq->new_bfqq);
+- bfq_put_queue(bfqq);
+- return bic_to_bfqq(bic, 1);
+-}
+-
+ /*
+ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
+ * was the last process referring to said bfqq.
+@@ -2469,6 +2702,9 @@ static struct bfq_queue *
+ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++
++ put_io_context(bic->icq.ioc);
++
+ if (bfqq_process_refs(bfqq) == 1) {
+ bfqq->pid = current->pid;
+ bfq_clear_bfqq_some_coop_idle(bfqq);
+@@ -2498,6 +2734,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ struct bfq_queue *bfqq;
+ struct bfq_group *bfqg;
+ unsigned long flags;
++ bool split = false;
+
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+@@ -2516,24 +2753,14 @@ new_queue:
+ bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
+ bic_set_bfqq(bic, bfqq, is_sync);
+ } else {
+- /*
+- * If the queue was seeky for too long, break it apart.
+- */
++ /* If the queue was seeky for too long, break it apart. */
+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
+ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
+ bfqq = bfq_split_bfqq(bic, bfqq);
++ split = true;
+ if (!bfqq)
+ goto new_queue;
+ }
+-
+- /*
+- * Check to see if this queue is scheduled to merge with
+- * another closely cooperating queue. The merging of queues
+- * happens here as it must be done in process context.
+- * The reference on new_bfqq was taken in merge_bfqqs.
+- */
+- if (bfqq->new_bfqq != NULL)
+- bfqq = bfq_merge_bfqqs(bfqd, bic, bfqq);
+ }
+
+ bfqq->allocated[rw]++;
+@@ -2544,6 +2771,26 @@ new_queue:
+ rq->elv.priv[0] = bic;
+ rq->elv.priv[1] = bfqq;
+
++ /*
++ * If a bfq_queue has only one process reference, it is owned
++ * by only one bfq_io_cq: we can set the bic field of the
++ * bfq_queue to the address of that structure. Also, if the
++ * queue has just been split, mark a flag so that the
++ * information is available to the other scheduler hooks.
++ */
++ if (bfqq_process_refs(bfqq) == 1) {
++ bfqq->bic = bic;
++ if (split) {
++ bfq_mark_bfqq_just_split(bfqq);
++ /*
++ * If the queue has just been split from a shared queue,
++ * restore the idle window and the possible weight
++ * raising period.
++ */
++ bfq_bfqq_resume_state(bfqq, bic);
++ }
++ }
++
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return 0;
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 03f8061..a0edaa2 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -978,34 +978,6 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ return bfqq;
+ }
+
+-/*
+- * Forced extraction of the given queue.
+- */
+-static void bfq_get_next_queue_forced(struct bfq_data *bfqd,
+- struct bfq_queue *bfqq)
+-{
+- struct bfq_entity *entity;
+- struct bfq_sched_data *sd;
+-
+- BUG_ON(bfqd->active_queue != NULL);
+-
+- entity = &bfqq->entity;
+- /*
+- * Bubble up extraction/update from the leaf to the root.
+- */
+- for_each_entity(entity) {
+- sd = entity->sched_data;
+- bfq_update_budget(entity);
+- bfq_update_vtime(bfq_entity_service_tree(entity));
+- bfq_active_extract(bfq_entity_service_tree(entity), entity);
+- sd->active_entity = entity;
+- sd->next_active = NULL;
+- entity->service = 0;
+- }
+-
+- return;
+-}
+-
+ static void __bfq_bfqd_reset_active(struct bfq_data *bfqd)
+ {
+ if (bfqd->active_bic != NULL) {
+diff --git a/block/bfq.h b/block/bfq.h
+index 48ecde9..bb52975 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -188,6 +188,8 @@ struct bfq_group;
+ * @pid: pid of the process owning the queue, used for logging purposes.
+ * @last_rais_start_finish: last (idle -> weight-raised) transition attempt
+ * @raising_cur_max_time: current max raising time for this queue
++ * @bic: pointer to the bfq_io_cq owning the bfq_queue, set to %NULL if the
++ * queue is shared
+ *
+ * A bfq_queue is a leaf request queue; it can be associated with an
+ * io_context or more (if it is an async one). @cgroup holds a reference
+@@ -231,6 +233,7 @@ struct bfq_queue {
+ sector_t last_request_pos;
+
+ pid_t pid;
++ struct bfq_io_cq *bic;
+
+ /* weight-raising fields */
+ unsigned int raising_cur_max_time;
+@@ -257,12 +260,23 @@ struct bfq_ttime {
+ * @icq: associated io_cq structure
+ * @bfqq: array of two process queues, the sync and the async
+ * @ttime: associated @bfq_ttime struct
++ * @raising_time_left: snapshot of the time left before weight raising ends
++ * for the sync queue associated to this process; this
++ * snapshot is taken to remember this value while the weight
++ * raising is suspended because the queue is merged with a
++ * shared queue, and is used to set @raising_cur_max_time
++ * when the queue is split from the shared queue and its
++ * weight is raised again
++ * @saved_idle_window: same purpose as the previous field for the idle window
+ */
+ struct bfq_io_cq {
+ struct io_cq icq; /* must be the first member */
+ struct bfq_queue *bfqq[2];
+ struct bfq_ttime ttime;
+ int ioprio;
++
++ unsigned int raising_time_left;
++ unsigned int saved_idle_window;
+ };
+
+ /**
+@@ -403,6 +417,7 @@ enum bfqq_state_flags {
+ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
+ BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be split */
+ BFQ_BFQQ_FLAG_some_coop_idle, /* some cooperator is inactive */
++ BFQ_BFQQ_FLAG_just_split, /* queue has just been split */
+ };
+
+ #define BFQ_BFQQ_FNS(name) \
+@@ -430,6 +445,7 @@ BFQ_BFQQ_FNS(budget_new);
+ BFQ_BFQQ_FNS(coop);
+ BFQ_BFQQ_FNS(split_coop);
+ BFQ_BFQQ_FNS(some_coop_idle);
++BFQ_BFQQ_FNS(just_split);
+ #undef BFQ_BFQQ_FNS
+
+ /* Logging facilities. */
+--
+1.8.1.4
+
diff --git a/5000_BFQ-4-block-Switch-from-BFQ-v6r2-for-3.11.0-to-BFQ-v6r2-fo.patch b/5000_BFQ-4-block-Switch-from-BFQ-v6r2-for-3.11.0-to-BFQ-v6r2-fo.patch
new file mode 100755
index 00000000..f07c0978
--- /dev/null
+++ b/5000_BFQ-4-block-Switch-from-BFQ-v6r2-for-3.11.0-to-BFQ-v6r2-fo.patch
@@ -0,0 +1,362 @@
+From 2e1646d06515b7dd1344db547dfcf9a4640dee8e Mon Sep 17 00:00:00 2001
+From: Arianna Avanzini <avanzini.arianna@gmail.com>
+Date: Wed, 11 Sep 2013 22:26:47 +0200
+Subject: [PATCH] block: Switch from BFQ-v6r2 for 3.11.0 to BFQ-v6r2 for
+ 3.12.0-rc1
+
+---
+ block/bfq-cgroup.c | 115 +++++++++++++++++++++++++++++++----------------------
+ block/bfq.h | 2 +
+ 2 files changed, 70 insertions(+), 47 deletions(-)
+
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+index bb9b851..afae4ca 100644
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -16,9 +16,9 @@
+
+ static DEFINE_MUTEX(bfqio_mutex);
+
+-static bool bfqio_is_removed(struct cgroup *cgroup)
++static bool bfqio_is_removed(struct bfqio_cgroup *bgrp)
+ {
+- return test_bit(CGRP_DEAD, &cgroup->flags);
++ return bgrp ? !bgrp->online : false;
+ }
+
+ static struct bfqio_cgroup bfqio_root_cgroup = {
+@@ -38,10 +38,9 @@ static inline void bfq_init_entity(struct bfq_entity *entity,
+ entity->sched_data = &bfqg->sched_data;
+ }
+
+-static struct bfqio_cgroup *cgroup_to_bfqio(struct cgroup *cgroup)
++static struct bfqio_cgroup *css_to_bfqio(struct cgroup_subsys_state *css)
+ {
+- return container_of(cgroup_subsys_state(cgroup, bfqio_subsys_id),
+- struct bfqio_cgroup, css);
++ return css ? container_of(css, struct bfqio_cgroup, css) : NULL;
+ }
+
+ /*
+@@ -103,20 +102,20 @@ static inline void bfq_group_set_parent(struct bfq_group *bfqg,
+ /**
+ * bfq_group_chain_alloc - allocate a chain of groups.
+ * @bfqd: queue descriptor.
+- * @cgroup: the leaf cgroup this chain starts from.
++ * @css: the leaf cgroup_subsys_state this chain starts from.
+ *
+ * Allocate a chain of groups starting from the one belonging to
+ * @cgroup up to the root cgroup. Stop if a cgroup on the chain
+ * to the root already has an allocated group on @bfqd.
+ */
+ static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd,
+- struct cgroup *cgroup)
++ struct cgroup_subsys_state *css)
+ {
+ struct bfqio_cgroup *bgrp;
+ struct bfq_group *bfqg, *prev = NULL, *leaf = NULL;
+
+- for (; cgroup != NULL; cgroup = cgroup->parent) {
+- bgrp = cgroup_to_bfqio(cgroup);
++ for (; css != NULL; css = css->parent) {
++ bgrp = css_to_bfqio(css);
+
+ bfqg = bfqio_lookup_group(bgrp, bfqd);
+ if (bfqg != NULL) {
+@@ -165,7 +164,7 @@ cleanup:
+ /**
+ * bfq_group_chain_link - link an allocated group chain to a cgroup hierarchy.
+ * @bfqd: the queue descriptor.
+- * @cgroup: the leaf cgroup to start from.
++ * @css: the leaf cgroup_subsys_state to start from.
+ * @leaf: the leaf group (to be associated to @cgroup).
+ *
+ * Try to link a chain of groups to a cgroup hierarchy, connecting the
+@@ -177,7 +176,8 @@ cleanup:
+ * per device) while the bfqio_cgroup lock protects the list of groups
+ * belonging to the same cgroup.
+ */
+-static void bfq_group_chain_link(struct bfq_data *bfqd, struct cgroup *cgroup,
++static void bfq_group_chain_link(struct bfq_data *bfqd,
++ struct cgroup_subsys_state *css,
+ struct bfq_group *leaf)
+ {
+ struct bfqio_cgroup *bgrp;
+@@ -186,8 +186,8 @@ static void bfq_group_chain_link(struct bfq_data *bfqd, struct cgroup *cgroup,
+
+ assert_spin_locked(bfqd->queue->queue_lock);
+
+- for (; cgroup != NULL && leaf != NULL; cgroup = cgroup->parent) {
+- bgrp = cgroup_to_bfqio(cgroup);
++ for (; css != NULL && leaf != NULL; css = css->parent) {
++ bgrp = css_to_bfqio(css);
+ next = leaf->bfqd;
+
+ bfqg = bfqio_lookup_group(bgrp, bfqd);
+@@ -205,9 +205,9 @@ static void bfq_group_chain_link(struct bfq_data *bfqd, struct cgroup *cgroup,
+ leaf = next;
+ }
+
+- BUG_ON(cgroup == NULL && leaf != NULL);
+- if (cgroup != NULL && prev != NULL) {
+- bgrp = cgroup_to_bfqio(cgroup);
++ BUG_ON(css == NULL && leaf != NULL);
++ if (css != NULL && prev != NULL) {
++ bgrp = css_to_bfqio(css);
+ bfqg = bfqio_lookup_group(bgrp, bfqd);
+ bfq_group_set_parent(prev, bfqg);
+ }
+@@ -233,18 +233,18 @@ static void bfq_group_chain_link(struct bfq_data *bfqd, struct cgroup *cgroup,
+ * have been successful.
+ */
+ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
+- struct cgroup *cgroup)
++ struct cgroup_subsys_state *css)
+ {
+- struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup);
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
+ struct bfq_group *bfqg;
+
+ bfqg = bfqio_lookup_group(bgrp, bfqd);
+ if (bfqg != NULL)
+ return bfqg;
+
+- bfqg = bfq_group_chain_alloc(bfqd, cgroup);
++ bfqg = bfq_group_chain_alloc(bfqd, css);
+ if (bfqg != NULL)
+- bfq_group_chain_link(bfqd, cgroup, bfqg);
++ bfq_group_chain_link(bfqd, css, bfqg);
+ else
+ bfqg = bfqd->root_group;
+
+@@ -315,8 +315,8 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * time here, at the price of slightly more complex code.
+ */
+ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
+- struct bfq_io_cq *bic,
+- struct cgroup *cgroup)
++ struct bfq_io_cq *bic,
++ struct cgroup_subsys_state *css)
+ {
+ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
+ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
+@@ -324,9 +324,9 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
+ struct bfq_group *bfqg;
+ struct bfqio_cgroup *bgrp;
+
+- bgrp = cgroup_to_bfqio(cgroup);
++ bgrp = css_to_bfqio(css);
+
+- bfqg = bfq_find_alloc_group(bfqd, cgroup);
++ bfqg = bfq_find_alloc_group(bfqd, css);
+ if (async_bfqq != NULL) {
+ entity = &async_bfqq->entity;
+
+@@ -357,14 +357,14 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
+ * moved into its new parent group.
+ */
+ static void bfq_bic_change_cgroup(struct bfq_io_cq *bic,
+- struct cgroup *cgroup)
++ struct cgroup_subsys_state *css)
+ {
+ struct bfq_data *bfqd;
+ unsigned long uninitialized_var(flags);
+
+ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data), &flags);
+ if (bfqd != NULL) {
+- __bfq_bic_change_cgroup(bfqd, bic, cgroup);
++ __bfq_bic_change_cgroup(bfqd, bic, css);
+ bfq_put_bfqd_unlock(bfqd, &flags);
+ }
+ }
+@@ -394,13 +394,13 @@ static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic)
+ {
+ struct bfq_data *bfqd = bic_to_bfqd(bic);
+ struct bfq_group *bfqg;
+- struct cgroup *cgroup;
++ struct cgroup_subsys_state *css;
+
+ BUG_ON(bfqd == NULL);
+
+ rcu_read_lock();
+- cgroup = task_cgroup(current, bfqio_subsys_id);
+- bfqg = __bfq_bic_change_cgroup(bfqd, bic, cgroup);
++ css = task_css(current, bfqio_subsys_id);
++ bfqg = __bfq_bic_change_cgroup(bfqd, bic, css);
+ rcu_read_unlock();
+
+ return bfqg;
+@@ -622,17 +622,16 @@ static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
+ }
+
+ #define SHOW_FUNCTION(__VAR) \
+-static u64 bfqio_cgroup_##__VAR##_read(struct cgroup *cgroup, \
++static u64 bfqio_cgroup_##__VAR##_read(struct cgroup_subsys_state *css, \
+ struct cftype *cftype) \
+ { \
+- struct bfqio_cgroup *bgrp; \
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css); \
+ u64 ret = -ENODEV; \
+ \
+ mutex_lock(&bfqio_mutex); \
+- if (bfqio_is_removed(cgroup)) \
++ if (bfqio_is_removed(bgrp)) \
+ goto out_unlock; \
+ \
+- bgrp = cgroup_to_bfqio(cgroup); \
+ spin_lock_irq(&bgrp->lock); \
+ ret = bgrp->__VAR; \
+ spin_unlock_irq(&bgrp->lock); \
+@@ -648,11 +647,11 @@ SHOW_FUNCTION(ioprio_class);
+ #undef SHOW_FUNCTION
+
+ #define STORE_FUNCTION(__VAR, __MIN, __MAX) \
+-static int bfqio_cgroup_##__VAR##_write(struct cgroup *cgroup, \
++static int bfqio_cgroup_##__VAR##_write(struct cgroup_subsys_state *css,\
+ struct cftype *cftype, \
+ u64 val) \
+ { \
+- struct bfqio_cgroup *bgrp; \
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css); \
+ struct bfq_group *bfqg; \
+ int ret = -EINVAL; \
+ \
+@@ -661,12 +660,10 @@ static int bfqio_cgroup_##__VAR##_write(struct cgroup *cgroup, \
+ \
+ ret = -ENODEV; \
+ mutex_lock(&bfqio_mutex); \
+- if (bfqio_is_removed(cgroup)) \
++ if (bfqio_is_removed(bgrp)) \
+ goto out_unlock; \
+ ret = 0; \
+ \
+- bgrp = cgroup_to_bfqio(cgroup); \
+- \
+ spin_lock_irq(&bgrp->lock); \
+ bgrp->__VAR = (unsigned short)val; \
+ hlist_for_each_entry(bfqg, &bgrp->group_data, group_node) { \
+@@ -713,11 +710,11 @@ static struct cftype bfqio_files[] = {
+ { }, /* terminate */
+ };
+
+-static struct cgroup_subsys_state *bfqio_create(struct cgroup *cgroup)
++static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys_state *parent_css)
+ {
+ struct bfqio_cgroup *bgrp;
+
+- if (cgroup->parent != NULL) {
++ if (parent_css != NULL) {
+ bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL);
+ if (bgrp == NULL)
+ return ERR_PTR(-ENOMEM);
+@@ -740,13 +737,14 @@ static struct cgroup_subsys_state *bfqio_create(struct cgroup *cgroup)
+ * behavior is that a group containing a task that forked using CLONE_IO
+ * will not be destroyed until the tasks sharing the ioc die.
+ */
+-static int bfqio_can_attach(struct cgroup *cgroup, struct cgroup_taskset *tset)
++static int bfqio_can_attach(struct cgroup_subsys_state *css,
++ struct cgroup_taskset *tset)
+ {
+ struct task_struct *task;
+ struct io_context *ioc;
+ int ret = 0;
+
+- cgroup_taskset_for_each(task, cgroup, tset) {
++ cgroup_taskset_for_each(task, css, tset) {
+ /* task_lock() is needed to avoid races with exit_io_context() */
+ task_lock(task);
+ ioc = task->io_context;
+@@ -766,7 +764,8 @@ static int bfqio_can_attach(struct cgroup *cgroup, struct cgroup_taskset *tset)
+ return ret;
+ }
+
+-static void bfqio_attach(struct cgroup *cgroup, struct cgroup_taskset *tset)
++static void bfqio_attach(struct cgroup_subsys_state *css,
++ struct cgroup_taskset *tset)
+ {
+ struct task_struct *task;
+ struct io_context *ioc;
+@@ -776,7 +775,7 @@ static void bfqio_attach(struct cgroup *cgroup, struct cgroup_taskset *tset)
+ * IMPORTANT NOTE: The move of more than one process at a time to a
+ * new group has not yet been tested.
+ */
+- cgroup_taskset_for_each(task, cgroup, tset) {
++ cgroup_taskset_for_each(task, css, tset) {
+ ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
+ if (ioc) {
+ /*
+@@ -787,16 +786,16 @@ static void bfqio_attach(struct cgroup *cgroup, struct cgroup_taskset *tset)
+ if (!strncmp(icq->q->elevator->type->elevator_name,
+ "bfq", ELV_NAME_MAX))
+ bfq_bic_change_cgroup(icq_to_bic(icq),
+- cgroup);
++ css);
+ rcu_read_unlock();
+ put_io_context(ioc);
+ }
+ }
+ }
+
+-static void bfqio_destroy(struct cgroup *cgroup)
++static void bfqio_destroy(struct cgroup_subsys_state *css)
+ {
+- struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup);
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
+ struct hlist_node *tmp;
+ struct bfq_group *bfqg;
+
+@@ -815,9 +814,31 @@ static void bfqio_destroy(struct cgroup *cgroup)
+ kfree(bgrp);
+ }
+
++static int bfqio_css_online(struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++
++ mutex_lock(&bfqio_mutex);
++ bgrp->online = true;
++ mutex_unlock(&bfqio_mutex);
++
++ return 0;
++}
++
++static void bfqio_css_offline(struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++
++ mutex_lock(&bfqio_mutex);
++ bgrp->online = false;
++ mutex_unlock(&bfqio_mutex);
++}
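++
++/*
++ * The cftype read/write callbacks above check bfqio_is_removed() under
++ * bfqio_mutex, so they see a consistent value of @online and treat a
++ * cgroup that has gone through bfqio_css_offline() as removed.
++ */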
++
+ struct cgroup_subsys bfqio_subsys = {
+ .name = "bfqio",
+ .css_alloc = bfqio_create,
++ .css_online = bfqio_css_online,
++ .css_offline = bfqio_css_offline,
+ .can_attach = bfqio_can_attach,
+ .attach = bfqio_attach,
+ .css_free = bfqio_destroy,
+diff --git a/block/bfq.h b/block/bfq.h
+index bb52975..885e62c 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -510,6 +510,7 @@ struct bfq_group {
+ /**
+ * struct bfqio_cgroup - bfq cgroup data structure.
+ * @css: subsystem state for bfq in the containing cgroup.
++ * @online: flag set while the cgroup is online.
+ * @weight: cgroup weight.
+ * @ioprio: cgroup ioprio.
+ * @ioprio_class: cgroup ioprio_class.
+@@ -521,6 +522,7 @@ struct bfq_group {
+ */
+ struct bfqio_cgroup {
+ struct cgroup_subsys_state css;
++ bool online;
+
+ unsigned short weight, ioprio, ioprio_class;
+
+--
+1.8.1.4
+