author     Mike Pagano <mpagano@gentoo.org>    2015-06-23 10:51:55 -0400
committer  Mike Pagano <mpagano@gentoo.org>    2015-06-23 10:51:55 -0400
commit     9e2ba064763853a6ad591a6385de347cf2ed05d6 (patch)
tree       1689e91e2d4100a37e023902df583b06b55e1b53
parent     Fix readme (diff)
download   linux-patches-9e2ba064763853a6ad591a6385de347cf2ed05d6.tar.gz
           linux-patches-9e2ba064763853a6ad591a6385de347cf2ed05d6.tar.bz2
           linux-patches-9e2ba064763853a6ad591a6385de347cf2ed05d6.zip
Readd 4.0 branch
-rw-r--r--   0000_README                                                            72
-rw-r--r--   1000_linux-4.0.1.patch                                                479
-rw-r--r--   1001_linux-4.0.2.patch                                               8587
-rw-r--r--   1002_linux-4.0.3.patch                                               2827
-rw-r--r--   1003_linux-4.0.4.patch                                               2713
-rw-r--r--   1004_linux-4.0.5.patch                                               4937
-rw-r--r--   1500_XATTR_USER_PREFIX.patch                                           54
-rw-r--r--   1510_fs-enable-link-security-restrictions-by-default.patch             22
-rw-r--r--   2600_select-REGMAP_IRQ-for-rt5033.patch                                30
-rw-r--r--   2700_ThinkPad-30-brightness-control-fix.patch                          67
-rw-r--r--   2900_dev-root-proc-mount-fix.patch                                     30
-rw-r--r--   2905_2disk-resume-image-fix.patch                                      24
-rw-r--r--   2910_lz4-compression-fix.patch                                         30
-rw-r--r--   4200_fbcondecor-3.19.patch                                           2119
-rw-r--r--   5000_enable-additional-cpu-optimizations-for-gcc.patch                327
-rw-r--r--   5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-4.0.patch           104
-rw-r--r--   5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-4.0.patch            16966
-rw-r--r--   5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-4.0.0.patch  1222
-rw-r--r--   5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch             402
19 files changed, 31012 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 9018993..0f63559 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,78 @@ EXPERIMENTAL
Individual Patch Descriptions:
--------------------------------------------------------------------------
+Patch: 1000_linux-4.0.1.patch
+From: http://www.kernel.org
+Desc: Linux 4.0.1
+
+Patch: 1001_linux-4.0.2.patch
+From: http://www.kernel.org
+Desc: Linux 4.0.2
+
+Patch: 1002_linux-4.0.3.patch
+From: http://www.kernel.org
+Desc: Linux 4.0.3
+
+Patch: 1003_linux-4.0.4.patch
+From: http://www.kernel.org
+Desc: Linux 4.0.4
+
+Patch: 1004_linux-4.0.5.patch
+From: http://www.kernel.org
+Desc: Linux 4.0.5
+
+Patch: 1500_XATTR_USER_PREFIX.patch
+From: https://bugs.gentoo.org/show_bug.cgi?id=470644
+Desc: Support for namespace user.pax.* on tmpfs.
+
+Patch: 1510_fs-enable-link-security-restrictions-by-default.patch
+From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
+Desc: Enable link security restrictions by default.
+
+Patch: 2600_select-REGMAP_IRQ-for-rt5033.patch
+From: http://git.kernel.org/
+Desc: mfd: rt5033: MFD_RT5033 needs to select REGMAP_IRQ. See bug #546938.
+
+Patch: 2700_ThinkPad-30-brightness-control-fix.patch
+From: Seth Forshee <seth.forshee@canonical.com>
+Desc: ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.
+
+Patch: 2900_dev-root-proc-mount-fix.patch
+From: https://bugs.gentoo.org/show_bug.cgi?id=438380
+Desc: Ensure that /dev/root doesn't appear in /proc/mounts when booting without an initramfs.
+
+Patch: 2905_s2disk-resume-image-fix.patch
+From: Al Viro <viro <at> ZenIV.linux.org.uk>
+Desc: Do not lock when UMH is waiting on current thread spawned by linuxrc. (bug #481344)
+
+Patch: 2910_lz4-compression-fix.patch
+From: https://bugs.gentoo.org/show_bug.cgi?id=546422
+Desc: Fix for lz4 compression regression. Thanks to Christian Xia. See bug #546422.
+
+Patch: 4200_fbcondecor-3.19.patch
+From: http://www.mepiscommunity.org/fbcondecor
+Desc: Bootsplash ported by Marco. (Bug #539616)
+
Patch: 4567_distro-Gentoo-Kconfig.patch
From: Tom Wijsman <TomWij@gentoo.org>
Desc: Add Gentoo Linux support config settings and defaults.
+
+Patch: 5000_enable-additional-cpu-optimizations-for-gcc.patch
+From: https://github.com/graysky2/kernel_gcc_patch/
+Desc: Kernel patch enables gcc < v4.9 optimizations for additional CPUs.
+
+Patch: 5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-4.0.patch
+From: http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc: BFQ v7r7 patch 1 for 4.0: Build, cgroups and kconfig bits
+
+Patch: 5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-4.0.patch
+From: http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc: BFQ v7r7 patch 2 for 4.0: BFQ Scheduler
+
+Patch: 5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-4.0.0.patch
+From: http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc: BFQ v7r7 patch 3 for 4.0: Early Queue Merge (EQM)
+
+Patch: 5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
+From: https://github.com/graysky2/kernel_gcc_patch/
+Desc: Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.
diff --git a/1000_linux-4.0.1.patch b/1000_linux-4.0.1.patch
new file mode 100644
index 0000000..ac58552
--- /dev/null
+++ b/1000_linux-4.0.1.patch
@@ -0,0 +1,479 @@
+diff --git a/Makefile b/Makefile
+index fbd43bfe4445..f499cd2f5738 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 0
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma sheep
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index 4085c4b31047..355d5fea5be9 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -531,20 +531,8 @@ struct bnx2x_fastpath {
+ struct napi_struct napi;
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+- unsigned int state;
+-#define BNX2X_FP_STATE_IDLE 0
+-#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
+-#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
+-#define BNX2X_FP_STATE_DISABLED (1 << 2)
+-#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
+-#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
+-#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+-#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
+-#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
+-#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
+- /* protect state */
+- spinlock_t lock;
+-#endif /* CONFIG_NET_RX_BUSY_POLL */
++ unsigned long busy_poll_state;
++#endif
+
+ union host_hc_status_block status_blk;
+ /* chip independent shortcuts into sb structure */
+@@ -619,104 +607,83 @@ struct bnx2x_fastpath {
+ #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
++
++enum bnx2x_fp_state {
++ BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */
++
++ BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
++ BNX2X_STATE_FP_NAPI_REQ = BIT(1),
++
++ BNX2X_STATE_FP_POLL_BIT = 2,
++ BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */
++
++ BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
++};
++
++static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
+ {
+- spin_lock_init(&fp->lock);
+- fp->state = BNX2X_FP_STATE_IDLE;
++ WRITE_ONCE(fp->busy_poll_state, 0);
+ }
+
+ /* called from the device poll routine to get ownership of a FP */
+ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+ {
+- bool rc = true;
+-
+- spin_lock_bh(&fp->lock);
+- if (fp->state & BNX2X_FP_LOCKED) {
+- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+- fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
+- rc = false;
+- } else {
+- /* we don't care if someone yielded */
+- fp->state = BNX2X_FP_STATE_NAPI;
++ unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
++
++ while (1) {
++ switch (old) {
++ case BNX2X_STATE_FP_POLL:
++ /* make sure bnx2x_fp_lock_poll() wont starve us */
++ set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
++ &fp->busy_poll_state);
++ /* fallthrough */
++ case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
++ return false;
++ default:
++ break;
++ }
++ prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
++ if (unlikely(prev != old)) {
++ old = prev;
++ continue;
++ }
++ return true;
+ }
+- spin_unlock_bh(&fp->lock);
+- return rc;
+ }
+
+-/* returns true is someone tried to get the FP while napi had it */
+-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+ {
+- bool rc = false;
+-
+- spin_lock_bh(&fp->lock);
+- WARN_ON(fp->state &
+- (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
+-
+- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+- rc = true;
+-
+- /* state ==> idle, unless currently disabled */
+- fp->state &= BNX2X_FP_STATE_DISABLED;
+- spin_unlock_bh(&fp->lock);
+- return rc;
++ smp_wmb();
++ fp->busy_poll_state = 0;
+ }
+
+ /* called from bnx2x_low_latency_poll() */
+ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+ {
+- bool rc = true;
+-
+- spin_lock_bh(&fp->lock);
+- if ((fp->state & BNX2X_FP_LOCKED)) {
+- fp->state |= BNX2X_FP_STATE_POLL_YIELD;
+- rc = false;
+- } else {
+- /* preserve yield marks */
+- fp->state |= BNX2X_FP_STATE_POLL;
+- }
+- spin_unlock_bh(&fp->lock);
+- return rc;
++ return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
+ }
+
+-/* returns true if someone tried to get the FP while it was locked */
+-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+ {
+- bool rc = false;
+-
+- spin_lock_bh(&fp->lock);
+- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+-
+- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+- rc = true;
+-
+- /* state ==> idle, unless currently disabled */
+- fp->state &= BNX2X_FP_STATE_DISABLED;
+- spin_unlock_bh(&fp->lock);
+- return rc;
++ smp_mb__before_atomic();
++ clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
+ }
+
+-/* true if a socket is polling, even if it did not get the lock */
++/* true if a socket is polling */
+ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+ {
+- WARN_ON(!(fp->state & BNX2X_FP_OWNED));
+- return fp->state & BNX2X_FP_USER_PEND;
++ return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
+ }
+
+ /* false if fp is currently owned */
+ static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+ {
+- int rc = true;
+-
+- spin_lock_bh(&fp->lock);
+- if (fp->state & BNX2X_FP_OWNED)
+- rc = false;
+- fp->state |= BNX2X_FP_STATE_DISABLED;
+- spin_unlock_bh(&fp->lock);
++ set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
++ return !bnx2x_fp_ll_polling(fp);
+
+- return rc;
+ }
+ #else
+-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
+ {
+ }
+
+@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+ return true;
+ }
+
+-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+ {
+- return false;
+ }
+
+ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+ return false;
+ }
+
+-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+ {
+- return false;
+ }
+
+ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 0a9faa134a9a..2f63467bce46 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
+ int i;
+
+ for_each_rx_queue_cnic(bp, i) {
+- bnx2x_fp_init_lock(&bp->fp[i]);
++ bnx2x_fp_busy_poll_init(&bp->fp[i]);
+ napi_enable(&bnx2x_fp(bp, i, napi));
+ }
+ }
+@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
+ int i;
+
+ for_each_eth_queue(bp, i) {
+- bnx2x_fp_init_lock(&bp->fp[i]);
++ bnx2x_fp_busy_poll_init(&bp->fp[i]);
+ napi_enable(&bnx2x_fp(bp, i, napi));
+ }
+ }
+@@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
+ }
+ }
+
++ bnx2x_fp_unlock_napi(fp);
++
+ /* Fall out from the NAPI loop if needed */
+- if (!bnx2x_fp_unlock_napi(fp) &&
+- !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
++ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+
+ /* No need to update SB for FCoE L2 ring as long as
+ * it's connected to the default SB and the SB
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index f8528a4cf54f..fceb637efd6b 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1713,12 +1713,6 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+ }
+ }
+
+- skb = iptunnel_handle_offloads(skb, udp_sum, type);
+- if (IS_ERR(skb)) {
+- err = -EINVAL;
+- goto err;
+- }
+-
+ skb_scrub_packet(skb, xnet);
+
+ min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+@@ -1738,6 +1732,12 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+ goto err;
+ }
+
++ skb = iptunnel_handle_offloads(skb, udp_sum, type);
++ if (IS_ERR(skb)) {
++ err = -EINVAL;
++ goto err;
++ }
++
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_HF_VNI);
+ vxh->vx_vni = md->vni;
+@@ -1798,10 +1798,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+ }
+ }
+
+- skb = iptunnel_handle_offloads(skb, udp_sum, type);
+- if (IS_ERR(skb))
+- return PTR_ERR(skb);
+-
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + VXLAN_HLEN + sizeof(struct iphdr)
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
+@@ -1817,6 +1813,10 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+ if (WARN_ON(!skb))
+ return -ENOMEM;
+
++ skb = iptunnel_handle_offloads(skb, udp_sum, type);
++ if (IS_ERR(skb))
++ return PTR_ERR(skb);
++
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_HF_VNI);
+ vxh->vx_vni = md->vni;
+diff --git a/fs/exec.c b/fs/exec.c
+index c7f9b733406d..00400cf522dc 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1265,6 +1265,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+ spin_unlock(&p->fs->lock);
+ }
+
++static void bprm_fill_uid(struct linux_binprm *bprm)
++{
++ struct inode *inode;
++ unsigned int mode;
++ kuid_t uid;
++ kgid_t gid;
++
++ /* clear any previous set[ug]id data from a previous binary */
++ bprm->cred->euid = current_euid();
++ bprm->cred->egid = current_egid();
++
++ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
++ return;
++
++ if (task_no_new_privs(current))
++ return;
++
++ inode = file_inode(bprm->file);
++ mode = READ_ONCE(inode->i_mode);
++ if (!(mode & (S_ISUID|S_ISGID)))
++ return;
++
++ /* Be careful if suid/sgid is set */
++ mutex_lock(&inode->i_mutex);
++
++ /* reload atomically mode/uid/gid now that lock held */
++ mode = inode->i_mode;
++ uid = inode->i_uid;
++ gid = inode->i_gid;
++ mutex_unlock(&inode->i_mutex);
++
++ /* We ignore suid/sgid if there are no mappings for them in the ns */
++ if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
++ !kgid_has_mapping(bprm->cred->user_ns, gid))
++ return;
++
++ if (mode & S_ISUID) {
++ bprm->per_clear |= PER_CLEAR_ON_SETID;
++ bprm->cred->euid = uid;
++ }
++
++ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
++ bprm->per_clear |= PER_CLEAR_ON_SETID;
++ bprm->cred->egid = gid;
++ }
++}
++
+ /*
+ * Fill the binprm structure from the inode.
+ * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
+@@ -1273,36 +1320,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+ */
+ int prepare_binprm(struct linux_binprm *bprm)
+ {
+- struct inode *inode = file_inode(bprm->file);
+- umode_t mode = inode->i_mode;
+ int retval;
+
+-
+- /* clear any previous set[ug]id data from a previous binary */
+- bprm->cred->euid = current_euid();
+- bprm->cred->egid = current_egid();
+-
+- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
+- !task_no_new_privs(current) &&
+- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
+- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
+- /* Set-uid? */
+- if (mode & S_ISUID) {
+- bprm->per_clear |= PER_CLEAR_ON_SETID;
+- bprm->cred->euid = inode->i_uid;
+- }
+-
+- /* Set-gid? */
+- /*
+- * If setgid is set but no group execute bit then this
+- * is a candidate for mandatory locking, not a setgid
+- * executable.
+- */
+- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+- bprm->per_clear |= PER_CLEAR_ON_SETID;
+- bprm->cred->egid = inode->i_gid;
+- }
+- }
++ bprm_fill_uid(bprm);
+
+ /* fill in binprm security blob */
+ retval = security_bprm_set_creds(bprm);
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index a28e09c7825d..36508e69e92a 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1380,7 +1380,8 @@ peek_stack:
+ /* tell verifier to check for equivalent states
+ * after every call and jump
+ */
+- env->explored_states[t + 1] = STATE_LIST_MARK;
++ if (t + 1 < insn_cnt)
++ env->explored_states[t + 1] = STATE_LIST_MARK;
+ } else {
+ /* conditional jump with two edges */
+ ret = push_insn(t, t + 1, FALLTHROUGH, env);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 8e4ac97c8477..98d45fe72f51 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4169,19 +4169,21 @@ EXPORT_SYMBOL(skb_try_coalesce);
+ */
+ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ {
+- if (xnet)
+- skb_orphan(skb);
+ skb->tstamp.tv64 = 0;
+ skb->pkt_type = PACKET_HOST;
+ skb->skb_iif = 0;
+ skb->ignore_df = 0;
+ skb_dst_drop(skb);
+- skb->mark = 0;
+ skb_sender_cpu_clear(skb);
+- skb_init_secmark(skb);
+ secpath_reset(skb);
+ nf_reset(skb);
+ nf_reset_trace(skb);
++
++ if (!xnet)
++ return;
++
++ skb_orphan(skb);
++ skb->mark = 0;
+ }
+ EXPORT_SYMBOL_GPL(skb_scrub_packet);
+
+diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
+index 5a4828ba05ad..a566a2e4715b 100644
+--- a/net/ipv4/geneve.c
++++ b/net/ipv4/geneve.c
+@@ -113,10 +113,6 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ int min_headroom;
+ int err;
+
+- skb = udp_tunnel_handle_offloads(skb, csum);
+- if (IS_ERR(skb))
+- return PTR_ERR(skb);
+-
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
+@@ -131,6 +127,10 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ if (unlikely(!skb))
+ return -ENOMEM;
+
++ skb = udp_tunnel_handle_offloads(skb, csum);
++ if (IS_ERR(skb))
++ return PTR_ERR(skb);
++
+ gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
+ geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 1db253e36045..d520492ba698 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2929,6 +2929,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+ }
+ #endif
+
++ /* Do not fool tcpdump (if any), clean our debris */
++ skb->tstamp.tv64 = 0;
+ return skb;
+ }
+ EXPORT_SYMBOL(tcp_make_synack);
diff --git a/1001_linux-4.0.2.patch b/1001_linux-4.0.2.patch
new file mode 100644
index 0000000..38a75b2
--- /dev/null
+++ b/1001_linux-4.0.2.patch
@@ -0,0 +1,8587 @@
+diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
+index 99ca40e..5c204df 100644
+--- a/Documentation/networking/scaling.txt
++++ b/Documentation/networking/scaling.txt
+@@ -282,7 +282,7 @@ following is true:
+
+ - The current CPU's queue head counter >= the recorded tail counter
+ value in rps_dev_flow[i]
+-- The current CPU is unset (equal to RPS_NO_CPU)
++- The current CPU is unset (>= nr_cpu_ids)
+ - The current CPU is offline
+
+ After this check, the packet is sent to the (possibly updated) current
+diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
+index 4ceef53..d1ad9d5 100644
+--- a/Documentation/virtual/kvm/devices/s390_flic.txt
++++ b/Documentation/virtual/kvm/devices/s390_flic.txt
+@@ -27,6 +27,9 @@ Groups:
+ Copies all floating interrupts into a buffer provided by userspace.
+ When the buffer is too small it returns -ENOMEM, which is the indication
+ for userspace to try again with a bigger buffer.
++ -ENOBUFS is returned when the allocation of a kernelspace buffer has
++ failed.
++ -EFAULT is returned when copying data to userspace failed.
+ All interrupts remain pending, i.e. are not deleted from the list of
+ currently pending interrupts.
+ attr->addr contains the userspace address of the buffer into which all
+diff --git a/Makefile b/Makefile
+index f499cd2..0649a60 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 0
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma sheep
+
+diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+index fec1fca..6c4bc53 100644
+--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+@@ -167,7 +167,13 @@
+
+ macb1: ethernet@f802c000 {
+ phy-mode = "rmii";
++ #address-cells = <1>;
++ #size-cells = <0>;
+ status = "okay";
++
++ ethernet-phy@1 {
++ reg = <0x1>;
++ };
+ };
+
+ dbgu: serial@ffffee00 {
+diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
+index a5441d5..3cc8b83 100644
+--- a/arch/arm/boot/dts/dove.dtsi
++++ b/arch/arm/boot/dts/dove.dtsi
+@@ -154,7 +154,7 @@
+
+ uart2: serial@12200 {
+ compatible = "ns16550a";
+- reg = <0x12000 0x100>;
++ reg = <0x12200 0x100>;
+ reg-shift = <2>;
+ interrupts = <9>;
+ clocks = <&core_clk 0>;
+@@ -163,7 +163,7 @@
+
+ uart3: serial@12300 {
+ compatible = "ns16550a";
+- reg = <0x12100 0x100>;
++ reg = <0x12300 0x100>;
+ reg-shift = <2>;
+ interrupts = <10>;
+ clocks = <&core_clk 0>;
+diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
+index f027754..c41600e 100644
+--- a/arch/arm/boot/dts/exynos5250-spring.dts
++++ b/arch/arm/boot/dts/exynos5250-spring.dts
+@@ -429,7 +429,6 @@
+ &mmc_0 {
+ status = "okay";
+ num-slots = <1>;
+- supports-highspeed;
+ broken-cd;
+ card-detect-delay = <200>;
+ samsung,dw-mshc-ciu-div = <3>;
+@@ -437,11 +436,8 @@
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_cd &sd0_bus4 &sd0_bus8>;
+-
+- slot@0 {
+- reg = <0>;
+- bus-width = <8>;
+- };
++ bus-width = <8>;
++ cap-mmc-highspeed;
+ };
+
+ /*
+@@ -451,7 +447,6 @@
+ &mmc_1 {
+ status = "okay";
+ num-slots = <1>;
+- supports-highspeed;
+ broken-cd;
+ card-detect-delay = <200>;
+ samsung,dw-mshc-ciu-div = <3>;
+@@ -459,11 +454,8 @@
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_cd &sd1_bus4>;
+-
+- slot@0 {
+- reg = <0>;
+- bus-width = <4>;
+- };
++ bus-width = <4>;
++ cap-sd-highspeed;
+ };
+
+ &pinctrl_0 {
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index afb9caf..674d03f 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -115,7 +115,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
+index 0db25bc..3a42ac6 100644
+--- a/arch/arm/include/uapi/asm/kvm.h
++++ b/arch/arm/include/uapi/asm/kvm.h
+@@ -195,8 +195,14 @@ struct kvm_arch_memory_slot {
+ #define KVM_ARM_IRQ_CPU_IRQ 0
+ #define KVM_ARM_IRQ_CPU_FIQ 1
+
+-/* Highest supported SPI, from VGIC_NR_IRQS */
++/*
++ * This used to hold the highest supported SPI, but it is now obsolete
++ * and only here to provide source code level compatibility with older
++ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
++ */
++#ifndef __KERNEL__
+ #define KVM_ARM_IRQ_GIC_MAX 127
++#endif
+
+ /* PSCI interface */
+ #define KVM_PSCI_FN_BASE 0x95c1ba5e
+diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
+index c4cc50e..cfb354f 100644
+--- a/arch/arm/kernel/hibernate.c
++++ b/arch/arm/kernel/hibernate.c
+@@ -22,6 +22,7 @@
+ #include <asm/suspend.h>
+ #include <asm/memory.h>
+ #include <asm/sections.h>
++#include "reboot.h"
+
+ int pfn_is_nosave(unsigned long pfn)
+ {
+@@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
+
+ ret = swsusp_save();
+ if (ret == 0)
+- soft_restart(virt_to_phys(cpu_resume));
++ _soft_restart(virt_to_phys(cpu_resume), false);
+ return ret;
+ }
+
+@@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
+ for (pbe = restore_pblist; pbe; pbe = pbe->next)
+ copy_page(pbe->orig_address, pbe->address);
+
+- soft_restart(virt_to_phys(cpu_resume));
++ _soft_restart(virt_to_phys(cpu_resume), false);
+ }
+
+ static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index fdfa3a7..2bf1a16 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -41,6 +41,7 @@
+ #include <asm/system_misc.h>
+ #include <asm/mach/time.h>
+ #include <asm/tls.h>
++#include "reboot.h"
+
+ #ifdef CONFIG_CC_STACKPROTECTOR
+ #include <linux/stackprotector.h>
+@@ -95,7 +96,7 @@ static void __soft_restart(void *addr)
+ BUG();
+ }
+
+-void soft_restart(unsigned long addr)
++void _soft_restart(unsigned long addr, bool disable_l2)
+ {
+ u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
+
+@@ -104,7 +105,7 @@ void soft_restart(unsigned long addr)
+ local_fiq_disable();
+
+ /* Disable the L2 if we're the last man standing. */
+- if (num_online_cpus() == 1)
++ if (disable_l2)
+ outer_disable();
+
+ /* Change to the new stack and continue with the reset. */
+@@ -114,6 +115,11 @@ void soft_restart(unsigned long addr)
+ BUG();
+ }
+
++void soft_restart(unsigned long addr)
++{
++ _soft_restart(addr, num_online_cpus() == 1);
++}
++
+ /*
+ * Function pointers to optional machine specific functions
+ */
+diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h
+new file mode 100644
+index 0000000..c87f058
+--- /dev/null
++++ b/arch/arm/kernel/reboot.h
+@@ -0,0 +1,6 @@
++#ifndef REBOOT_H
++#define REBOOT_H
++
++extern void _soft_restart(unsigned long addr, bool disable_l2);
++
++#endif
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 5560f74..b652af5 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -651,8 +651,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+ if (!irqchip_in_kernel(kvm))
+ return -ENXIO;
+
+- if (irq_num < VGIC_NR_PRIVATE_IRQS ||
+- irq_num > KVM_ARM_IRQ_GIC_MAX)
++ if (irq_num < VGIC_NR_PRIVATE_IRQS)
+ return -EINVAL;
+
+ return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
+diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
+index 8b9f5e2..4f4e222 100644
+--- a/arch/arm/mach-mvebu/pmsu.c
++++ b/arch/arm/mach-mvebu/pmsu.c
+@@ -415,6 +415,9 @@ static __init int armada_38x_cpuidle_init(void)
+ void __iomem *mpsoc_base;
+ u32 reg;
+
++ pr_warn("CPU idle is currently broken on Armada 38x: disabling");
++ return 0;
++
+ np = of_find_compatible_node(NULL, NULL,
+ "marvell,armada-380-coherency-fabric");
+ if (!np)
+@@ -476,6 +479,16 @@ static int __init mvebu_v7_cpu_pm_init(void)
+ return 0;
+ of_node_put(np);
+
++ /*
++ * Currently the CPU idle support for Armada 38x is broken, as
++ * the CPU hotplug uses some of the CPU idle functions it is
++ * broken too, so let's disable it
++ */
++ if (of_machine_is_compatible("marvell,armada380")) {
++ cpu_hotplug_disable();
++ pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling");
++ }
++
+ if (of_machine_is_compatible("marvell,armadaxp"))
+ ret = armada_xp_cpuidle_init();
+ else if (of_machine_is_compatible("marvell,armada370"))
+@@ -489,7 +502,8 @@ static int __init mvebu_v7_cpu_pm_init(void)
+ return ret;
+
+ mvebu_v7_pmsu_enable_l2_powerdown_onidle();
+- platform_device_register(&mvebu_v7_cpuidle_device);
++ if (mvebu_v7_cpuidle_device.name)
++ platform_device_register(&mvebu_v7_cpuidle_device);
+ cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);
+
+ return 0;
+diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
+index 7bc6668..dcbe17f 100644
+--- a/arch/arm/mach-s3c64xx/crag6410.h
++++ b/arch/arm/mach-s3c64xx/crag6410.h
+@@ -14,6 +14,7 @@
+ #include <mach/gpio-samsung.h>
+
+ #define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
++#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
+
+ #define PCA935X_GPIO_BASE GPIO_BOARD_START
+ #define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
+diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
+index 10b913b..65c426b 100644
+--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
++++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
+@@ -554,6 +554,7 @@ static struct wm831x_touch_pdata touch_pdata = {
+
+ static struct wm831x_pdata crag_pmic_pdata = {
+ .wm831x_num = 1,
++ .irq_base = BANFF_PMIC_IRQ_BASE,
+ .gpio_base = BANFF_PMIC_GPIO_BASE,
+ .soft_shutdown = true,
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 1b8e973..a6186c2 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -361,6 +361,27 @@ config ARM64_ERRATUM_832075
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_845719
++ bool "Cortex-A53: 845719: a load might read incorrect data"
++ depends on COMPAT
++ default y
++ help
++ This option adds an alternative code sequence to work around ARM
++ erratum 845719 on Cortex-A53 parts up to r0p4.
++
++ When running a compat (AArch32) userspace on an affected Cortex-A53
++ part, a load at EL0 from a virtual address that matches the bottom 32
++ bits of the virtual address used by a recent load at (AArch64) EL1
++ might return incorrect data.
++
++ The workaround is to write the contextidr_el1 register on exception
++ return to a 32-bit task.
++ Please note that this does not necessarily enable the workaround,
++ as it depends on the alternative framework, which will only patch
++ the kernel if an affected CPU is detected.
++
++ If unsure, say Y.
++
+ endmenu
+
+
+@@ -470,6 +491,10 @@ config HOTPLUG_CPU
+
+ source kernel/Kconfig.preempt
+
++config UP_LATE_INIT
++ def_bool y
++ depends on !SMP
++
+ config HZ
+ int
+ default 100
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 69ceedc..4d2a925 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -48,7 +48,7 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/
+ core-$(CONFIG_XEN) += arch/arm64/xen/
+ core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
+ libs-y := arch/arm64/lib/ $(libs-y)
+-libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
++core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+
+ # Default target when executing plain make
+ KBUILD_IMAGE := Image.gz
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index b6c16d5..3f0c53c 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -23,8 +23,9 @@
+
+ #define ARM64_WORKAROUND_CLEAN_CACHE 0
+ #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
++#define ARM64_WORKAROUND_845719 2
+
+-#define ARM64_NCAPS 2
++#define ARM64_NCAPS 3
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
+index 59e2823..8dcd61e 100644
+--- a/arch/arm64/include/asm/smp_plat.h
++++ b/arch/arm64/include/asm/smp_plat.h
+@@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void)
+ extern u64 __cpu_logical_map[NR_CPUS];
+ #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+
++void __init do_post_cpus_up_work(void);
++
+ #endif /* __ASM_SMP_PLAT_H */
+diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
+index 3ef77a4..bc49a18 100644
+--- a/arch/arm64/include/uapi/asm/kvm.h
++++ b/arch/arm64/include/uapi/asm/kvm.h
+@@ -188,8 +188,14 @@ struct kvm_arch_memory_slot {
+ #define KVM_ARM_IRQ_CPU_IRQ 0
+ #define KVM_ARM_IRQ_CPU_FIQ 1
+
+-/* Highest supported SPI, from VGIC_NR_IRQS */
++/*
++ * This used to hold the highest supported SPI, but it is now obsolete
++ * and only here to provide source code level compatibility with older
++ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
++ */
++#ifndef __KERNEL__
+ #define KVM_ARM_IRQ_GIC_MAX 127
++#endif
+
+ /* PSCI interface */
+ #define KVM_PSCI_FN_BASE 0x95c1ba5e
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index fa62637..ad6d523 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -88,7 +88,16 @@ struct arm64_cpu_capabilities arm64_errata[] = {
+ /* Cortex-A57 r0p0 - r1p2 */
+ .desc = "ARM erratum 832075",
+ .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
+- MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
++ MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
++ (1 << MIDR_VARIANT_SHIFT) | 2),
++ },
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_845719
++ {
++ /* Cortex-A53 r0p[01234] */
++ .desc = "ARM erratum 845719",
++ .capability = ARM64_WORKAROUND_845719,
++ MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
+ },
+ #endif
+ {
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index cf21bb3..959fe87 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -21,8 +21,10 @@
+ #include <linux/init.h>
+ #include <linux/linkage.h>
+
++#include <asm/alternative-asm.h>
+ #include <asm/assembler.h>
+ #include <asm/asm-offsets.h>
++#include <asm/cpufeature.h>
+ #include <asm/errno.h>
+ #include <asm/esr.h>
+ #include <asm/thread_info.h>
+@@ -120,6 +122,24 @@
+ ct_user_enter
+ ldr x23, [sp, #S_SP] // load return stack pointer
+ msr sp_el0, x23
++
++#ifdef CONFIG_ARM64_ERRATUM_845719
++ alternative_insn \
++ "nop", \
++ "tbz x22, #4, 1f", \
++ ARM64_WORKAROUND_845719
++#ifdef CONFIG_PID_IN_CONTEXTIDR
++ alternative_insn \
++ "nop; nop", \
++ "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
++ ARM64_WORKAROUND_845719
++#else
++ alternative_insn \
++ "nop", \
++ "msr contextidr_el1, xzr; 1:", \
++ ARM64_WORKAROUND_845719
++#endif
++#endif
+ .endif
+ msr elr_el1, x21 // set up the return data
+ msr spsr_el1, x22
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 07f9305..c237ffb 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -426,6 +426,7 @@ __create_page_tables:
+ */
+ mov x0, x25
+ add x1, x26, #SWAPPER_DIR_SIZE
++ dmb sy
+ bl __inval_cache_range
+
+ mov lr, x27
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index e8420f6..781f469 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -207,6 +207,18 @@ static void __init smp_build_mpidr_hash(void)
+ }
+ #endif
+
++void __init do_post_cpus_up_work(void)
++{
++ apply_alternatives_all();
++}
++
++#ifdef CONFIG_UP_LATE_INIT
++void __init up_late_init(void)
++{
++ do_post_cpus_up_work();
++}
++#endif /* CONFIG_UP_LATE_INIT */
++
+ static void __init setup_processor(void)
+ {
+ struct cpu_info *cpu_info;
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 328b8ce..4257369 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -309,7 +309,7 @@ void cpu_die(void)
+ void __init smp_cpus_done(unsigned int max_cpus)
+ {
+ pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
+- apply_alternatives_all();
++ do_post_cpus_up_work();
+ }
+
+ void __init smp_prepare_boot_cpu(void)
+diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
+index 356ee84..04845aa 100644
+--- a/arch/c6x/kernel/time.c
++++ b/arch/c6x/kernel/time.c
+@@ -49,7 +49,7 @@ u64 sched_clock(void)
+ return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
+ }
+
+-void time_init(void)
++void __init time_init(void)
+ {
+ u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
+
+diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
+index e41c56e..1e38f0e 100644
+--- a/arch/mips/include/asm/asm-eva.h
++++ b/arch/mips/include/asm/asm-eva.h
+@@ -11,6 +11,36 @@
+ #define __ASM_ASM_EVA_H
+
+ #ifndef __ASSEMBLY__
++
++/* Kernel variants */
++
++#define kernel_cache(op, base) "cache " op ", " base "\n"
++#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
++#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
++#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
++#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n"
++#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n"
++#define kernel_lh(reg, addr) "lh " reg ", " addr "\n"
++#define kernel_lb(reg, addr) "lb " reg ", " addr "\n"
++#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n"
++#define kernel_sw(reg, addr) "sw " reg ", " addr "\n"
++#define kernel_swl(reg, addr) "swl " reg ", " addr "\n"
++#define kernel_swr(reg, addr) "swr " reg ", " addr "\n"
++#define kernel_sh(reg, addr) "sh " reg ", " addr "\n"
++#define kernel_sb(reg, addr) "sb " reg ", " addr "\n"
++
++#ifdef CONFIG_32BIT
++/*
++ * No 'sd' or 'ld' instructions in 32-bit but the code will
++ * do the correct thing
++ */
++#define kernel_sd(reg, addr) user_sw(reg, addr)
++#define kernel_ld(reg, addr) user_lw(reg, addr)
++#else
++#define kernel_sd(reg, addr) "sd " reg", " addr "\n"
++#define kernel_ld(reg, addr) "ld " reg", " addr "\n"
++#endif /* CONFIG_32BIT */
++
+ #ifdef CONFIG_EVA
+
+ #define __BUILD_EVA_INSN(insn, reg, addr) \
+@@ -41,37 +71,60 @@
+
+ #else
+
+-#define user_cache(op, base) "cache " op ", " base "\n"
+-#define user_ll(reg, addr) "ll " reg ", " addr "\n"
+-#define user_sc(reg, addr) "sc " reg ", " addr "\n"
+-#define user_lw(reg, addr) "lw " reg ", " addr "\n"
+-#define user_lwl(reg, addr) "lwl " reg ", " addr "\n"
+-#define user_lwr(reg, addr) "lwr " reg ", " addr "\n"
+-#define user_lh(reg, addr) "lh " reg ", " addr "\n"
+-#define user_lb(reg, addr) "lb " reg ", " addr "\n"
+-#define user_lbu(reg, addr) "lbu " reg ", " addr "\n"
+-#define user_sw(reg, addr) "sw " reg ", " addr "\n"
+-#define user_swl(reg, addr) "swl " reg ", " addr "\n"
+-#define user_swr(reg, addr) "swr " reg ", " addr "\n"
+-#define user_sh(reg, addr) "sh " reg ", " addr "\n"
+-#define user_sb(reg, addr) "sb " reg ", " addr "\n"
++#define user_cache(op, base) kernel_cache(op, base)
++#define user_ll(reg, addr) kernel_ll(reg, addr)
++#define user_sc(reg, addr) kernel_sc(reg, addr)
++#define user_lw(reg, addr) kernel_lw(reg, addr)
++#define user_lwl(reg, addr) kernel_lwl(reg, addr)
++#define user_lwr(reg, addr) kernel_lwr(reg, addr)
++#define user_lh(reg, addr) kernel_lh(reg, addr)
++#define user_lb(reg, addr) kernel_lb(reg, addr)
++#define user_lbu(reg, addr) kernel_lbu(reg, addr)
++#define user_sw(reg, addr) kernel_sw(reg, addr)
++#define user_swl(reg, addr) kernel_swl(reg, addr)
++#define user_swr(reg, addr) kernel_swr(reg, addr)
++#define user_sh(reg, addr) kernel_sh(reg, addr)
++#define user_sb(reg, addr) kernel_sb(reg, addr)
+
+ #ifdef CONFIG_32BIT
+-/*
+- * No 'sd' or 'ld' instructions in 32-bit but the code will
+- * do the correct thing
+- */
+-#define user_sd(reg, addr) user_sw(reg, addr)
+-#define user_ld(reg, addr) user_lw(reg, addr)
++#define user_sd(reg, addr) kernel_sw(reg, addr)
++#define user_ld(reg, addr) kernel_lw(reg, addr)
+ #else
+-#define user_sd(reg, addr) "sd " reg", " addr "\n"
+-#define user_ld(reg, addr) "ld " reg", " addr "\n"
++#define user_sd(reg, addr) kernel_sd(reg, addr)
++#define user_ld(reg, addr) kernel_ld(reg, addr)
+ #endif /* CONFIG_32BIT */
+
+ #endif /* CONFIG_EVA */
+
+ #else /* __ASSEMBLY__ */
+
++#define kernel_cache(op, base) cache op, base
++#define kernel_ll(reg, addr) ll reg, addr
++#define kernel_sc(reg, addr) sc reg, addr
++#define kernel_lw(reg, addr) lw reg, addr
++#define kernel_lwl(reg, addr) lwl reg, addr
++#define kernel_lwr(reg, addr) lwr reg, addr
++#define kernel_lh(reg, addr) lh reg, addr
++#define kernel_lb(reg, addr) lb reg, addr
++#define kernel_lbu(reg, addr) lbu reg, addr
++#define kernel_sw(reg, addr) sw reg, addr
++#define kernel_swl(reg, addr) swl reg, addr
++#define kernel_swr(reg, addr) swr reg, addr
++#define kernel_sh(reg, addr) sh reg, addr
++#define kernel_sb(reg, addr) sb reg, addr
++
++#ifdef CONFIG_32BIT
++/*
++ * No 'sd' or 'ld' instructions in 32-bit but the code will
++ * do the correct thing
++ */
++#define kernel_sd(reg, addr) user_sw(reg, addr)
++#define kernel_ld(reg, addr) user_lw(reg, addr)
++#else
++#define kernel_sd(reg, addr) sd reg, addr
++#define kernel_ld(reg, addr) ld reg, addr
++#endif /* CONFIG_32BIT */
++
+ #ifdef CONFIG_EVA
+
+ #define __BUILD_EVA_INSN(insn, reg, addr) \
+@@ -101,31 +154,27 @@
+ #define user_sd(reg, addr) user_sw(reg, addr)
+ #else
+
+-#define user_cache(op, base) cache op, base
+-#define user_ll(reg, addr) ll reg, addr
+-#define user_sc(reg, addr) sc reg, addr
+-#define user_lw(reg, addr) lw reg, addr
+-#define user_lwl(reg, addr) lwl reg, addr
+-#define user_lwr(reg, addr) lwr reg, addr
+-#define user_lh(reg, addr) lh reg, addr
+-#define user_lb(reg, addr) lb reg, addr
+-#define user_lbu(reg, addr) lbu reg, addr
+-#define user_sw(reg, addr) sw reg, addr
+-#define user_swl(reg, addr) swl reg, addr
+-#define user_swr(reg, addr) swr reg, addr
+-#define user_sh(reg, addr) sh reg, addr
+-#define user_sb(reg, addr) sb reg, addr
++#define user_cache(op, base) kernel_cache(op, base)
++#define user_ll(reg, addr) kernel_ll(reg, addr)
++#define user_sc(reg, addr) kernel_sc(reg, addr)
++#define user_lw(reg, addr) kernel_lw(reg, addr)
++#define user_lwl(reg, addr) kernel_lwl(reg, addr)
++#define user_lwr(reg, addr) kernel_lwr(reg, addr)
++#define user_lh(reg, addr) kernel_lh(reg, addr)
++#define user_lb(reg, addr) kernel_lb(reg, addr)
++#define user_lbu(reg, addr) kernel_lbu(reg, addr)
++#define user_sw(reg, addr) kernel_sw(reg, addr)
++#define user_swl(reg, addr) kernel_swl(reg, addr)
++#define user_swr(reg, addr) kernel_swr(reg, addr)
++#define user_sh(reg, addr) kernel_sh(reg, addr)
++#define user_sb(reg, addr) kernel_sb(reg, addr)
+
+ #ifdef CONFIG_32BIT
+-/*
+- * No 'sd' or 'ld' instructions in 32-bit but the code will
+- * do the correct thing
+- */
+-#define user_sd(reg, addr) user_sw(reg, addr)
+-#define user_ld(reg, addr) user_lw(reg, addr)
++#define user_sd(reg, addr) kernel_sw(reg, addr)
++#define user_ld(reg, addr) kernel_lw(reg, addr)
+ #else
+-#define user_sd(reg, addr) sd reg, addr
+-#define user_ld(reg, addr) ld reg, addr
++#define user_sd(reg, addr) kernel_sd(reg, addr)
++#define user_ld(reg, addr) kernel_sd(reg, addr)
+ #endif /* CONFIG_32BIT */
+
+ #endif /* CONFIG_EVA */
+diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
+index dd083e9..9f26b07 100644
+--- a/arch/mips/include/asm/fpu.h
++++ b/arch/mips/include/asm/fpu.h
+@@ -170,6 +170,7 @@ static inline void lose_fpu(int save)
+ }
+ disable_msa();
+ clear_thread_flag(TIF_USEDMSA);
++ __disable_fpu();
+ } else if (is_fpu_owner()) {
+ if (save)
+ _save_fp(current);
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index ac4fc71..f722b05 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -322,6 +322,7 @@ enum mips_mmu_types {
+ #define T_TRAP 13 /* Trap instruction */
+ #define T_VCEI 14 /* Virtual coherency exception */
+ #define T_FPE 15 /* Floating point exception */
++#define T_MSADIS 21 /* MSA disabled exception */
+ #define T_WATCH 23 /* Watch address reference */
+ #define T_VCED 31 /* Virtual coherency data */
+
+@@ -578,6 +579,7 @@ struct kvm_mips_callbacks {
+ int (*handle_syscall)(struct kvm_vcpu *vcpu);
+ int (*handle_res_inst)(struct kvm_vcpu *vcpu);
+ int (*handle_break)(struct kvm_vcpu *vcpu);
++ int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
+ int (*vm_init)(struct kvm *kvm);
+ int (*vcpu_init)(struct kvm_vcpu *vcpu);
+ int (*vcpu_setup)(struct kvm_vcpu *vcpu);
+diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
+index bbb6969..7659da2 100644
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -109,10 +109,11 @@ static u32 unaligned_action;
+ extern void show_registers(struct pt_regs *regs);
+
+ #ifdef __BIG_ENDIAN
+-#define LoadHW(addr, value, res) \
++#define _LoadHW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ (".set\tnoat\n" \
+- "1:\t"user_lb("%0", "0(%2)")"\n" \
+- "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
++ "1:\t"type##_lb("%0", "0(%2)")"\n" \
++ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -127,13 +128,15 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+ #ifndef CONFIG_CPU_MIPSR6
+-#define LoadW(addr, value, res) \
++#define _LoadW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+- "1:\t"user_lwl("%0", "(%2)")"\n" \
+- "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
++ "1:\t"type##_lwl("%0", "(%2)")"\n" \
++ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+@@ -146,21 +149,24 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #else
+ /* MIPSR6 has no lwl instruction */
+-#define LoadW(addr, value, res) \
++#define _LoadW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n" \
+ ".set\tnoat\n\t" \
+- "1:"user_lb("%0", "0(%2)")"\n\t" \
+- "2:"user_lbu("$1", "1(%2)")"\n\t" \
++ "1:"type##_lb("%0", "0(%2)")"\n\t" \
++ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "3:"user_lbu("$1", "2(%2)")"\n\t" \
++ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "4:"user_lbu("$1", "3(%2)")"\n\t" \
++ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -178,14 +184,17 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+
+-#define LoadHWU(addr, value, res) \
++#define _LoadHWU(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+- "1:\t"user_lbu("%0", "0(%2)")"\n" \
+- "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
++ "1:\t"type##_lbu("%0", "0(%2)")"\n" \
++ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -201,13 +210,15 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+ #ifndef CONFIG_CPU_MIPSR6
+-#define LoadWU(addr, value, res) \
++#define _LoadWU(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+- "1:\t"user_lwl("%0", "(%2)")"\n" \
+- "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
++ "1:\t"type##_lwl("%0", "(%2)")"\n" \
++ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
+ "dsll\t%0, %0, 32\n\t" \
+ "dsrl\t%0, %0, 32\n\t" \
+ "li\t%1, 0\n" \
+@@ -222,9 +233,11 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+-#define LoadDW(addr, value, res) \
++#define _LoadDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ "1:\tldl\t%0, (%2)\n" \
+ "2:\tldr\t%0, 7(%2)\n\t" \
+@@ -240,21 +253,24 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #else
+ /* MIPSR6 has not lwl and ldl instructions */
+-#define LoadWU(addr, value, res) \
++#define _LoadWU(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+- "1:"user_lbu("%0", "0(%2)")"\n\t" \
+- "2:"user_lbu("$1", "1(%2)")"\n\t" \
++ "1:"type##_lbu("%0", "0(%2)")"\n\t" \
++ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "3:"user_lbu("$1", "2(%2)")"\n\t" \
++ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "4:"user_lbu("$1", "3(%2)")"\n\t" \
++ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -272,9 +288,11 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+-#define LoadDW(addr, value, res) \
++#define _LoadDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+@@ -319,16 +337,19 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+
+
+-#define StoreHW(addr, value, res) \
++#define _StoreHW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+- "1:\t"user_sb("%1", "1(%2)")"\n" \
++ "1:\t"type##_sb("%1", "1(%2)")"\n" \
+ "srl\t$1, %1, 0x8\n" \
+- "2:\t"user_sb("$1", "0(%2)")"\n" \
++ "2:\t"type##_sb("$1", "0(%2)")"\n" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+@@ -342,13 +363,15 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+- : "r" (value), "r" (addr), "i" (-EFAULT));
++ : "r" (value), "r" (addr), "i" (-EFAULT));\
++} while(0)
+
+ #ifndef CONFIG_CPU_MIPSR6
+-#define StoreW(addr, value, res) \
++#define _StoreW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+- "1:\t"user_swl("%1", "(%2)")"\n" \
+- "2:\t"user_swr("%1", "3(%2)")"\n\t" \
++ "1:\t"type##_swl("%1", "(%2)")"\n" \
++ "2:\t"type##_swr("%1", "3(%2)")"\n\t"\
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+@@ -361,9 +384,11 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+- : "r" (value), "r" (addr), "i" (-EFAULT));
++ : "r" (value), "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+-#define StoreDW(addr, value, res) \
++#define _StoreDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ "1:\tsdl\t%1,(%2)\n" \
+ "2:\tsdr\t%1, 7(%2)\n\t" \
+@@ -379,20 +404,23 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+- : "r" (value), "r" (addr), "i" (-EFAULT));
++ : "r" (value), "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #else
+ /* MIPSR6 has no swl and sdl instructions */
+-#define StoreW(addr, value, res) \
++#define _StoreW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+- "1:"user_sb("%1", "3(%2)")"\n\t" \
++ "1:"type##_sb("%1", "3(%2)")"\n\t" \
+ "srl\t$1, %1, 0x8\n\t" \
+- "2:"user_sb("$1", "2(%2)")"\n\t" \
++ "2:"type##_sb("$1", "2(%2)")"\n\t" \
+ "srl\t$1, $1, 0x8\n\t" \
+- "3:"user_sb("$1", "1(%2)")"\n\t" \
++ "3:"type##_sb("$1", "1(%2)")"\n\t" \
+ "srl\t$1, $1, 0x8\n\t" \
+- "4:"user_sb("$1", "0(%2)")"\n\t" \
++ "4:"type##_sb("$1", "0(%2)")"\n\t" \
+ ".set\tpop\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+@@ -409,9 +437,11 @@ extern void show_registers(struct pt_regs *regs);
+ ".previous" \
+ : "=&r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+- : "memory");
++ : "memory"); \
++} while(0)
+
+ #define StoreDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+@@ -451,15 +481,18 @@ extern void show_registers(struct pt_regs *regs);
+ ".previous" \
+ : "=&r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+- : "memory");
++ : "memory"); \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+
+ #else /* __BIG_ENDIAN */
+
+-#define LoadHW(addr, value, res) \
++#define _LoadHW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ (".set\tnoat\n" \
+- "1:\t"user_lb("%0", "1(%2)")"\n" \
+- "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
++ "1:\t"type##_lb("%0", "1(%2)")"\n" \
++ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -474,13 +507,15 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+ #ifndef CONFIG_CPU_MIPSR6
+-#define LoadW(addr, value, res) \
++#define _LoadW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+- "1:\t"user_lwl("%0", "3(%2)")"\n" \
+- "2:\t"user_lwr("%0", "(%2)")"\n\t" \
++ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
++ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+@@ -493,21 +528,24 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #else
+ /* MIPSR6 has no lwl instruction */
+-#define LoadW(addr, value, res) \
++#define _LoadW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n" \
+ ".set\tnoat\n\t" \
+- "1:"user_lb("%0", "3(%2)")"\n\t" \
+- "2:"user_lbu("$1", "2(%2)")"\n\t" \
++ "1:"type##_lb("%0", "3(%2)")"\n\t" \
++ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "3:"user_lbu("$1", "1(%2)")"\n\t" \
++ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "4:"user_lbu("$1", "0(%2)")"\n\t" \
++ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -525,15 +563,18 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+
+
+-#define LoadHWU(addr, value, res) \
++#define _LoadHWU(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+- "1:\t"user_lbu("%0", "1(%2)")"\n" \
+- "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
++ "1:\t"type##_lbu("%0", "1(%2)")"\n" \
++ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -549,13 +590,15 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+ #ifndef CONFIG_CPU_MIPSR6
+-#define LoadWU(addr, value, res) \
++#define _LoadWU(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+- "1:\t"user_lwl("%0", "3(%2)")"\n" \
+- "2:\t"user_lwr("%0", "(%2)")"\n\t" \
++ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
++ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
+ "dsll\t%0, %0, 32\n\t" \
+ "dsrl\t%0, %0, 32\n\t" \
+ "li\t%1, 0\n" \
+@@ -570,9 +613,11 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+-#define LoadDW(addr, value, res) \
++#define _LoadDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ "1:\tldl\t%0, 7(%2)\n" \
+ "2:\tldr\t%0, (%2)\n\t" \
+@@ -588,21 +633,24 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #else
+ /* MIPSR6 has not lwl and ldl instructions */
+-#define LoadWU(addr, value, res) \
++#define _LoadWU(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+- "1:"user_lbu("%0", "3(%2)")"\n\t" \
+- "2:"user_lbu("$1", "2(%2)")"\n\t" \
++ "1:"type##_lbu("%0", "3(%2)")"\n\t" \
++ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "3:"user_lbu("$1", "1(%2)")"\n\t" \
++ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "4:"user_lbu("$1", "0(%2)")"\n\t" \
++ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -620,9 +668,11 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+-#define LoadDW(addr, value, res) \
++#define _LoadDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+@@ -667,15 +717,17 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+ #endif /* CONFIG_CPU_MIPSR6 */
+
+-#define StoreHW(addr, value, res) \
++#define _StoreHW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+- "1:\t"user_sb("%1", "0(%2)")"\n" \
++ "1:\t"type##_sb("%1", "0(%2)")"\n" \
+ "srl\t$1,%1, 0x8\n" \
+- "2:\t"user_sb("$1", "1(%2)")"\n" \
++ "2:\t"type##_sb("$1", "1(%2)")"\n" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+@@ -689,12 +741,15 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+- : "r" (value), "r" (addr), "i" (-EFAULT));
++ : "r" (value), "r" (addr), "i" (-EFAULT));\
++} while(0)
++
+ #ifndef CONFIG_CPU_MIPSR6
+-#define StoreW(addr, value, res) \
++#define _StoreW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+- "1:\t"user_swl("%1", "3(%2)")"\n" \
+- "2:\t"user_swr("%1", "(%2)")"\n\t" \
++ "1:\t"type##_swl("%1", "3(%2)")"\n" \
++ "2:\t"type##_swr("%1", "(%2)")"\n\t"\
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+@@ -707,9 +762,11 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+- : "r" (value), "r" (addr), "i" (-EFAULT));
++ : "r" (value), "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+-#define StoreDW(addr, value, res) \
++#define _StoreDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ "1:\tsdl\t%1, 7(%2)\n" \
+ "2:\tsdr\t%1, (%2)\n\t" \
+@@ -725,20 +782,23 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+- : "r" (value), "r" (addr), "i" (-EFAULT));
++ : "r" (value), "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #else
+ /* MIPSR6 has no swl and sdl instructions */
+-#define StoreW(addr, value, res) \
++#define _StoreW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+- "1:"user_sb("%1", "0(%2)")"\n\t" \
++ "1:"type##_sb("%1", "0(%2)")"\n\t" \
+ "srl\t$1, %1, 0x8\n\t" \
+- "2:"user_sb("$1", "1(%2)")"\n\t" \
++ "2:"type##_sb("$1", "1(%2)")"\n\t" \
+ "srl\t$1, $1, 0x8\n\t" \
+- "3:"user_sb("$1", "2(%2)")"\n\t" \
++ "3:"type##_sb("$1", "2(%2)")"\n\t" \
+ "srl\t$1, $1, 0x8\n\t" \
+- "4:"user_sb("$1", "3(%2)")"\n\t" \
++ "4:"type##_sb("$1", "3(%2)")"\n\t" \
+ ".set\tpop\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+@@ -755,9 +815,11 @@ extern void show_registers(struct pt_regs *regs);
+ ".previous" \
+ : "=&r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+- : "memory");
++ : "memory"); \
++} while(0)
+
+-#define StoreDW(addr, value, res) \
++#define _StoreDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+@@ -797,10 +859,28 @@ extern void show_registers(struct pt_regs *regs);
+ ".previous" \
+ : "=&r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+- : "memory");
++ : "memory"); \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+ #endif
+
++#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
++#define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user)
++#define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel)
++#define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user)
++#define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel)
++#define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user)
++#define LoadW(addr, value, res) _LoadW(addr, value, res, kernel)
++#define LoadWE(addr, value, res) _LoadW(addr, value, res, user)
++#define LoadDW(addr, value, res) _LoadDW(addr, value, res)
++
++#define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel)
++#define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user)
++#define StoreW(addr, value, res) _StoreW(addr, value, res, kernel)
++#define StoreWE(addr, value, res) _StoreW(addr, value, res, user)
++#define StoreDW(addr, value, res) _StoreDW(addr, value, res)
++
+ static void emulate_load_store_insn(struct pt_regs *regs,
+ void __user *addr, unsigned int __user *pc)
+ {
+@@ -872,7 +952,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ set_fs(seg);
+ goto sigbus;
+ }
+- LoadHW(addr, value, res);
++ LoadHWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+@@ -885,7 +965,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ set_fs(seg);
+ goto sigbus;
+ }
+- LoadW(addr, value, res);
++ LoadWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+@@ -898,7 +978,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ set_fs(seg);
+ goto sigbus;
+ }
+- LoadHWU(addr, value, res);
++ LoadHWUE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+@@ -913,7 +993,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+- StoreHW(addr, value, res);
++ StoreHWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+@@ -926,7 +1006,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+- StoreW(addr, value, res);
++ StoreWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+@@ -943,7 +1023,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+
+- LoadHW(addr, value, res);
++ if (config_enabled(CONFIG_EVA)) {
++ if (segment_eq(get_fs(), get_ds()))
++ LoadHW(addr, value, res);
++ else
++ LoadHWE(addr, value, res);
++ } else {
++ LoadHW(addr, value, res);
++ }
++
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+@@ -954,7 +1042,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ if (!access_ok(VERIFY_READ, addr, 4))
+ goto sigbus;
+
+- LoadW(addr, value, res);
++ if (config_enabled(CONFIG_EVA)) {
++ if (segment_eq(get_fs(), get_ds()))
++ LoadW(addr, value, res);
++ else
++ LoadWE(addr, value, res);
++ } else {
++ LoadW(addr, value, res);
++ }
++
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+@@ -965,7 +1061,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+
+- LoadHWU(addr, value, res);
++ if (config_enabled(CONFIG_EVA)) {
++ if (segment_eq(get_fs(), get_ds()))
++ LoadHWU(addr, value, res);
++ else
++ LoadHWUE(addr, value, res);
++ } else {
++ LoadHWU(addr, value, res);
++ }
++
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+@@ -1024,7 +1128,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.i_format.rt];
+- StoreHW(addr, value, res);
++
++ if (config_enabled(CONFIG_EVA)) {
++ if (segment_eq(get_fs(), get_ds()))
++ StoreHW(addr, value, res);
++ else
++ StoreHWE(addr, value, res);
++ } else {
++ StoreHW(addr, value, res);
++ }
++
+ if (res)
+ goto fault;
+ break;
+@@ -1035,7 +1148,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.i_format.rt];
+- StoreW(addr, value, res);
++
++ if (config_enabled(CONFIG_EVA)) {
++ if (segment_eq(get_fs(), get_ds()))
++ StoreW(addr, value, res);
++ else
++ StoreWE(addr, value, res);
++ } else {
++ StoreW(addr, value, res);
++ }
++
+ if (res)
+ goto fault;
+ break;
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index fb3e8df..838d3a6 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -2176,6 +2176,7 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+ case T_SYSCALL:
+ case T_BREAK:
+ case T_RES_INST:
++ case T_MSADIS:
+ break;
+
+ case T_COP_UNUSABLE:
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index c9eccf5..f5e7dda 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -1119,6 +1119,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ ret = kvm_mips_callbacks->handle_break(vcpu);
+ break;
+
++ case T_MSADIS:
++ ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
++ break;
++
+ default:
+ kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
+ exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
+index fd7257b..4372cc8 100644
+--- a/arch/mips/kvm/trap_emul.c
++++ b/arch/mips/kvm/trap_emul.c
+@@ -330,6 +330,33 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+ return ret;
+ }
+
++static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
++{
++ struct kvm_run *run = vcpu->run;
++ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
++ unsigned long cause = vcpu->arch.host_cp0_cause;
++ enum emulation_result er = EMULATE_DONE;
++ int ret = RESUME_GUEST;
++
++ /* No MSA supported in guest, guest reserved instruction exception */
++ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
++
++ switch (er) {
++ case EMULATE_DONE:
++ ret = RESUME_GUEST;
++ break;
++
++ case EMULATE_FAIL:
++ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
++ ret = RESUME_HOST;
++ break;
++
++ default:
++ BUG();
++ }
++ return ret;
++}
++
+ static int kvm_trap_emul_vm_init(struct kvm *kvm)
+ {
+ return 0;
+@@ -470,6 +497,7 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+ .handle_syscall = kvm_trap_emul_handle_syscall,
+ .handle_res_inst = kvm_trap_emul_handle_res_inst,
+ .handle_break = kvm_trap_emul_handle_break,
++ .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
+
+ .vm_init = kvm_trap_emul_vm_init,
+ .vcpu_init = kvm_trap_emul_vcpu_init,
+diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c
+index 21221ed..0f75b6b 100644
+--- a/arch/mips/loongson/loongson-3/irq.c
++++ b/arch/mips/loongson/loongson-3/irq.c
+@@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending)
+
+ static struct irqaction cascade_irqaction = {
+ .handler = no_action,
++ .flags = IRQF_NO_SUSPEND,
+ .name = "cascade",
+ };
+
+diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
+index 8fddd2cd..efe366d 100644
+--- a/arch/mips/mti-malta/malta-memory.c
++++ b/arch/mips/mti-malta/malta-memory.c
+@@ -53,6 +53,12 @@ fw_memblock_t * __init fw_getmdesc(int eva)
+ pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
+ physical_memsize = 0x02000000;
+ } else {
++ if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
++ pr_warn("Unsupported memsize value (0x%lx) detected! "
++ "Using 0x10000000 (256M) instead\n",
++ memsize);
++ memsize = 256 << 20;
++ }
+ /* If ememsize is set, then set physical_memsize to that */
+ physical_memsize = ememsize ? : memsize;
+ }
+diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
+index 32a7c82..e7567c8 100644
+--- a/arch/mips/power/hibernate.S
++++ b/arch/mips/power/hibernate.S
+@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
+ END(swsusp_arch_suspend)
+
+ LEAF(swsusp_arch_resume)
++ /* Avoid TLB mismatch during and after kernel resume */
++ jal local_flush_tlb_all
+ PTR_L t0, restore_pblist
+ 0:
+ PTR_L t1, PBE_ADDRESS(t0) /* source */
+@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
+ bne t1, t3, 1b
+ PTR_L t0, PBE_NEXT(t0)
+ bnez t0, 0b
+- jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
+ PTR_LA t0, saved_regs
+ PTR_L ra, PT_R31(t0)
+ PTR_L sp, PT_R29(t0)
+diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
+index ae77b7e..c641983 100644
+--- a/arch/powerpc/kernel/cacheinfo.c
++++ b/arch/powerpc/kernel/cacheinfo.c
+@@ -61,12 +61,22 @@ struct cache_type_info {
+ };
+
+ /* These are used to index the cache_type_info array. */
+-#define CACHE_TYPE_UNIFIED 0
+-#define CACHE_TYPE_INSTRUCTION 1
+-#define CACHE_TYPE_DATA 2
++#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
++#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
++#define CACHE_TYPE_INSTRUCTION 2
++#define CACHE_TYPE_DATA 3
+
+ static const struct cache_type_info cache_type_info[] = {
+ {
++ /* Embedded systems that use cache-size, cache-block-size,
++ * etc. for the Unified (typically L2) cache. */
++ .name = "Unified",
++ .size_prop = "cache-size",
++ .line_size_props = { "cache-line-size",
++ "cache-block-size", },
++ .nr_sets_prop = "cache-sets",
++ },
++ {
+ /* PowerPC Processor binding says the [di]-cache-*
+ * must be equal on unified caches, so just use
+ * d-cache properties. */
+@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
+ {
+ struct cache *iter;
+
+- if (cache->type == CACHE_TYPE_UNIFIED)
++ if (cache->type == CACHE_TYPE_UNIFIED ||
++ cache->type == CACHE_TYPE_UNIFIED_D)
+ return cache;
+
+ list_for_each_entry(iter, &cache_list, list)
+@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
+ return of_get_property(np, "cache-unified", NULL);
+ }
+
+-static struct cache *cache_do_one_devnode_unified(struct device_node *node,
+- int level)
++/*
++ * Unified caches can have two different sets of tags. Most embedded systems
++ * use cache-size, etc. for the unified cache size, but Open Firmware systems
++ * use d-cache-size, etc. Check on initialization for which type we have, and
++ * return the appropriate structure type. Assume it's embedded if it isn't
++ * open firmware. If it's yet a 3rd type, then there will be missing entries
++ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
++ * to be extended further.
++ */
++static int cache_is_unified_d(const struct device_node *np)
+ {
+- struct cache *cache;
++ return of_get_property(np,
++ cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
++ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
++}
+
++/*
++ */
++static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
++{
+ pr_debug("creating L%d ucache for %s\n", level, node->full_name);
+
+- cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
+-
+- return cache;
++ return new_cache(cache_is_unified_d(node), level, node);
+ }
+
+ static struct cache *cache_do_one_devnode_split(struct device_node *node,
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 7e408bf..cecbe00 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -581,6 +581,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+ pmd = pmd_offset(pud, start);
+ pud_clear(pud);
+ pmd_free_tlb(tlb, pmd, start);
++ mm_dec_nr_pmds(tlb->mm);
+ }
+
+ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
+index 2396dda..ead5535 100644
+--- a/arch/powerpc/perf/callchain.c
++++ b/arch/powerpc/perf/callchain.c
+@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ sp = regs->gpr[1];
+ perf_callchain_store(entry, next_ip);
+
+- for (;;) {
++ while (entry->nr < PERF_MAX_STACK_DEPTH) {
+ fp = (unsigned long __user *) sp;
+ if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
+ return;
+diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
+index 4c11421..3af8324 100644
+--- a/arch/powerpc/platforms/cell/interrupt.c
++++ b/arch/powerpc/platforms/cell/interrupt.c
+@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
+
+ void iic_setup_cpu(void)
+ {
+- out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
++ out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
+ }
+
+ u8 iic_get_target_id(int cpu)
+diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
+index c7c8720..63db1b0 100644
+--- a/arch/powerpc/platforms/cell/iommu.c
++++ b/arch/powerpc/platforms/cell/iommu.c
+@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
+
+ io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
+
+- for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
++ for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
+ io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
+
+ mb();
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 6c9ff2b..1d9369e 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -1777,7 +1777,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
+ region.start += phb->ioda.io_segsize;
+ index++;
+ }
+- } else if (res->flags & IORESOURCE_MEM) {
++ } else if ((res->flags & IORESOURCE_MEM) &&
++ !pnv_pci_is_mem_pref_64(res->flags)) {
+ region.start = res->start -
+ hose->mem_offset[0] -
+ phb->ioda.m32_pci_base;
+diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
+index 1c4c5ac..d3236c9 100644
+--- a/arch/s390/kernel/suspend.c
++++ b/arch/s390/kernel/suspend.c
+@@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
+ {
+ unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+ unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
++ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
++ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+
+ /* Always save lowcore pages (LC protection might be enabled). */
+ if (pfn <= LC_PAGES)
+@@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
+ if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+ return 1;
+ /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
++ if (pfn >= stext_pfn && pfn <= eshared_pfn)
++ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
+ if (tprot(PFN_PHYS(pfn)))
+ return 1;
+ return 0;
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 073b5f3..e7bc2fd 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -17,6 +17,7 @@
+ #include <linux/signal.h>
+ #include <linux/slab.h>
+ #include <linux/bitmap.h>
++#include <linux/vmalloc.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/uaccess.h>
+ #include <asm/sclp.h>
+@@ -1332,10 +1333,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
+ return rc;
+ }
+
+-void kvm_s390_reinject_io_int(struct kvm *kvm,
++int kvm_s390_reinject_io_int(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+ {
+- __inject_vm(kvm, inti);
++ return __inject_vm(kvm, inti);
+ }
+
+ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
+@@ -1455,61 +1456,66 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
+ spin_unlock(&fi->lock);
+ }
+
+-static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
+- u8 *addr)
++static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
++ struct kvm_s390_irq *irq)
+ {
+- struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
+- struct kvm_s390_irq irq = {0};
+-
+- irq.type = inti->type;
++ irq->type = inti->type;
+ switch (inti->type) {
+ case KVM_S390_INT_PFAULT_INIT:
+ case KVM_S390_INT_PFAULT_DONE:
+ case KVM_S390_INT_VIRTIO:
+ case KVM_S390_INT_SERVICE:
+- irq.u.ext = inti->ext;
++ irq->u.ext = inti->ext;
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+- irq.u.io = inti->io;
++ irq->u.io = inti->io;
+ break;
+ case KVM_S390_MCHK:
+- irq.u.mchk = inti->mchk;
++ irq->u.mchk = inti->mchk;
+ break;
+- default:
+- return -EINVAL;
+ }
+-
+- if (copy_to_user(uptr, &irq, sizeof(irq)))
+- return -EFAULT;
+-
+- return 0;
+ }
+
+-static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
++static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
+ {
+ struct kvm_s390_interrupt_info *inti;
+ struct kvm_s390_float_interrupt *fi;
++ struct kvm_s390_irq *buf;
++ int max_irqs;
+ int ret = 0;
+ int n = 0;
+
++ if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
++ return -EINVAL;
++
++ /*
++ * We are already using -ENOMEM to signal
++ * userspace it may retry with a bigger buffer,
++ * so we need to use something else for this case
++ */
++ buf = vzalloc(len);
++ if (!buf)
++ return -ENOBUFS;
++
++ max_irqs = len / sizeof(struct kvm_s390_irq);
++
+ fi = &kvm->arch.float_int;
+ spin_lock(&fi->lock);
+-
+ list_for_each_entry(inti, &fi->list, list) {
+- if (len < sizeof(struct kvm_s390_irq)) {
++ if (n == max_irqs) {
+ /* signal userspace to try again */
+ ret = -ENOMEM;
+ break;
+ }
+- ret = copy_irq_to_user(inti, buf);
+- if (ret)
+- break;
+- buf += sizeof(struct kvm_s390_irq);
+- len -= sizeof(struct kvm_s390_irq);
++ inti_to_irq(inti, &buf[n]);
+ n++;
+ }
+-
+ spin_unlock(&fi->lock);
++ if (!ret && n > 0) {
++ if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
++ ret = -EFAULT;
++ }
++ vfree(buf);
+
+ return ret < 0 ? ret : n;
+ }
+@@ -1520,7 +1526,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+
+ switch (attr->group) {
+ case KVM_DEV_FLIC_GET_ALL_IRQS:
+- r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
++ r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
+ attr->attr);
+ break;
+ default:
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index c34109a..6995a30 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -151,8 +151,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
+ int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+ u64 cr6, u64 schid);
+-void kvm_s390_reinject_io_int(struct kvm *kvm,
+- struct kvm_s390_interrupt_info *inti);
++int kvm_s390_reinject_io_int(struct kvm *kvm,
++ struct kvm_s390_interrupt_info *inti);
+ int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
+
+ /* implemented in intercept.c */
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 3511169..b982fbc 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -229,18 +229,19 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
+ struct kvm_s390_interrupt_info *inti;
+ unsigned long len;
+ u32 tpi_data[3];
+- int cc, rc;
++ int rc;
+ u64 addr;
+
+- rc = 0;
+ addr = kvm_s390_get_base_disp_s(vcpu);
+ if (addr & 3)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+- cc = 0;
++
+ inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
+- if (!inti)
+- goto no_interrupt;
+- cc = 1;
++ if (!inti) {
++ kvm_s390_set_psw_cc(vcpu, 0);
++ return 0;
++ }
++
+ tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
+ tpi_data[1] = inti->io.io_int_parm;
+ tpi_data[2] = inti->io.io_int_word;
+@@ -251,30 +252,38 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
+ */
+ len = sizeof(tpi_data) - 4;
+ rc = write_guest(vcpu, addr, &tpi_data, len);
+- if (rc)
+- return kvm_s390_inject_prog_cond(vcpu, rc);
++ if (rc) {
++ rc = kvm_s390_inject_prog_cond(vcpu, rc);
++ goto reinject_interrupt;
++ }
+ } else {
+ /*
+ * Store the three-word I/O interruption code into
+ * the appropriate lowcore area.
+ */
+ len = sizeof(tpi_data);
+- if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
++ if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
++ /* failed writes to the low core are not recoverable */
+ rc = -EFAULT;
++ goto reinject_interrupt;
++ }
+ }
++
++ /* irq was successfully handed to the guest */
++ kfree(inti);
++ kvm_s390_set_psw_cc(vcpu, 1);
++ return 0;
++reinject_interrupt:
+ /*
+ * If we encounter a problem storing the interruption code, the
+ * instruction is suppressed from the guest's view: reinject the
+ * interrupt.
+ */
+- if (!rc)
++ if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
+ kfree(inti);
+- else
+- kvm_s390_reinject_io_int(vcpu->kvm, inti);
+-no_interrupt:
+- /* Set condition code and we're done. */
+- if (!rc)
+- kvm_s390_set_psw_cc(vcpu, cc);
++ rc = -EFAULT;
++ }
++ /* don't set the cc, a pgm irq was injected or we drop to user space */
+ return rc ? -EFAULT : 0;
+ }
+
+@@ -467,6 +476,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
+ for (n = mem->count - 1; n > 0 ; n--)
+ memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+
++ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
+ mem->vm[0].cpus_total = cpus;
+ mem->vm[0].cpus_configured = cpus;
+ mem->vm[0].cpus_standby = 0;
+diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
+index 47f29b1..e7814b7 100644
+--- a/arch/x86/include/asm/insn.h
++++ b/arch/x86/include/asm/insn.h
+@@ -69,7 +69,7 @@ struct insn {
+ const insn_byte_t *next_byte;
+ };
+
+-#define MAX_INSN_SIZE 16
++#define MAX_INSN_SIZE 15
+
+ #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
+ #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
+diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
+index a1410db..653dfa7 100644
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
+ :: "a" (eax), "c" (ecx));
+ }
+
++static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
++{
++ trace_hardirqs_on();
++ /* "mwait %eax, %ecx;" */
++ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
++ :: "a" (eax), "c" (ecx));
++}
++
+ /*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
+index d6b078e..25b1cc0 100644
+--- a/arch/x86/include/asm/pvclock.h
++++ b/arch/x86/include/asm/pvclock.h
+@@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
+
+ struct pvclock_vsyscall_time_info {
+ struct pvclock_vcpu_time_info pvti;
++ u32 migrate_count;
+ } __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+ #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+index 0739833..666bcf1 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+@@ -557,6 +557,8 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
++ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
++ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+ EVENT_CONSTRAINT_END
+ };
+
+@@ -564,6 +566,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
++ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
++ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+ EVENT_CONSTRAINT_END
+ };
+
+@@ -587,6 +591,8 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
++ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
++ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+ EVENT_CONSTRAINT_END
+ };
+
+@@ -602,6 +608,8 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
++ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
++ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+ EVENT_CONSTRAINT_END
+ };
+
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 046e2d6..a388bb8 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -24,6 +24,7 @@
+ #include <asm/syscalls.h>
+ #include <asm/idle.h>
+ #include <asm/uaccess.h>
++#include <asm/mwait.h>
+ #include <asm/i387.h>
+ #include <asm/fpu-internal.h>
+ #include <asm/debugreg.h>
+@@ -399,6 +400,53 @@ static void amd_e400_idle(void)
+ default_idle();
+ }
+
++/*
++ * Intel Core2 and older machines prefer MWAIT over HALT for C1.
++ * We can't rely on cpuidle installing MWAIT, because it will not load
++ * on systems that support only C1 -- so the boot default must be MWAIT.
++ *
++ * Some AMD machines are the opposite, they depend on using HALT.
++ *
++ * So for default C1, which is used during boot until cpuidle loads,
++ * use MWAIT-C1 on Intel HW that has it, else use HALT.
++ */
++static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
++{
++ if (c->x86_vendor != X86_VENDOR_INTEL)
++ return 0;
++
++ if (!cpu_has(c, X86_FEATURE_MWAIT))
++ return 0;
++
++ return 1;
++}
++
++/*
++ * MONITOR/MWAIT with no hints, used for the default C1 state.
++ * This invokes MWAIT with interrupts enabled and no flags,
++ * which is backwards compatible with the original MWAIT implementation.
++ */
++
++static void mwait_idle(void)
++{
++ if (!current_set_polling_and_test()) {
++ if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
++ smp_mb(); /* quirk */
++ clflush((void *)&current_thread_info()->flags);
++ smp_mb(); /* quirk */
++ }
++
++ __monitor((void *)&current_thread_info()->flags, 0, 0);
++ if (!need_resched())
++ __sti_mwait(0, 0);
++ else
++ local_irq_enable();
++ } else {
++ local_irq_enable();
++ }
++ __current_clr_polling();
++}
++
+ void select_idle_routine(const struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_SMP
+@@ -412,6 +460,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
+ /* E400: APIC timer interrupt does not wake up CPU from C1e */
+ pr_info("using AMD E400 aware idle routine\n");
+ x86_idle = amd_e400_idle;
++ } else if (prefer_mwait_c1_over_halt(c)) {
++ pr_info("using mwait in idle threads\n");
++ x86_idle = mwait_idle;
+ } else
+ x86_idle = default_idle;
+ }
+diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
+index 2f355d2..e5ecd20 100644
+--- a/arch/x86/kernel/pvclock.c
++++ b/arch/x86/kernel/pvclock.c
+@@ -141,7 +141,46 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
+ set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
+ }
+
++static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
++
++static struct pvclock_vsyscall_time_info *
++pvclock_get_vsyscall_user_time_info(int cpu)
++{
++ if (!pvclock_vdso_info) {
++ BUG();
++ return NULL;
++ }
++
++ return &pvclock_vdso_info[cpu];
++}
++
++struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
++{
++ return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
++}
++
+ #ifdef CONFIG_X86_64
++static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
++ void *v)
++{
++ struct task_migration_notifier *mn = v;
++ struct pvclock_vsyscall_time_info *pvti;
++
++ pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
++
++ /* this is NULL when pvclock vsyscall is not initialized */
++ if (unlikely(pvti == NULL))
++ return NOTIFY_DONE;
++
++ pvti->migrate_count++;
++
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block pvclock_migrate = {
++ .notifier_call = pvclock_task_migrate,
++};
++
+ /*
+ * Initialize the generic pvclock vsyscall state. This will allocate
+ * a/some page(s) for the per-vcpu pvclock information, set up a
+@@ -155,12 +194,17 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
+
+ WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
+
++ pvclock_vdso_info = i;
++
+ for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
+ __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
+ __pa(i) + (idx*PAGE_SIZE),
+ PAGE_KERNEL_VVAR);
+ }
+
++
++ register_task_migration_notifier(&pvclock_migrate);
++
+ return 0;
+ }
+ #endif
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index ae4f6d3..a60bd3a 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3621,8 +3621,16 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+
+ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ {
+- unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
+- KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
++ /*
++ * Pass through host's Machine Check Enable value to hw_cr4, which
++ * is in force while we are in guest mode. Do not let guests control
++ * this bit, even if host CR4.MCE == 0.
++ */
++ unsigned long hw_cr4 =
++ (cr4_read_shadow() & X86_CR4_MCE) |
++ (cr4 & ~X86_CR4_MCE) |
++ (to_vmx(vcpu)->rmode.vm86_active ?
++ KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
+
+ if (cr4 & X86_CR4_VMXE) {
+ /*
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 32bf19e..e222ba5 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5775,7 +5775,6 @@ int kvm_arch_init(void *opaque)
+ kvm_set_mmio_spte_mask();
+
+ kvm_x86_ops = ops;
+- kvm_init_msr_list();
+
+ kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
+ PT_DIRTY_MASK, PT64_NX_MASK, 0);
+@@ -7209,7 +7208,14 @@ void kvm_arch_hardware_disable(void)
+
+ int kvm_arch_hardware_setup(void)
+ {
+- return kvm_x86_ops->hardware_setup();
++ int r;
++
++ r = kvm_x86_ops->hardware_setup();
++ if (r != 0)
++ return r;
++
++ kvm_init_msr_list();
++ return 0;
+ }
+
+ void kvm_arch_hardware_unsetup(void)
+diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
+index 1313ae6..85994f5 100644
+--- a/arch/x86/lib/insn.c
++++ b/arch/x86/lib/insn.c
+@@ -52,6 +52,13 @@
+ */
+ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
+ {
++ /*
++ * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
++ * even if the input buffer is long enough to hold them.
++ */
++ if (buf_len > MAX_INSN_SIZE)
++ buf_len = MAX_INSN_SIZE;
++
+ memset(insn, 0, sizeof(*insn));
+ insn->kaddr = kaddr;
+ insn->end_kaddr = kaddr + buf_len;
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
+index 1f33b3d..0a42327 100644
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -82,7 +82,7 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
+ clac();
+
+ /* If the destination is a kernel buffer, we always clear the end */
+- if ((unsigned long)to >= TASK_SIZE_MAX)
++ if (!__addr_ok(to))
+ memset(to, 0, len);
+ return len;
+ }
+diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
+index 9793322..40d2473 100644
+--- a/arch/x86/vdso/vclock_gettime.c
++++ b/arch/x86/vdso/vclock_gettime.c
+@@ -82,18 +82,15 @@ static notrace cycle_t vread_pvclock(int *mode)
+ cycle_t ret;
+ u64 last;
+ u32 version;
++ u32 migrate_count;
+ u8 flags;
+ unsigned cpu, cpu1;
+
+
+ /*
+- * Note: hypervisor must guarantee that:
+- * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
+- * 2. that per-CPU pvclock time info is updated if the
+- * underlying CPU changes.
+- * 3. that version is increased whenever underlying CPU
+- * changes.
+- *
++ * When looping to get a consistent (time-info, tsc) pair, we
++ * also need to deal with the possibility we can switch vcpus,
++ * so make sure we always re-fetch time-info for the current vcpu.
+ */
+ do {
+ cpu = __getcpu() & VGETCPU_CPU_MASK;
+@@ -102,20 +99,27 @@ static notrace cycle_t vread_pvclock(int *mode)
+ * __getcpu() calls (Gleb).
+ */
+
+- pvti = get_pvti(cpu);
++ /* Make sure migrate_count will change if we leave the VCPU. */
++ do {
++ pvti = get_pvti(cpu);
++ migrate_count = pvti->migrate_count;
++
++ cpu1 = cpu;
++ cpu = __getcpu() & VGETCPU_CPU_MASK;
++ } while (unlikely(cpu != cpu1));
+
+ version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
+
+ /*
+ * Test we're still on the cpu as well as the version.
+- * We could have been migrated just after the first
+- * vgetcpu but before fetching the version, so we
+- * wouldn't notice a version change.
++ * - We must read TSC of pvti's VCPU.
++ * - KVM doesn't follow the versioning protocol, so data could
++ * change before version if we left the VCPU.
+ */
+- cpu1 = __getcpu() & VGETCPU_CPU_MASK;
+- } while (unlikely(cpu != cpu1 ||
+- (pvti->pvti.version & 1) ||
+- pvti->pvti.version != version));
++ smp_rmb();
++ } while (unlikely((pvti->pvti.version & 1) ||
++ pvti->pvti.version != version ||
++ pvti->migrate_count != migrate_count));
+
+ if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
+ *mode = VCLOCK_NONE;
+diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
+index e31d494..87be10e 100644
+--- a/arch/xtensa/Kconfig
++++ b/arch/xtensa/Kconfig
+@@ -428,6 +428,36 @@ config DEFAULT_MEM_SIZE
+
+ If unsure, leave the default value here.
+
++config XTFPGA_LCD
++ bool "Enable XTFPGA LCD driver"
++ depends on XTENSA_PLATFORM_XTFPGA
++ default n
++ help
++	  There's a 2x16 LCD on most XTFPGA boards; the kernel may output
++ progress messages there during bootup/shutdown. It may be useful
++ during board bringup.
++
++ If unsure, say N.
++
++config XTFPGA_LCD_BASE_ADDR
++ hex "XTFPGA LCD base address"
++ depends on XTFPGA_LCD
++ default "0x0d0c0000"
++ help
++ Base address of the LCD controller inside KIO region.
++	  Different boards in the XTFPGA family have the LCD controller at different
++	  addresses. Please consult the prototyping user guide for your board for
++	  the correct address. A wrong address here may lead to a hardware lockup.
++
++config XTFPGA_LCD_8BIT_ACCESS
++ bool "Use 8-bit access to XTFPGA LCD"
++ depends on XTFPGA_LCD
++ default n
++ help
++	  The LCD may be connected with a 4- or 8-bit interface; 8-bit access may
++	  only be used with the 8-bit interface. Please consult the prototyping user
++	  guide for your board for the correct interface width.
++
+ endmenu
+
+ menu "Executable file formats"
+diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
+index db5bb72..62d8465 100644
+--- a/arch/xtensa/include/uapi/asm/unistd.h
++++ b/arch/xtensa/include/uapi/asm/unistd.h
+@@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
+ __SYSCALL(324, sys_name_to_handle_at, 5)
+ #define __NR_open_by_handle_at 325
+ __SYSCALL(325, sys_open_by_handle_at, 3)
+-#define __NR_sync_file_range 326
++#define __NR_sync_file_range2 326
+ __SYSCALL(326, sys_sync_file_range2, 6)
+ #define __NR_perf_event_open 327
+ __SYSCALL(327, sys_perf_event_open, 5)
+diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
+index d05f8fe..17b1ef3 100644
+--- a/arch/xtensa/platforms/iss/network.c
++++ b/arch/xtensa/platforms/iss/network.c
+@@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv)
+ {
+ struct iss_net_private *lp = (struct iss_net_private *)priv;
+
+- spin_lock(&lp->lock);
+ iss_net_poll();
++ spin_lock(&lp->lock);
+ mod_timer(&lp->timer, jiffies + lp->timer_val);
+ spin_unlock(&lp->lock);
+ }
+@@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev)
+ struct iss_net_private *lp = netdev_priv(dev);
+ int err;
+
+- spin_lock(&lp->lock);
++ spin_lock_bh(&lp->lock);
+
+ err = lp->tp.open(lp);
+ if (err < 0)
+@@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev)
+ while ((err = iss_net_rx(dev)) > 0)
+ ;
+
+- spin_lock(&opened_lock);
++ spin_unlock_bh(&lp->lock);
++ spin_lock_bh(&opened_lock);
+ list_add(&lp->opened_list, &opened);
+- spin_unlock(&opened_lock);
++ spin_unlock_bh(&opened_lock);
++ spin_lock_bh(&lp->lock);
+
+ init_timer(&lp->timer);
+ lp->timer_val = ISS_NET_TIMER_VALUE;
+@@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev)
+ mod_timer(&lp->timer, jiffies + lp->timer_val);
+
+ out:
+- spin_unlock(&lp->lock);
++ spin_unlock_bh(&lp->lock);
+ return err;
+ }
+
+@@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev)
+ {
+ struct iss_net_private *lp = netdev_priv(dev);
+ netif_stop_queue(dev);
+- spin_lock(&lp->lock);
++ spin_lock_bh(&lp->lock);
+
+ spin_lock(&opened_lock);
+ list_del(&opened);
+@@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev)
+
+ lp->tp.close(lp);
+
+- spin_unlock(&lp->lock);
++ spin_unlock_bh(&lp->lock);
+ return 0;
+ }
+
+ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct iss_net_private *lp = netdev_priv(dev);
+- unsigned long flags;
+ int len;
+
+ netif_stop_queue(dev);
+- spin_lock_irqsave(&lp->lock, flags);
++ spin_lock_bh(&lp->lock);
+
+ len = lp->tp.write(lp, &skb);
+
+@@ -438,7 +439,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
+ }
+
+- spin_unlock_irqrestore(&lp->lock, flags);
++ spin_unlock_bh(&lp->lock);
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+@@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)
+
+ if (!is_valid_ether_addr(hwaddr->sa_data))
+ return -EADDRNOTAVAIL;
+- spin_lock(&lp->lock);
++ spin_lock_bh(&lp->lock);
+ memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
+- spin_unlock(&lp->lock);
++ spin_unlock_bh(&lp->lock);
+ return 0;
+ }
+
+@@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init)
+ *lp = (struct iss_net_private) {
+ .device_list = LIST_HEAD_INIT(lp->device_list),
+ .opened_list = LIST_HEAD_INIT(lp->opened_list),
+- .lock = __SPIN_LOCK_UNLOCKED(lp.lock),
+ .dev = dev,
+ .index = index,
+- };
++ };
+
++ spin_lock_init(&lp->lock);
+ /*
+ * If this name ends up conflicting with an existing registered
+ * netdevice, that is OK, register_netdev{,ice}() will notice this
+diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
+index b9ae206..7839d38 100644
+--- a/arch/xtensa/platforms/xtfpga/Makefile
++++ b/arch/xtensa/platforms/xtfpga/Makefile
+@@ -6,4 +6,5 @@
+ #
+ # Note 2! The CFLAGS definitions are in the main makefile...
+
+-obj-y = setup.o lcd.o
++obj-y += setup.o
++obj-$(CONFIG_XTFPGA_LCD) += lcd.o
+diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+index 6edd20b..4e0af26 100644
+--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
++++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+@@ -40,9 +40,6 @@
+
+ /* UART */
+ #define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
+-/* LCD instruction and data addresses. */
+-#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
+-#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
+
+ /* Misc. */
+ #define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
+diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+index 0e43564..4c8541e 100644
+--- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
++++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+@@ -11,10 +11,25 @@
+ #ifndef __XTENSA_XTAVNET_LCD_H
+ #define __XTENSA_XTAVNET_LCD_H
+
++#ifdef CONFIG_XTFPGA_LCD
+ /* Display string STR at position POS on the LCD. */
+ void lcd_disp_at_pos(char *str, unsigned char pos);
+
+ /* Shift the contents of the LCD display left or right. */
+ void lcd_shiftleft(void);
+ void lcd_shiftright(void);
++#else
++static inline void lcd_disp_at_pos(char *str, unsigned char pos)
++{
++}
++
++static inline void lcd_shiftleft(void)
++{
++}
++
++static inline void lcd_shiftright(void)
++{
++}
++#endif
++
+ #endif
+diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
+index 2872301..4dc0c1b 100644
+--- a/arch/xtensa/platforms/xtfpga/lcd.c
++++ b/arch/xtensa/platforms/xtfpga/lcd.c
+@@ -1,50 +1,63 @@
+ /*
+- * Driver for the LCD display on the Tensilica LX60 Board.
++ * Driver for the LCD display on the Tensilica XTFPGA board family.
++ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
++ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+-/*
+- *
+- * FIXME: this code is from the examples from the LX60 user guide.
+- *
+- * The lcd_pause function does busy waiting, which is probably not
+- * great. Maybe the code could be changed to use kernel timers, or
+- * change the hardware to not need to wait.
+- */
+-
++#include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+
+ #include <platform/hardware.h>
+ #include <platform/lcd.h>
+-#include <linux/delay.h>
+
+-#define LCD_PAUSE_ITERATIONS 4000
++/* LCD instruction and data addresses. */
++#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
++#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4)
++
+ #define LCD_CLEAR 0x1
+ #define LCD_DISPLAY_ON 0xc
+
+ /* 8bit and 2 lines display */
+ #define LCD_DISPLAY_MODE8BIT 0x38
++#define LCD_DISPLAY_MODE4BIT 0x28
+ #define LCD_DISPLAY_POS 0x80
+ #define LCD_SHIFT_LEFT 0x18
+ #define LCD_SHIFT_RIGHT 0x1c
+
++static void lcd_put_byte(u8 *addr, u8 data)
++{
++#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
++ ACCESS_ONCE(*addr) = data;
++#else
++ ACCESS_ONCE(*addr) = data & 0xf0;
++ ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
++#endif
++}
++
+ static int __init lcd_init(void)
+ {
+- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ mdelay(5);
+- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ udelay(200);
+- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
++ udelay(50);
++#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
++ udelay(50);
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
+ udelay(50);
+- *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
++#endif
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
+ udelay(50);
+- *LCD_INSTR_ADDR = LCD_CLEAR;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
+ mdelay(10);
+ lcd_disp_at_pos("XTENSA LINUX", 0);
+ return 0;
+@@ -52,10 +65,10 @@ static int __init lcd_init(void)
+
+ void lcd_disp_at_pos(char *str, unsigned char pos)
+ {
+- *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
+ udelay(100);
+ while (*str != 0) {
+- *LCD_DATA_ADDR = *str;
++ lcd_put_byte(LCD_DATA_ADDR, *str);
+ udelay(200);
+ str++;
+ }
+@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)
+
+ void lcd_shiftleft(void)
+ {
+- *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
+ udelay(50);
+ }
+
+ void lcd_shiftright(void)
+ {
+- *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
+ udelay(50);
+ }
+
+diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
+index 5ed064e..ccf7932 100644
+--- a/drivers/acpi/acpica/evgpe.c
++++ b/drivers/acpi/acpica/evgpe.c
+@@ -92,6 +92,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
+ ACPI_SET_BIT(gpe_register_info->enable_for_run,
+ (u8)register_bit);
+ }
++ gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
+
+ return_ACPI_STATUS(AE_OK);
+ }
+@@ -123,7 +124,7 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+
+ /* Enable the requested GPE */
+
+- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE_SAVE);
++ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
+ return_ACPI_STATUS(status);
+ }
+
+@@ -202,7 +203,7 @@ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
+ if (ACPI_SUCCESS(status)) {
+ status =
+ acpi_hw_low_set_gpe(gpe_event_info,
+- ACPI_GPE_DISABLE_SAVE);
++ ACPI_GPE_DISABLE);
+ }
+
+ if (ACPI_FAILURE(status)) {
+diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
+index 84bc550..af6514e 100644
+--- a/drivers/acpi/acpica/hwgpe.c
++++ b/drivers/acpi/acpica/hwgpe.c
+@@ -89,6 +89,8 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable or disable a single GPE in the parent enable register.
++ * The enable_mask field of the involved GPE register must be
++ * updated by the caller if necessary.
+ *
+ ******************************************************************************/
+
+@@ -119,7 +121,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
+ /* Set or clear just the bit that corresponds to this GPE */
+
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
+- switch (action & ~ACPI_GPE_SAVE_MASK) {
++ switch (action) {
+ case ACPI_GPE_CONDITIONAL_ENABLE:
+
+ /* Only enable if the corresponding enable_mask bit is set */
+@@ -149,9 +151,6 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
+ /* Write the updated enable mask */
+
+ status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
+- if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) {
+- gpe_register_info->enable_mask = (u8)enable_mask;
+- }
+ return (status);
+ }
+
+@@ -286,10 +285,8 @@ acpi_hw_gpe_enable_write(u8 enable_mask,
+ {
+ acpi_status status;
+
++ gpe_register_info->enable_mask = enable_mask;
+ status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
+- if (ACPI_SUCCESS(status)) {
+- gpe_register_info->enable_mask = enable_mask;
+- }
+ return (status);
+ }
+
+diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
+index 9bad45e..7fbc2b9 100644
+--- a/drivers/acpi/acpica/tbinstal.c
++++ b/drivers/acpi/acpica/tbinstal.c
+@@ -346,7 +346,6 @@ acpi_tb_install_standard_table(acpi_physical_address address,
+ */
+ acpi_tb_uninstall_table(&new_table_desc);
+ *table_index = i;
+- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_ACPI_STATUS(AE_OK);
+ }
+ }
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index bbca783..349f4fd 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -298,7 +298,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
+ struct acpi_device_physical_node *pn;
+ bool offline = true;
+
+- mutex_lock(&adev->physical_node_lock);
++ /*
++ * acpi_container_offline() calls this for all of the container's
++ * children under the container's physical_node_lock lock.
++ */
++ mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
+
+ list_for_each_entry(pn, &adev->physical_node_list, node)
+ if (device_supports_offline(pn->dev) && !pn->dev->offline) {
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 876bae5..79bc203 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -515,11 +515,11 @@ int bus_add_device(struct device *dev)
+ goto out_put;
+ error = device_add_groups(dev, bus->dev_groups);
+ if (error)
+- goto out_groups;
++ goto out_id;
+ error = sysfs_create_link(&bus->p->devices_kset->kobj,
+ &dev->kobj, dev_name(dev));
+ if (error)
+- goto out_id;
++ goto out_groups;
+ error = sysfs_create_link(&dev->kobj,
+ &dev->bus->p->subsys.kobj, "subsystem");
+ if (error)
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index 6e64563..9c2ba1c 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -62,15 +62,21 @@ static int cache_setup_of_node(unsigned int cpu)
+ return -ENOENT;
+ }
+
+- while (np && index < cache_leaves(cpu)) {
++ while (index < cache_leaves(cpu)) {
+ this_leaf = this_cpu_ci->info_list + index;
+ if (this_leaf->level != 1)
+ np = of_find_next_cache_node(np);
+ else
+ np = of_node_get(np);/* cpu node itself */
++ if (!np)
++ break;
+ this_leaf->of_node = np;
+ index++;
+ }
++
++ if (index != cache_leaves(cpu)) /* not all OF nodes populated */
++ return -ENOENT;
++
+ return 0;
+ }
+
+@@ -189,8 +195,11 @@ static int detect_cache_attributes(unsigned int cpu)
+ * will be set up here only if they are not populated already
+ */
+ ret = cache_shared_cpu_map_setup(cpu);
+- if (ret)
++ if (ret) {
++ pr_warn("Unable to detect cache hierarcy from DT for CPU %d\n",
++ cpu);
+ goto free_ci;
++ }
+ return 0;
+
+ free_ci:
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 9421fed..e68ab79 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -101,6 +101,15 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
+ }
+
+ r = platform_get_resource(dev, IORESOURCE_IRQ, num);
++ /*
++ * The resources may pass trigger flags to the irqs that need
++ * to be set up. It so happens that the trigger flags for
++ * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
++ * settings.
++ */
++ if (r && r->flags & IORESOURCE_BITS)
++ irqd_set_trigger_type(irq_get_irq_data(r->start),
++ r->flags & IORESOURCE_BITS);
+
+ return r ? r->start : -ENXIO;
+ #endif
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index de4c849..288547a 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = {
+ /* Atheros AR3011 with sflash firmware*/
+ { USB_DEVICE(0x0489, 0xE027) },
+ { USB_DEVICE(0x0489, 0xE03D) },
++ { USB_DEVICE(0x04F2, 0xAFF1) },
+ { USB_DEVICE(0x0930, 0x0215) },
+ { USB_DEVICE(0x0CF3, 0x3002) },
+ { USB_DEVICE(0x0CF3, 0xE019) },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 8bfc4c2..2c527da 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -159,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = {
+ /* Atheros 3011 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
++ { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index e096e9c..283f00a 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -170,6 +170,41 @@ static void tpm_dev_del_device(struct tpm_chip *chip)
+ device_unregister(&chip->dev);
+ }
+
++static int tpm1_chip_register(struct tpm_chip *chip)
++{
++ int rc;
++
++ if (chip->flags & TPM_CHIP_FLAG_TPM2)
++ return 0;
++
++ rc = tpm_sysfs_add_device(chip);
++ if (rc)
++ return rc;
++
++ rc = tpm_add_ppi(chip);
++ if (rc) {
++ tpm_sysfs_del_device(chip);
++ return rc;
++ }
++
++ chip->bios_dir = tpm_bios_log_setup(chip->devname);
++
++ return 0;
++}
++
++static void tpm1_chip_unregister(struct tpm_chip *chip)
++{
++ if (chip->flags & TPM_CHIP_FLAG_TPM2)
++ return;
++
++ if (chip->bios_dir)
++ tpm_bios_log_teardown(chip->bios_dir);
++
++ tpm_remove_ppi(chip);
++
++ tpm_sysfs_del_device(chip);
++}
++
+ /*
+ * tpm_chip_register() - create a character device for the TPM chip
+ * @chip: TPM chip to use.
+@@ -185,22 +220,13 @@ int tpm_chip_register(struct tpm_chip *chip)
+ {
+ int rc;
+
+- /* Populate sysfs for TPM1 devices. */
+- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+- rc = tpm_sysfs_add_device(chip);
+- if (rc)
+- goto del_misc;
+-
+- rc = tpm_add_ppi(chip);
+- if (rc)
+- goto del_sysfs;
+-
+- chip->bios_dir = tpm_bios_log_setup(chip->devname);
+- }
++ rc = tpm1_chip_register(chip);
++ if (rc)
++ return rc;
+
+ rc = tpm_dev_add_device(chip);
+ if (rc)
+- return rc;
++ goto out_err;
+
+ /* Make the chip available. */
+ spin_lock(&driver_lock);
+@@ -210,10 +236,8 @@ int tpm_chip_register(struct tpm_chip *chip)
+ chip->flags |= TPM_CHIP_FLAG_REGISTERED;
+
+ return 0;
+-del_sysfs:
+- tpm_sysfs_del_device(chip);
+-del_misc:
+- tpm_dev_del_device(chip);
++out_err:
++ tpm1_chip_unregister(chip);
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(tpm_chip_register);
+@@ -238,13 +262,7 @@ void tpm_chip_unregister(struct tpm_chip *chip)
+ spin_unlock(&driver_lock);
+ synchronize_rcu();
+
+- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+- if (chip->bios_dir)
+- tpm_bios_log_teardown(chip->bios_dir);
+- tpm_remove_ppi(chip);
+- tpm_sysfs_del_device(chip);
+- }
+-
++ tpm1_chip_unregister(chip);
+ tpm_dev_del_device(chip);
+ }
+ EXPORT_SYMBOL_GPL(tpm_chip_unregister);
+diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
+index a23ac0c..0b7c3e8 100644
+--- a/drivers/clk/at91/clk-usb.c
++++ b/drivers/clk/at91/clk-usb.c
+@@ -56,22 +56,55 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
+ return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
+ }
+
+-static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
+- unsigned long *parent_rate)
++static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
++ unsigned long rate,
++ unsigned long min_rate,
++ unsigned long max_rate,
++ unsigned long *best_parent_rate,
++ struct clk_hw **best_parent_hw)
+ {
+- unsigned long div;
++ struct clk *parent = NULL;
++ long best_rate = -EINVAL;
++ unsigned long tmp_rate;
++ int best_diff = -1;
++ int tmp_diff;
++ int i;
+
+- if (!rate)
+- return -EINVAL;
++ for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
++ int div;
+
+- if (rate >= *parent_rate)
+- return *parent_rate;
++ parent = clk_get_parent_by_index(hw->clk, i);
++ if (!parent)
++ continue;
++
++ for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) {
++ unsigned long tmp_parent_rate;
++
++ tmp_parent_rate = rate * div;
++ tmp_parent_rate = __clk_round_rate(parent,
++ tmp_parent_rate);
++ tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
++ if (tmp_rate < rate)
++ tmp_diff = rate - tmp_rate;
++ else
++ tmp_diff = tmp_rate - rate;
++
++ if (best_diff < 0 || best_diff > tmp_diff) {
++ best_rate = tmp_rate;
++ best_diff = tmp_diff;
++ *best_parent_rate = tmp_parent_rate;
++ *best_parent_hw = __clk_get_hw(parent);
++ }
++
++ if (!best_diff || tmp_rate < rate)
++ break;
++ }
+
+- div = DIV_ROUND_CLOSEST(*parent_rate, rate);
+- if (div > SAM9X5_USB_MAX_DIV + 1)
+- div = SAM9X5_USB_MAX_DIV + 1;
++ if (!best_diff)
++ break;
++ }
+
+- return DIV_ROUND_CLOSEST(*parent_rate, div);
++ return best_rate;
+ }
+
+ static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
+@@ -121,7 +154,7 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
+
+ static const struct clk_ops at91sam9x5_usb_ops = {
+ .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
+- .round_rate = at91sam9x5_clk_usb_round_rate,
++ .determine_rate = at91sam9x5_clk_usb_determine_rate,
+ .get_parent = at91sam9x5_clk_usb_get_parent,
+ .set_parent = at91sam9x5_clk_usb_set_parent,
+ .set_rate = at91sam9x5_clk_usb_set_rate,
+@@ -159,7 +192,7 @@ static const struct clk_ops at91sam9n12_usb_ops = {
+ .disable = at91sam9n12_clk_usb_disable,
+ .is_enabled = at91sam9n12_clk_usb_is_enabled,
+ .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
+- .round_rate = at91sam9x5_clk_usb_round_rate,
++ .determine_rate = at91sam9x5_clk_usb_determine_rate,
+ .set_rate = at91sam9x5_clk_usb_set_rate,
+ };
+
+@@ -179,7 +212,8 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ init.ops = &at91sam9x5_usb_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
++ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
++ CLK_SET_RATE_PARENT;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
+@@ -207,7 +241,7 @@ at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ init.ops = &at91sam9n12_usb_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+- init.flags = CLK_SET_RATE_GATE;
++ init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
+diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
+index 0039bd7..466f30c 100644
+--- a/drivers/clk/qcom/clk-rcg.c
++++ b/drivers/clk/qcom/clk-rcg.c
+@@ -495,6 +495,57 @@ static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate,
+ return __clk_rcg_set_rate(rcg, rcg->freq_tbl);
+ }
+
++/*
++ * This type of clock has a glitch-free mux that switches between the output of
++ * the M/N counter and an always on clock source (XO). When clk_set_rate() is
++ * called we need to make sure that we don't switch to the M/N counter if it
++ * isn't clocking because the mux will get stuck and the clock will stop
++ * outputting a clock. This can happen if the framework isn't aware that this
++ * clock is on and so clk_set_rate() doesn't turn on the new parent. To fix
++ * this we switch the mux in the enable/disable ops and reprogram the M/N
++ * counter in the set_rate op. We also make sure to switch away from the M/N
++ * counter in set_rate if software thinks the clock is off.
++ */
++static int clk_rcg_lcc_set_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long parent_rate)
++{
++ struct clk_rcg *rcg = to_clk_rcg(hw);
++ const struct freq_tbl *f;
++ int ret;
++ u32 gfm = BIT(10);
++
++ f = qcom_find_freq(rcg->freq_tbl, rate);
++ if (!f)
++ return -EINVAL;
++
++ /* Switch to XO to avoid glitches */
++ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
++ ret = __clk_rcg_set_rate(rcg, f);
++ /* Switch back to M/N if it's clocking */
++ if (__clk_is_enabled(hw->clk))
++ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
++
++ return ret;
++}
++
++static int clk_rcg_lcc_enable(struct clk_hw *hw)
++{
++ struct clk_rcg *rcg = to_clk_rcg(hw);
++ u32 gfm = BIT(10);
++
++ /* Use M/N */
++ return regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
++}
++
++static void clk_rcg_lcc_disable(struct clk_hw *hw)
++{
++ struct clk_rcg *rcg = to_clk_rcg(hw);
++ u32 gfm = BIT(10);
++
++ /* Use XO */
++ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
++}
++
+ static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
+ {
+ struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+@@ -543,6 +594,17 @@ const struct clk_ops clk_rcg_bypass_ops = {
+ };
+ EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops);
+
++const struct clk_ops clk_rcg_lcc_ops = {
++ .enable = clk_rcg_lcc_enable,
++ .disable = clk_rcg_lcc_disable,
++ .get_parent = clk_rcg_get_parent,
++ .set_parent = clk_rcg_set_parent,
++ .recalc_rate = clk_rcg_recalc_rate,
++ .determine_rate = clk_rcg_determine_rate,
++ .set_rate = clk_rcg_lcc_set_rate,
++};
++EXPORT_SYMBOL_GPL(clk_rcg_lcc_ops);
++
+ const struct clk_ops clk_dyn_rcg_ops = {
+ .enable = clk_enable_regmap,
+ .is_enabled = clk_is_enabled_regmap,
+diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
+index 687e41f..d09d06b 100644
+--- a/drivers/clk/qcom/clk-rcg.h
++++ b/drivers/clk/qcom/clk-rcg.h
+@@ -96,6 +96,7 @@ struct clk_rcg {
+
+ extern const struct clk_ops clk_rcg_ops;
+ extern const struct clk_ops clk_rcg_bypass_ops;
++extern const struct clk_ops clk_rcg_lcc_ops;
+
+ #define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
+
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index 742acfa..381f274 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -243,7 +243,7 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+ mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
+ cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
+ cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
+- if (rcg->mnd_width && f->n)
++ if (rcg->mnd_width && f->n && (f->m != f->n))
+ cfg |= CFG_MODE_DUAL_EDGE;
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + CFG_REG, mask, cfg);
+diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
+index cbdc31d..a015bb0 100644
+--- a/drivers/clk/qcom/gcc-ipq806x.c
++++ b/drivers/clk/qcom/gcc-ipq806x.c
+@@ -525,8 +525,8 @@ static struct freq_tbl clk_tbl_gsbi_qup[] = {
+ { 10800000, P_PXO, 1, 2, 5 },
+ { 15060000, P_PLL8, 1, 2, 51 },
+ { 24000000, P_PLL8, 4, 1, 4 },
++ { 25000000, P_PXO, 1, 0, 0 },
+ { 25600000, P_PLL8, 1, 1, 15 },
+- { 27000000, P_PXO, 1, 0, 0 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 51200000, P_PLL8, 1, 2, 15 },
+ { }
+diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
+index c9ff27b..a6d3a67 100644
+--- a/drivers/clk/qcom/lcc-ipq806x.c
++++ b/drivers/clk/qcom/lcc-ipq806x.c
+@@ -294,14 +294,14 @@ static struct clk_regmap_mux pcm_clk = {
+ };
+
+ static struct freq_tbl clk_tbl_aif_osr[] = {
+- { 22050, P_PLL4, 1, 147, 20480 },
+- { 32000, P_PLL4, 1, 1, 96 },
+- { 44100, P_PLL4, 1, 147, 10240 },
+- { 48000, P_PLL4, 1, 1, 64 },
+- { 88200, P_PLL4, 1, 147, 5120 },
+- { 96000, P_PLL4, 1, 1, 32 },
+- { 176400, P_PLL4, 1, 147, 2560 },
+- { 192000, P_PLL4, 1, 1, 16 },
++ { 2822400, P_PLL4, 1, 147, 20480 },
++ { 4096000, P_PLL4, 1, 1, 96 },
++ { 5644800, P_PLL4, 1, 147, 10240 },
++ { 6144000, P_PLL4, 1, 1, 64 },
++ { 11289600, P_PLL4, 1, 147, 5120 },
++ { 12288000, P_PLL4, 1, 1, 32 },
++ { 22579200, P_PLL4, 1, 147, 2560 },
++ { 24576000, P_PLL4, 1, 1, 16 },
+ { },
+ };
+
+@@ -360,7 +360,7 @@ static struct clk_branch spdif_clk = {
+ };
+
+ static struct freq_tbl clk_tbl_ahbix[] = {
+- { 131072, P_PLL4, 1, 1, 3 },
++ { 131072000, P_PLL4, 1, 1, 3 },
+ { },
+ };
+
+@@ -386,13 +386,12 @@ static struct clk_rcg ahbix_clk = {
+ .freq_tbl = clk_tbl_ahbix,
+ .clkr = {
+ .enable_reg = 0x38,
+- .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */
++ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "ahbix",
+ .parent_names = lcc_pxo_pll4,
+ .num_parents = 2,
+- .ops = &clk_rcg_ops,
+- .flags = CLK_SET_RATE_GATE,
++ .ops = &clk_rcg_lcc_ops,
+ },
+ },
+ };
+diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
+index 51462e8..714d6ba 100644
+--- a/drivers/clk/samsung/clk-exynos4.c
++++ b/drivers/clk/samsung/clk-exynos4.c
+@@ -1354,7 +1354,7 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
+ VPLL_LOCK, VPLL_CON0, NULL),
+ };
+
+-static void __init exynos4_core_down_clock(enum exynos4_soc soc)
++static void __init exynos4x12_core_down_clock(void)
+ {
+ unsigned int tmp;
+
+@@ -1373,11 +1373,9 @@ static void __init exynos4_core_down_clock(enum exynos4_soc soc)
+ __raw_writel(tmp, reg_base + PWR_CTRL1);
+
+ /*
+- * Disable the clock up feature on Exynos4x12, in case it was
+- * enabled by bootloader.
++ * Disable the clock up feature in case it was enabled by bootloader.
+ */
+- if (exynos4_soc == EXYNOS4X12)
+- __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
++ __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
+ }
+
+ /* register exynos4 clocks */
+@@ -1474,7 +1472,8 @@ static void __init exynos4_clk_init(struct device_node *np,
+ samsung_clk_register_alias(ctx, exynos4_aliases,
+ ARRAY_SIZE(exynos4_aliases));
+
+- exynos4_core_down_clock(soc);
++ if (soc == EXYNOS4X12)
++ exynos4x12_core_down_clock();
+ exynos4_clk_sleep_init();
+
+ samsung_clk_of_add_provider(np, ctx);
+diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
+index 9a893f2..23ce0af 100644
+--- a/drivers/clk/tegra/clk-tegra124.c
++++ b/drivers/clk/tegra/clk-tegra124.c
+@@ -1110,16 +1110,18 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
+ 1, 2);
+ clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;
+
+- clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0,
++ clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
+ clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
+- clks[TEGRA124_CLK_PLLD_DSI] = clk;
++ clks[TEGRA124_CLK_PLL_D_DSI_OUT] = clk;
+
+- clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base,
+- 0, 48, periph_clk_enb_refcnt);
++ clk = tegra_clk_register_periph_gate("dsia", "pll_d_dsi_out", 0,
++ clk_base, 0, 48,
++ periph_clk_enb_refcnt);
+ clks[TEGRA124_CLK_DSIA] = clk;
+
+- clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base,
+- 0, 82, periph_clk_enb_refcnt);
++ clk = tegra_clk_register_periph_gate("dsib", "pll_d_dsi_out", 0,
++ clk_base, 0, 82,
++ periph_clk_enb_refcnt);
+ clks[TEGRA124_CLK_DSIB] = clk;
+
+ /* emc mux */
+diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
+index 9ddb754..7a1df61 100644
+--- a/drivers/clk/tegra/clk.c
++++ b/drivers/clk/tegra/clk.c
+@@ -272,7 +272,7 @@ void __init tegra_add_of_provider(struct device_node *np)
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ rst_ctlr.of_node = np;
+- rst_ctlr.nr_resets = clk_num * 32;
++ rst_ctlr.nr_resets = periph_banks * 32;
+ reset_controller_register(&rst_ctlr);
+ }
+
+diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
+index 42f95a4..9a28b7e 100644
+--- a/drivers/crypto/omap-aes.c
++++ b/drivers/crypto/omap-aes.c
+@@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
+ return err;
+ }
+
+-static int omap_aes_check_aligned(struct scatterlist *sg)
++static int omap_aes_check_aligned(struct scatterlist *sg, int total)
+ {
++ int len = 0;
++
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, 4))
+ return -1;
+ if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+ return -1;
++
++ len += sg->length;
+ sg = sg_next(sg);
+ }
++
++ if (len != total)
++ return -1;
++
+ return 0;
+ }
+
+@@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
+ dd->in_sg = req->src;
+ dd->out_sg = req->dst;
+
+- if (omap_aes_check_aligned(dd->in_sg) ||
+- omap_aes_check_aligned(dd->out_sg)) {
++ if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
++ omap_aes_check_aligned(dd->out_sg, dd->total)) {
+ if (omap_aes_copy_sgs(dd))
+ pr_err("Failed to copy SGs for unaligned cases\n");
+ dd->sgs_copied = 1;
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index d0bc123..1a54205 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -320,11 +320,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache &= ~mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
++ ct->mask_cache_priv &= ~mask;
++
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -332,11 +334,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache |= mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
++ ct->mask_cache_priv |= mask;
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -344,11 +348,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache &= ~mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
++ ct->mask_cache_priv &= ~mask;
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -356,11 +362,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache |= mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
++ ct->mask_cache_priv |= mask;
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
+index bf17a60..1dbfba5 100644
+--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
++++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
+@@ -32,10 +32,16 @@
+ #include <drm/bridge/ptn3460.h>
+
+ #include "exynos_dp_core.h"
++#include "exynos_drm_fimd.h"
+
+ #define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
+ connector)
+
++static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
++{
++ return to_exynos_crtc(dp->encoder->crtc);
++}
++
+ static inline struct exynos_dp_device *
+ display_to_dp(struct exynos_drm_display *d)
+ {
+@@ -1070,6 +1076,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
+ }
+ }
+
++ fimd_dp_clock_enable(dp_to_crtc(dp), true);
++
+ clk_prepare_enable(dp->clock);
+ exynos_dp_phy_init(dp);
+ exynos_dp_init_dp(dp);
+@@ -1094,6 +1102,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+ exynos_dp_phy_exit(dp);
+ clk_disable_unprepare(dp->clock);
+
++ fimd_dp_clock_enable(dp_to_crtc(dp), false);
++
+ if (dp->panel) {
+ if (drm_panel_unprepare(dp->panel))
+ DRM_ERROR("failed to turnoff the panel\n");
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+index 33a10ce..5d58f6c 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+@@ -32,6 +32,7 @@
+ #include "exynos_drm_fbdev.h"
+ #include "exynos_drm_crtc.h"
+ #include "exynos_drm_iommu.h"
++#include "exynos_drm_fimd.h"
+
+ /*
+ * FIMD stands for Fully Interactive Mobile Display and
+@@ -1233,6 +1234,24 @@ static int fimd_remove(struct platform_device *pdev)
+ return 0;
+ }
+
++void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
++{
++ struct fimd_context *ctx = crtc->ctx;
++ u32 val;
++
++ /*
++	 * Only Exynos 5250, 5260, 5410 and 542x require enabling DP/MIE
++ * clock. On these SoCs the bootloader may enable it but any
++ * power domain off/on will reset it to disable state.
++ */
++ if (ctx->driver_data != &exynos5_fimd_driver_data)
++ return;
++
++ val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
++	writel(val, ctx->regs + DP_MIE_CLKCON);
++}
++EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
++
+ struct platform_driver fimd_driver = {
+ .probe = fimd_probe,
+ .remove = fimd_remove,
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.h b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
+new file mode 100644
+index 0000000..b4fcaa5
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
+@@ -0,0 +1,15 @@
++/*
++ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#ifndef _EXYNOS_DRM_FIMD_H_
++#define _EXYNOS_DRM_FIMD_H_
++
++extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
++
++#endif /* _EXYNOS_DRM_FIMD_H_ */
+diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
+index fa140e0..60ab1f7 100644
+--- a/drivers/gpu/drm/i2c/adv7511.c
++++ b/drivers/gpu/drm/i2c/adv7511.c
+@@ -33,6 +33,7 @@ struct adv7511 {
+
+ unsigned int current_edid_segment;
+ uint8_t edid_buf[256];
++ bool edid_read;
+
+ wait_queue_head_t wq;
+ struct drm_encoder *encoder;
+@@ -379,69 +380,71 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
+ return false;
+ }
+
+-static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+-{
+- struct adv7511 *adv7511 = devid;
+-
+- if (adv7511_hpd(adv7511))
+- drm_helper_hpd_irq_event(adv7511->encoder->dev);
+-
+- wake_up_all(&adv7511->wq);
+-
+- return IRQ_HANDLED;
+-}
+-
+-static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
+- unsigned int irq)
++static int adv7511_irq_process(struct adv7511 *adv7511)
+ {
+ unsigned int irq0, irq1;
+- unsigned int pending;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ if (ret < 0)
+- return 0;
++ return ret;
++
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
+ if (ret < 0)
+- return 0;
++ return ret;
++
++ regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
++ regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
++
++ if (irq0 & ADV7511_INT0_HDP)
++ drm_helper_hpd_irq_event(adv7511->encoder->dev);
++
++ if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
++ adv7511->edid_read = true;
++
++ if (adv7511->i2c_main->irq)
++ wake_up_all(&adv7511->wq);
++ }
++
++ return 0;
++}
+
+- pending = (irq1 << 8) | irq0;
++static irqreturn_t adv7511_irq_handler(int irq, void *devid)
++{
++ struct adv7511 *adv7511 = devid;
++ int ret;
+
+- return pending & irq;
++ ret = adv7511_irq_process(adv7511);
++ return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+ }
+
+-static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
+- int timeout)
++/* -----------------------------------------------------------------------------
++ * EDID retrieval
++ */
++
++static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
+ {
+- unsigned int pending;
+ int ret;
+
+ if (adv7511->i2c_main->irq) {
+ ret = wait_event_interruptible_timeout(adv7511->wq,
+- adv7511_is_interrupt_pending(adv7511, irq),
+- msecs_to_jiffies(timeout));
+- if (ret <= 0)
+- return 0;
+- pending = adv7511_is_interrupt_pending(adv7511, irq);
++ adv7511->edid_read, msecs_to_jiffies(timeout));
+ } else {
+- if (timeout < 25)
+- timeout = 25;
+- do {
+- pending = adv7511_is_interrupt_pending(adv7511, irq);
+- if (pending)
++ for (; timeout > 0; timeout -= 25) {
++ ret = adv7511_irq_process(adv7511);
++ if (ret < 0)
+ break;
++
++ if (adv7511->edid_read)
++ break;
++
+ msleep(25);
+- timeout -= 25;
+- } while (timeout >= 25);
++ }
+ }
+
+- return pending;
++ return adv7511->edid_read ? 0 : -EIO;
+ }
+
+-/* -----------------------------------------------------------------------------
+- * EDID retrieval
+- */
+-
+ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+ size_t len)
+ {
+@@ -463,19 +466,14 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+ return ret;
+
+ if (status != 2) {
++ adv7511->edid_read = false;
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
+ block);
+- ret = adv7511_wait_for_interrupt(adv7511,
+- ADV7511_INT0_EDID_READY |
+- ADV7511_INT1_DDC_ERROR, 200);
+-
+- if (!(ret & ADV7511_INT0_EDID_READY))
+- return -EIO;
++ ret = adv7511_wait_for_edid(adv7511, 200);
++ if (ret < 0)
++ return ret;
+ }
+
+- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+-
+ /* Break this apart, hopefully more I2C controllers will
+ * support 64 byte transfers than 256 byte transfers
+ */
+@@ -528,7 +526,9 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
+ /* Reading the EDID only works if the device is powered */
+ if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
++ ADV7511_INT0_EDID_READY);
++ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
++ ADV7511_INT1_DDC_ERROR);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ adv7511->current_edid_segment = -1;
+@@ -563,7 +563,9 @@ static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
+ adv7511->current_edid_segment = -1;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
++ ADV7511_INT0_EDID_READY);
++ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
++ ADV7511_INT1_DDC_ERROR);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ /*
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 5c66b56..ec4d932 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -1042,7 +1042,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+ s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
+
+ s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+- s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
++ s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
+
+ s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
+ s->ecochk = I915_READ(GAM_ECOCHK);
+@@ -1124,7 +1124,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+ I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
+
+ I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+- I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
++ I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
+
+ I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
+ I915_WRITE(GAM_ECOCHK, s->ecochk);
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index ede5bbb..07320cb 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -3718,14 +3718,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
+ ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
++ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+
+ I915_WRITE16(IER,
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT);
+ POSTING_READ16(IER);
+
+@@ -3887,14 +3885,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
++ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+
+ enable_mask =
+ I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 33b3d0a2..f536ff2 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1740,6 +1740,7 @@ enum punit_power_well {
+ #define GMBUS_CYCLE_INDEX (2<<25)
+ #define GMBUS_CYCLE_STOP (4<<25)
+ #define GMBUS_BYTE_COUNT_SHIFT 16
++#define GMBUS_BYTE_COUNT_MAX 256U
+ #define GMBUS_SLAVE_INDEX_SHIFT 8
+ #define GMBUS_SLAVE_ADDR_SHIFT 1
+ #define GMBUS_SLAVE_READ (1<<0)
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index b31088a..56e437e 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -270,18 +270,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
+ }
+
+ static int
+-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+- u32 gmbus1_index)
++gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
++ unsigned short addr, u8 *buf, unsigned int len,
++ u32 gmbus1_index)
+ {
+ int reg_offset = dev_priv->gpio_mmio_base;
+- u16 len = msg->len;
+- u8 *buf = msg->buf;
+
+ I915_WRITE(GMBUS1 + reg_offset,
+ gmbus1_index |
+ GMBUS_CYCLE_WAIT |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
++ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+@@ -303,11 +302,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ }
+
+ static int
+-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
++gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
++ u32 gmbus1_index)
+ {
+- int reg_offset = dev_priv->gpio_mmio_base;
+- u16 len = msg->len;
+ u8 *buf = msg->buf;
++ unsigned int rx_size = msg->len;
++ unsigned int len;
++ int ret;
++
++ do {
++ len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
++
++ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
++ buf, len, gmbus1_index);
++ if (ret)
++ return ret;
++
++ rx_size -= len;
++ buf += len;
++ } while (rx_size != 0);
++
++ return 0;
++}
++
++static int
++gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
++ unsigned short addr, u8 *buf, unsigned int len)
++{
++ int reg_offset = dev_priv->gpio_mmio_base;
++ unsigned int chunk_size = len;
+ u32 val, loop;
+
+ val = loop = 0;
+@@ -319,8 +342,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT |
+- (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
++ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
++ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+@@ -337,6 +360,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+ if (ret)
+ return ret;
+ }
++
++ return 0;
++}
++
++static int
++gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
++{
++ u8 *buf = msg->buf;
++ unsigned int tx_size = msg->len;
++ unsigned int len;
++ int ret;
++
++ do {
++ len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
++
++ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
++ if (ret)
++ return ret;
++
++ buf += len;
++ tx_size -= len;
++ } while (tx_size != 0);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 86807ee..9bd5611 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
+@@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 9c47867..7fe5590 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -459,6 +459,10 @@
+ #define USB_DEVICE_ID_UGCI_FLYING 0x0020
+ #define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
+
++#define USB_VENDOR_ID_HP 0x03f0
++#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE 0x0a4a
++#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
++
+ #define USB_VENDOR_ID_HUION 0x256c
+ #define USB_DEVICE_ID_HUION_TABLET 0x006e
+
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index a821277..4e3ae9f 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -78,6 +78,8 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
++ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 2978f5e..00bc30e 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -135,7 +135,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ GFP_KERNEL);
+ if (!open_info) {
+ err = -ENOMEM;
+- goto error0;
++ goto error_gpadl;
+ }
+
+ init_completion(&open_info->waitevent);
+@@ -151,7 +151,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+
+ if (userdatalen > MAX_USER_DEFINED_BYTES) {
+ err = -EINVAL;
+- goto error0;
++ goto error_gpadl;
+ }
+
+ if (userdatalen)
+@@ -195,6 +195,9 @@ error1:
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
++error_gpadl:
++ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
++
+ error0:
+ free_pages((unsigned long)out,
+ get_order(send_ringbuffer_size + recv_ringbuffer_size));
+diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
+index 5f96b1b..019d542 100644
+--- a/drivers/i2c/busses/i2c-rk3x.c
++++ b/drivers/i2c/busses/i2c-rk3x.c
+@@ -833,7 +833,7 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
+ clk_disable(i2c->clk);
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+- return ret;
++ return ret < 0 ? ret : num;
+ }
+
+ static u32 rk3x_i2c_func(struct i2c_adapter *adap)
+diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
+index edf274c..8143162 100644
+--- a/drivers/i2c/i2c-core.c
++++ b/drivers/i2c/i2c-core.c
+@@ -596,6 +596,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
+ adap->bus_recovery_info->set_scl(adap, 1);
+ return i2c_generic_recovery(adap);
+ }
++EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
+
+ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+ {
+@@ -610,6 +611,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);
+
+ int i2c_recover_bus(struct i2c_adapter *adap)
+ {
+@@ -619,6 +621,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
+ dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
+ return adap->bus_recovery_info->recover_bus(adap);
+ }
++EXPORT_SYMBOL_GPL(i2c_recover_bus);
+
+ static int i2c_device_probe(struct device *dev)
+ {
+@@ -1410,6 +1413,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
+
+ dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
+
++ pm_runtime_no_callbacks(&adap->dev);
++
+ #ifdef CONFIG_I2C_COMPAT
+ res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
+ adap->dev.parent);
+diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
+index 593f7ca..06cc1ff 100644
+--- a/drivers/i2c/i2c-mux.c
++++ b/drivers/i2c/i2c-mux.c
+@@ -32,8 +32,9 @@ struct i2c_mux_priv {
+ struct i2c_algorithm algo;
+
+ struct i2c_adapter *parent;
+- void *mux_priv; /* the mux chip/device */
+- u32 chan_id; /* the channel id */
++ struct device *mux_dev;
++ void *mux_priv;
++ u32 chan_id;
+
+ int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
+ int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
+@@ -119,6 +120,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
+
+ /* Set up private adapter data */
+ priv->parent = parent;
++ priv->mux_dev = mux_dev;
+ priv->mux_priv = mux_priv;
+ priv->chan_id = chan_id;
+ priv->select = select;
+@@ -203,7 +205,7 @@ void i2c_del_mux_adapter(struct i2c_adapter *adap)
+ char symlink_name[20];
+
+ snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id);
+- sysfs_remove_link(&adap->dev.parent->kobj, symlink_name);
++ sysfs_remove_link(&priv->mux_dev->kobj, symlink_name);
+
+ sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
+ i2c_del_adapter(adap);
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index b0e5852..44d1d79 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -218,18 +218,10 @@ static struct cpuidle_state byt_cstates[] = {
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+- .name = "C1E-BYT",
+- .desc = "MWAIT 0x01",
+- .flags = MWAIT2flg(0x01),
+- .exit_latency = 15,
+- .target_residency = 30,
+- .enter = &intel_idle,
+- .enter_freeze = intel_idle_freeze, },
+- {
+ .name = "C6N-BYT",
+ .desc = "MWAIT 0x58",
+ .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
+- .exit_latency = 40,
++ .exit_latency = 300,
+ .target_residency = 275,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+@@ -237,7 +229,7 @@ static struct cpuidle_state byt_cstates[] = {
+ .name = "C6S-BYT",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
+- .exit_latency = 140,
++ .exit_latency = 500,
+ .target_residency = 560,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+@@ -246,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = {
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1200,
+- .target_residency = 1500,
++ .target_residency = 4000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 8c014b5..38acb3c 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -99,12 +99,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ if (dmasync)
+ dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+
++ if (!size)
++ return ERR_PTR(-EINVAL);
++
+ /*
+ * If the combination of the addr and size requested for this memory
+ * region causes an integer overflow, return error.
+ */
+- if ((PAGE_ALIGN(addr + size) <= size) ||
+- (PAGE_ALIGN(addr + size) <= addr))
++ if (((addr + size) < addr) ||
++ PAGE_ALIGN(addr + size) < (addr + size))
+ return ERR_PTR(-EINVAL);
+
+ if (!can_do_mlock())
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index ed2bd67..fbde33a 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -2605,8 +2605,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
+
+ memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+
+- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+- wr->wr.ud.hlen);
++ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
+ *lso_seg_len = halign;
+ return 0;
+ }
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 20e859a..76eb57b 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -409,8 +409,8 @@ int iser_send_command(struct iscsi_conn *conn,
+ if (scsi_prot_sg_count(sc)) {
+ prot_buf->buf = scsi_prot_sglist(sc);
+ prot_buf->size = scsi_prot_sg_count(sc);
+- prot_buf->data_len = data_buf->data_len >>
+- ilog2(sc->device->sector_size) * 8;
++ prot_buf->data_len = (data_buf->data_len >>
++ ilog2(sc->device->sector_size)) * 8;
+ }
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 075b19c..147029a 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -222,7 +222,7 @@ fail:
+ static void
+ isert_free_rx_descriptors(struct isert_conn *isert_conn)
+ {
+- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
++ struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
+ struct iser_rx_desc *rx_desc;
+ int i;
+
+@@ -719,8 +719,8 @@ out:
+ static void
+ isert_connect_release(struct isert_conn *isert_conn)
+ {
+- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->conn_device;
++ struct ib_device *ib_dev = device->ib_device;
+
+ isert_dbg("conn %p\n", isert_conn);
+
+@@ -728,7 +728,8 @@ isert_connect_release(struct isert_conn *isert_conn)
+ isert_conn_free_fastreg_pool(isert_conn);
+
+ isert_free_rx_descriptors(isert_conn);
+- rdma_destroy_id(isert_conn->conn_cm_id);
++ if (isert_conn->conn_cm_id)
++ rdma_destroy_id(isert_conn->conn_cm_id);
+
+ if (isert_conn->conn_qp) {
+ struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
+@@ -878,12 +879,15 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
+ return 0;
+ }
+
+-static void
++static int
+ isert_connect_error(struct rdma_cm_id *cma_id)
+ {
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
++ isert_conn->conn_cm_id = NULL;
+ isert_put_conn(isert_conn);
++
++ return -1;
+ }
+
+ static int
+@@ -912,7 +916,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+- isert_connect_error(cma_id);
++ ret = isert_connect_error(cma_id);
+ break;
+ default:
+ isert_err("Unhandled RDMA CMA event: %d\n", event->event);
+@@ -1861,11 +1865,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+
+- if (ret)
++ if (ret) {
++ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->pi_err, 0);
+- else
++ } else {
+ target_execute_cmd(se_cmd);
++ }
+ }
+
+ static void
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 27bcdbc..ea6cb64 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -1159,13 +1159,14 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
+ bool report_buttons)
+ {
+ struct alps_data *priv = psmouse->private;
+- struct input_dev *dev;
++ struct input_dev *dev, *dev2 = NULL;
+
+ /* Figure out which device to use to report the bare packet */
+ if (priv->proto_version == ALPS_PROTO_V2 &&
+ (priv->flags & ALPS_DUALPOINT)) {
+ /* On V2 devices the DualPoint Stick reports bare packets */
+ dev = priv->dev2;
++ dev2 = psmouse->dev;
+ } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) {
+ /* Register dev3 mouse if we received PS/2 packet first time */
+ if (!IS_ERR(priv->dev3))
+@@ -1177,7 +1178,7 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
+ }
+
+ if (report_buttons)
+- alps_report_buttons(dev, NULL,
++ alps_report_buttons(dev, dev2,
+ packet[0] & 1, packet[0] & 2, packet[0] & 4);
+
+ input_report_rel(dev, REL_X,
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 6e22682..991dc6b 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -893,6 +893,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
+ }
+
+ /*
++ * This writes the reg_07 value again to the hardware at the end of every
++ * set_rate call because the register loses its value. reg_07 allows setting
++ * absolute mode on v4 hardware
++ */
++static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
++ unsigned int rate)
++{
++ struct elantech_data *etd = psmouse->private;
++
++ etd->original_set_rate(psmouse, rate);
++ if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
++ psmouse_err(psmouse, "restoring reg_07 failed\n");
++}
++
++/*
+ * Put the touchpad into absolute mode
+ */
+ static int elantech_set_absolute_mode(struct psmouse *psmouse)
+@@ -1094,6 +1109,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
+ * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
+ * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
++ * Asus TP500LN 0x381f17 10, 14, 0e clickpad
++ * Asus X750JN 0x381f17 10, 14, 0e clickpad
+ * Asus UX31 0x361f00 20, 15, 0e clickpad
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
+@@ -1635,6 +1652,11 @@ int elantech_init(struct psmouse *psmouse)
+ goto init_fail;
+ }
+
++ if (etd->fw_version == 0x381f17) {
++ etd->original_set_rate = psmouse->set_rate;
++ psmouse->set_rate = elantech_set_rate_restore_reg_07;
++ }
++
+ if (elantech_set_input_params(psmouse)) {
+ psmouse_err(psmouse, "failed to query touchpad range.\n");
+ goto init_fail;
+diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
+index 6f3afec..f965d15 100644
+--- a/drivers/input/mouse/elantech.h
++++ b/drivers/input/mouse/elantech.h
+@@ -142,6 +142,7 @@ struct elantech_data {
+ struct finger_pos mt[ETP_MAX_FINGERS];
+ unsigned char parity[256];
+ int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
++ void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
+ };
+
+ #ifdef CONFIG_MOUSE_PS2_ELANTECH
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 713a962..41473929 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -925,11 +925,10 @@ static int crypt_convert(struct crypt_config *cc,
+
+ switch (r) {
+ /* async */
++ case -EINPROGRESS:
+ case -EBUSY:
+ wait_for_completion(&ctx->restart);
+ reinit_completion(&ctx->restart);
+- /* fall through*/
+- case -EINPROGRESS:
+ ctx->req = NULL;
+ ctx->cc_sector++;
+ continue;
+@@ -1346,10 +1345,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
+ struct crypt_config *cc = io->cc;
+
+- if (error == -EINPROGRESS) {
+- complete(&ctx->restart);
++ if (error == -EINPROGRESS)
+ return;
+- }
+
+ if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+ error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+@@ -1360,12 +1357,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
+
+ if (!atomic_dec_and_test(&ctx->cc_pending))
+- return;
++ goto done;
+
+ if (bio_data_dir(io->base_bio) == READ)
+ kcryptd_crypt_read_done(io);
+ else
+ kcryptd_crypt_write_io_submit(io, 1);
++done:
++ if (!completion_done(&ctx->restart))
++ complete(&ctx->restart);
+ }
+
+ static void kcryptd_crypt(struct work_struct *work)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 717daad..e617878 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -249,6 +249,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
+ const int rw = bio_data_dir(bio);
+ struct mddev *mddev = q->queuedata;
+ unsigned int sectors;
++ int cpu;
+
+ if (mddev == NULL || mddev->pers == NULL
+ || !mddev->ready) {
+@@ -284,7 +285,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
+ sectors = bio_sectors(bio);
+ mddev->pers->make_request(mddev, bio);
+
+- generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
++ cpu = part_stat_lock();
++ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
++ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
++ part_stat_unlock();
+
+ if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
+ wake_up(&mddev->sb_wait);
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 3ed9f42..3b5d7f7 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -313,7 +313,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,
+
+ /*
+ * remaps the bio to the target device. we separate two flows.
+- * power 2 flow and a general flow for the sake of perfromance
++ * power 2 flow and a general flow for the sake of performance
+ */
+ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
+ sector_t sector, sector_t *sector_offset)
+@@ -524,6 +524,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
+ split = bio;
+ }
+
++ sector = bio->bi_iter.bi_sector;
+ zone = find_zone(mddev->private, &sector);
+ tmp_dev = map_sector(mddev, zone, sector, &sector);
+ split->bi_bdev = tmp_dev->bdev;
+diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
+index 77c78de..7020659 100644
+--- a/drivers/media/rc/img-ir/img-ir-core.c
++++ b/drivers/media/rc/img-ir/img-ir-core.c
+@@ -146,7 +146,7 @@ static int img_ir_remove(struct platform_device *pdev)
+ {
+ struct img_ir_priv *priv = platform_get_drvdata(pdev);
+
+- free_irq(priv->irq, img_ir_isr);
++ free_irq(priv->irq, priv);
+ img_ir_remove_hw(priv);
+ img_ir_remove_raw(priv);
+
+diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
+index 65a326c..749ad56 100644
+--- a/drivers/media/usb/stk1160/stk1160-v4l.c
++++ b/drivers/media/usb/stk1160/stk1160-v4l.c
+@@ -240,6 +240,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
+ if (mutex_lock_interruptible(&dev->v4l_lock))
+ return -ERESTARTSYS;
+
++ /*
++ * Once URBs are cancelled, the URB complete handler
++ * won't be running. This is required to safely release the
++ * current buffer (dev->isoc_ctl.buf).
++ */
+ stk1160_cancel_isoc(dev);
+
+ /*
+@@ -620,8 +625,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
+ stk1160_info("buffer [%p/%d] aborted\n",
+ buf, buf->vb.v4l2_buf.index);
+ }
+- /* It's important to clear current buffer */
+- dev->isoc_ctl.buf = NULL;
++
++ /* It's important to release the current buffer */
++ if (dev->isoc_ctl.buf) {
++ buf = dev->isoc_ctl.buf;
++ dev->isoc_ctl.buf = NULL;
++
++ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
++ stk1160_info("buffer [%p/%d] aborted\n",
++ buf, buf->vb.v4l2_buf.index);
++ }
+ spin_unlock_irqrestore(&dev->buf_lock, flags);
+ }
+
+diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
+index fc145d2..922a750 100644
+--- a/drivers/memstick/core/mspro_block.c
++++ b/drivers/memstick/core/mspro_block.c
+@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+
+ if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
+ if (msb->data_dir == READ) {
+- for (cnt = 0; cnt < msb->current_seg; cnt++)
++ for (cnt = 0; cnt < msb->current_seg; cnt++) {
+ t_len += msb->req_sg[cnt].length
+ / msb->page_size;
+
+@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+ t_len += msb->current_page - 1;
+
+ t_len *= msb->page_size;
++ }
+ }
+ } else
+ t_len = blk_rq_bytes(msb->block_req);
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 2a87f69..1aed3b7 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -128,7 +128,7 @@ static int mfd_add_device(struct device *parent, int id,
+ int platform_id;
+ int r;
+
+- if (id < 0)
++ if (id == PLATFORM_DEVID_AUTO)
+ platform_id = id;
+ else
+ platform_id = id + cell->id;
+diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
+index e8a4218..459ed1b 100644
+--- a/drivers/mmc/host/sunxi-mmc.c
++++ b/drivers/mmc/host/sunxi-mmc.c
+@@ -930,7 +930,9 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
+ return PTR_ERR(host->clk_sample);
+ }
+
+- host->reset = devm_reset_control_get(&pdev->dev, "ahb");
++ host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
++ if (PTR_ERR(host->reset) == -EPROBE_DEFER)
++ return PTR_ERR(host->reset);
+
+ ret = clk_prepare_enable(host->clk_ahb);
+ if (ret) {
+diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
+index a31c357..dba7e1c 100644
+--- a/drivers/mmc/host/tmio_mmc_pio.c
++++ b/drivers/mmc/host/tmio_mmc_pio.c
+@@ -1073,8 +1073,6 @@ EXPORT_SYMBOL(tmio_mmc_host_alloc);
+ void tmio_mmc_host_free(struct tmio_mmc_host *host)
+ {
+ mmc_free_host(host->mmc);
+-
+- host->mmc = NULL;
+ }
+ EXPORT_SYMBOL(tmio_mmc_host_free);
+
+diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
+index 9d2e16f..b5e1548 100644
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -410,7 +410,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ second_is_newer = !second_is_newer;
+ } else {
+ dbg_bld("PEB %d CRC is OK", pnum);
+- bitflips = !!err;
++ bitflips |= !!err;
+ }
+ mutex_unlock(&ubi->buf_mutex);
+
+diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
+index d647e50..d16fccf 100644
+--- a/drivers/mtd/ubi/cdev.c
++++ b/drivers/mtd/ubi/cdev.c
+@@ -455,7 +455,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
+ /* Validate the request */
+ err = -EINVAL;
+ if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
+- req.bytes < 0 || req.lnum >= vol->usable_leb_size)
++ req.bytes < 0 || req.bytes > vol->usable_leb_size)
+ break;
+
+ err = get_exclusive(desc);
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 16e34b3..8c9a710 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1419,7 +1419,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ * during re-size.
+ */
+ ubi_move_aeb_to_list(av, aeb, &ai->erase);
+- vol->eba_tbl[aeb->lnum] = aeb->pnum;
++ else
++ vol->eba_tbl[aeb->lnum] = aeb->pnum;
+ }
+ }
+
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 8f7bde6..0bd92d8 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1002,7 +1002,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ int shutdown)
+ {
+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+- int vol_id = -1, uninitialized_var(lnum);
++ int vol_id = -1, lnum = -1;
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+ int anchor = wrk->anchor;
+ #endif
+diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
+index 81d4153..77bf133 100644
+--- a/drivers/net/ethernet/cadence/macb.c
++++ b/drivers/net/ethernet/cadence/macb.c
+@@ -2165,7 +2165,7 @@ static void macb_configure_caps(struct macb *bp)
+ }
+ }
+
+- if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
++ if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) >= 0x2)
+ bp->caps |= MACB_CAPS_MACB_IS_GEM;
+
+ if (macb_is_gem(bp)) {
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 7f997d3..a71c446 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
++static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
++ struct e1000_rx_ring *rx_ring,
++ int cleaned_count)
++{
++}
+ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
+@@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+ msleep(1);
+ /* e1000_down has a dependency on max_frame_size */
+ hw->max_frame_size = max_frame;
+- if (netif_running(netdev))
++ if (netif_running(netdev)) {
++ /* prevent buffers from being reallocated */
++ adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
+ e1000_down(adapter);
++ }
+
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more, this pushes us to allocate from the next
+diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
+index af829c5..7ace07d 100644
+--- a/drivers/net/ethernet/marvell/pxa168_eth.c
++++ b/drivers/net/ethernet/marvell/pxa168_eth.c
+@@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
+ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "missing phy-handle\n");
+- return -EINVAL;
++ err = -EINVAL;
++ goto err_netdev;
+ }
+ of_property_read_u32(np, "reg", &pep->phy_addr);
+ pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
+@@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
+ pep->smi_bus = mdiobus_alloc();
+ if (pep->smi_bus == NULL) {
+ err = -ENOMEM;
+- goto err_base;
++ goto err_netdev;
+ }
+ pep->smi_bus->priv = pep;
+ pep->smi_bus->name = "pxa168_eth smi";
+@@ -1551,13 +1552,10 @@ err_mdiobus:
+ mdiobus_unregister(pep->smi_bus);
+ err_free_mdio:
+ mdiobus_free(pep->smi_bus);
+-err_base:
+- iounmap(pep->base);
+ err_netdev:
+ free_netdev(dev);
+ err_clk:
+- clk_disable(clk);
+- clk_put(clk);
++ clk_disable_unprepare(clk);
+ return err;
+ }
+
+@@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev)
+ if (pep->phy)
+ phy_disconnect(pep->phy);
+ if (pep->clk) {
+- clk_disable(pep->clk);
+- clk_put(pep->clk);
+- pep->clk = NULL;
++ clk_disable_unprepare(pep->clk);
+ }
+
+- iounmap(pep->base);
+- pep->base = NULL;
+ mdiobus_unregister(pep->smi_bus);
+ mdiobus_free(pep->smi_bus);
+ unregister_netdev(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index a7b58ba..3dccf01 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -981,20 +981,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ /* check if requested function is supported by the device */
+- if ((hfunc == ETH_RSS_HASH_TOP &&
+- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
+- (hfunc == ETH_RSS_HASH_XOR &&
+- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
+- return -EINVAL;
++ if (hfunc == ETH_RSS_HASH_TOP) {
++ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
++ return -EINVAL;
++ if (!(dev->features & NETIF_F_RXHASH))
++ en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
++ return 0;
++ } else if (hfunc == ETH_RSS_HASH_XOR) {
++ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
++ return -EINVAL;
++ if (dev->features & NETIF_F_RXHASH)
++ en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
++ return 0;
++ }
+
+- priv->rss_hash_fn = hfunc;
+- if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
+- en_warn(priv,
+- "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
+- if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
+- en_warn(priv,
+- "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
+- return 0;
++ return -EINVAL;
+ }
+
+ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
+@@ -1068,6 +1069,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
+ priv->prof->rss_rings = rss_rings;
+ if (key)
+ memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
++ if (hfunc != ETH_RSS_HASH_NO_CHANGE)
++ priv->rss_hash_fn = hfunc;
+
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index af034db..9d15566 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1716,6 +1716,7 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
+ {
+ /* note: a 0-length skb is used as an error indication */
+ if (skb->len > 0) {
++ skb_checksum_complete_unset(skb);
+ #ifdef CONFIG_PPP_MULTILINK
+ /* XXX do channel-level decompression here */
+ if (PPP_PROTO(skb) == PPP_MP)
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 90a714c..23806c2 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
++ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+@@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+ {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
++ {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
+ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
+ {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
+diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
+index c93fae9..5fbd223 100644
+--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
++++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
+@@ -139,7 +139,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
+ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
+ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
+
+-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
++WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
+
+ WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
+ AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
+index 0f2cfb0..bf14676 100644
+--- a/drivers/net/wireless/ti/wlcore/debugfs.h
++++ b/drivers/net/wireless/ti/wlcore/debugfs.h
+@@ -26,8 +26,8 @@
+
+ #include "wlcore.h"
+
+-int wl1271_format_buffer(char __user *userbuf, size_t count,
+- loff_t *ppos, char *fmt, ...);
++__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
++ loff_t *ppos, char *fmt, ...);
+
+ int wl1271_debugfs_init(struct wl1271 *wl);
+ void wl1271_debugfs_exit(struct wl1271 *wl);
+diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
+index eb88693..7b53a5c 100644
+--- a/drivers/nfc/st21nfcb/i2c.c
++++ b/drivers/nfc/st21nfcb/i2c.c
+@@ -109,7 +109,7 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
+ return phy->ndlc->hard_fault;
+
+ r = i2c_master_send(client, skb->data, skb->len);
+- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
++ if (r < 0) { /* Retry, chip was in standby */
+ usleep_range(1000, 4000);
+ r = i2c_master_send(client, skb->data, skb->len);
+ }
+@@ -148,7 +148,7 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
+ struct i2c_client *client = phy->i2c_dev;
+
+ r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
++ if (r < 0) { /* Retry, chip was in standby */
+ usleep_range(1000, 4000);
+ r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+ }
+diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
+index 15c0fab..bceb30b 100644
+--- a/drivers/platform/x86/compal-laptop.c
++++ b/drivers/platform/x86/compal-laptop.c
+@@ -1026,9 +1026,9 @@ static int compal_probe(struct platform_device *pdev)
+ if (err)
+ return err;
+
+- hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+- "compal", data,
+- compal_hwmon_groups);
++ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
++ "compal", data,
++ compal_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto remove;
+@@ -1036,7 +1036,9 @@ static int compal_probe(struct platform_device *pdev)
+
+ /* Power supply */
+ initialize_power_supply_data(data);
+- power_supply_register(&compal_device->dev, &data->psy);
++ err = power_supply_register(&compal_device->dev, &data->psy);
++ if (err < 0)
++ goto remove;
+
+ platform_set_drvdata(pdev, data);
+
+diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c
+index 9d69460..96b15e0 100644
+--- a/drivers/power/ipaq_micro_battery.c
++++ b/drivers/power/ipaq_micro_battery.c
+@@ -226,6 +226,7 @@ static struct power_supply micro_ac_power = {
+ static int micro_batt_probe(struct platform_device *pdev)
+ {
+ struct micro_battery *mb;
++ int ret;
+
+ mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+@@ -233,14 +234,30 @@ static int micro_batt_probe(struct platform_device *pdev)
+
+ mb->micro = dev_get_drvdata(pdev->dev.parent);
+ mb->wq = create_singlethread_workqueue("ipaq-battery-wq");
++ if (!mb->wq)
++ return -ENOMEM;
++
+ INIT_DELAYED_WORK(&mb->update, micro_battery_work);
+ platform_set_drvdata(pdev, mb);
+ queue_delayed_work(mb->wq, &mb->update, 1);
+- power_supply_register(&pdev->dev, &micro_batt_power);
+- power_supply_register(&pdev->dev, &micro_ac_power);
++
++ ret = power_supply_register(&pdev->dev, &micro_batt_power);
++ if (ret < 0)
++ goto batt_err;
++
++ ret = power_supply_register(&pdev->dev, &micro_ac_power);
++ if (ret < 0)
++ goto ac_err;
+
+ dev_info(&pdev->dev, "iPAQ micro battery driver\n");
+ return 0;
++
++ac_err:
++ power_supply_unregister(&micro_ac_power);
++batt_err:
++ cancel_delayed_work_sync(&mb->update);
++ destroy_workqueue(mb->wq);
++ return ret;
+ }
+
+ static int micro_batt_remove(struct platform_device *pdev)
+@@ -251,6 +268,7 @@ static int micro_batt_remove(struct platform_device *pdev)
+ power_supply_unregister(&micro_ac_power);
+ power_supply_unregister(&micro_batt_power);
+ cancel_delayed_work_sync(&mb->update);
++ destroy_workqueue(mb->wq);
+
+ return 0;
+ }
+diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
+index 21fc233..176dab2 100644
+--- a/drivers/power/lp8788-charger.c
++++ b/drivers/power/lp8788-charger.c
+@@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev,
+ pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
+ pchg->battery.get_property = lp8788_battery_get_property;
+
+- if (power_supply_register(&pdev->dev, &pchg->battery))
++ if (power_supply_register(&pdev->dev, &pchg->battery)) {
++ power_supply_unregister(&pchg->charger);
+ return -EPERM;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
+index 7ef445a..cf90760 100644
+--- a/drivers/power/twl4030_madc_battery.c
++++ b/drivers/power/twl4030_madc_battery.c
+@@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
+ {
+ struct twl4030_madc_battery *twl4030_madc_bat;
+ struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
++ int ret = 0;
+
+ twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
+ if (!twl4030_madc_bat)
+@@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
+
+ twl4030_madc_bat->pdata = pdata;
+ platform_set_drvdata(pdev, twl4030_madc_bat);
+- power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
++ ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
++ if (ret < 0)
++ kfree(twl4030_madc_bat);
+
+- return 0;
++ return ret;
+ }
+
+ static int twl4030_madc_battery_remove(struct platform_device *pdev)
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 675b5e7..5a0800d 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -1584,11 +1584,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
+ fp_possible = io_info.fpOkForIo;
+ }
+
+- /* Use smp_processor_id() for now until cmd->request->cpu is CPU
++ /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
+ id by default, not CPU group id, otherwise all MSI-X queues won't
+ be utilized */
+ cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
+- smp_processor_id() % instance->msix_vectors : 0;
++ raw_smp_processor_id() % instance->msix_vectors : 0;
+
+ if (fp_possible) {
+ megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
+@@ -1693,7 +1693,10 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+ cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+- instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
++ instance->msix_vectors ?
++ raw_smp_processor_id() %
++ instance->msix_vectors :
++ 0;
+ os_timeout_value = scmd->request->timeout / HZ;
+
+ if (instance->secure_jbod_support &&
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index 2d5ab6d..454536c 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
+ static int mvs_task_prep_ata(struct mvs_info *mvi,
+ struct mvs_task_exec_info *tei)
+ {
+- struct sas_ha_struct *sha = mvi->sas;
+ struct sas_task *task = tei->task;
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = dev->lldd_dev;
+ struct mvs_cmd_hdr *hdr = tei->hdr;
+ struct asd_sas_port *sas_port = dev->port;
+- struct sas_phy *sphy = dev->phy;
+- struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
+ struct mvs_slot_info *slot;
+ void *buf_prd;
+ u32 tag = tei->tag, hdr_tag;
+@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
+ slot->tx = mvi->tx_prod;
+ del_q = TXQ_MODE_I | tag |
+ (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+- (MVS_PHY_ID << TXQ_PHY_SHIFT) |
++ ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
+ (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
+ mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
+
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 6b78476..3290a3e 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3100,6 +3100,7 @@ static void scsi_disk_release(struct device *dev)
+ ida_remove(&sd_index_ida, sdkp->index);
+ spin_unlock(&sd_index_lock);
+
++ blk_integrity_unregister(disk);
+ disk->private_data = NULL;
+ put_disk(disk);
+ put_device(&sdkp->device->sdev_gendev);
+diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
+index 14c7d42..5c06d29 100644
+--- a/drivers/scsi/sd_dif.c
++++ b/drivers/scsi/sd_dif.c
+@@ -77,7 +77,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
+
+ disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+
+- if (!sdkp)
++ if (!sdkp->ATO)
+ return;
+
+ if (type == SD_DIF_TYPE3_PROTECTION)
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index efc6e44..bf8c5c1 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -746,21 +746,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
+ if (bounce_sgl[j].length == PAGE_SIZE) {
+ /* full..move to next entry */
+ sg_kunmap_atomic(bounce_addr);
++ bounce_addr = 0;
+ j++;
++ }
+
+- /* if we need to use another bounce buffer */
+- if (srclen || i != orig_sgl_count - 1)
+- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
++ /* if we need to use another bounce buffer */
++ if (srclen && bounce_addr == 0)
++ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+
+- } else if (srclen == 0 && i == orig_sgl_count - 1) {
+- /* unmap the last bounce that is < PAGE_SIZE */
+- sg_kunmap_atomic(bounce_addr);
+- }
+ }
+
+ sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
+ }
+
++ if (bounce_addr)
++ sg_kunmap_atomic(bounce_addr);
++
+ local_irq_restore(flags);
+
+ return total_copied;
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 6fea4af..aea3a67 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -370,8 +370,6 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
+ if (spi_imx->dma_is_inited) {
+ dma = readl(spi_imx->base + MX51_ECSPI_DMA);
+
+- spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+- spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
+ tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
+@@ -868,6 +866,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
+ master->max_dma_len = MAX_SDMA_BD_BYTES;
+ spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
+ SPI_MASTER_MUST_TX;
++ spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
++ spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ spi_imx->dma_is_inited = 1;
+
+ return 0;
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index 4eb7a98..7bf5186 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -245,7 +245,10 @@ static int spidev_message(struct spidev_data *spidev,
+ k_tmp->len = u_tmp->len;
+
+ total += k_tmp->len;
+- if (total > bufsiz) {
++ /* Check total length of transfers. Also check each
++ * transfer length to avoid arithmetic overflow.
++ */
++ if (total > bufsiz || k_tmp->len > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
+index 7bdb62b..f83e00c 100644
+--- a/drivers/staging/android/sync.c
++++ b/drivers/staging/android/sync.c
+@@ -114,7 +114,7 @@ void sync_timeline_signal(struct sync_timeline *obj)
+ list_for_each_entry_safe(pt, next, &obj->active_list_head,
+ active_list) {
+ if (fence_is_signaled_locked(&pt->base))
+- list_del(&pt->active_list);
++ list_del_init(&pt->active_list);
+ }
+
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
+index 6ed35b6..04fc217 100644
+--- a/drivers/staging/panel/panel.c
++++ b/drivers/staging/panel/panel.c
+@@ -335,11 +335,11 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
+ * LCD types
+ */
+ #define LCD_TYPE_NONE 0
+-#define LCD_TYPE_OLD 1
+-#define LCD_TYPE_KS0074 2
+-#define LCD_TYPE_HANTRONIX 3
+-#define LCD_TYPE_NEXCOM 4
+-#define LCD_TYPE_CUSTOM 5
++#define LCD_TYPE_CUSTOM 1
++#define LCD_TYPE_OLD 2
++#define LCD_TYPE_KS0074 3
++#define LCD_TYPE_HANTRONIX 4
++#define LCD_TYPE_NEXCOM 5
+
+ /*
+ * keypad types
+@@ -502,7 +502,7 @@ MODULE_PARM_DESC(keypad_type,
+ static int lcd_type = NOT_SET;
+ module_param(lcd_type, int, 0000);
+ MODULE_PARM_DESC(lcd_type,
+- "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
++ "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");
+
+ static int lcd_height = NOT_SET;
+ module_param(lcd_height, int, 0000);
+diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
+index 07ce3fd..fdf5c56 100644
+--- a/drivers/staging/vt6655/rxtx.c
++++ b/drivers/staging/vt6655/rxtx.c
+@@ -1308,10 +1308,18 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
+ priv->hw->conf.chandef.chan->hw_value);
+ }
+
+- if (current_rate > RATE_11M)
+- pkt_type = (u8)priv->byPacketType;
+- else
++ if (current_rate > RATE_11M) {
++ if (info->band == IEEE80211_BAND_5GHZ) {
++ pkt_type = PK_TYPE_11A;
++ } else {
++ if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
++ pkt_type = PK_TYPE_11GB;
++ else
++ pkt_type = PK_TYPE_11GA;
++ }
++ } else {
+ pkt_type = PK_TYPE_11B;
++ }
+
+ /*Set fifo controls */
+ if (pkt_type == PK_TYPE_11A)
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 77d6425..5e35612 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -537,7 +537,7 @@ static struct iscsit_transport iscsi_target_transport = {
+
+ static int __init iscsi_target_init_module(void)
+ {
+- int ret = 0;
++ int ret = 0, size;
+
+ pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
+
+@@ -546,6 +546,7 @@ static int __init iscsi_target_init_module(void)
+ pr_err("Unable to allocate memory for iscsit_global\n");
+ return -1;
+ }
++ spin_lock_init(&iscsit_global->ts_bitmap_lock);
+ mutex_init(&auth_id_lock);
+ spin_lock_init(&sess_idr_lock);
+ idr_init(&tiqn_idr);
+@@ -555,15 +556,11 @@ static int __init iscsi_target_init_module(void)
+ if (ret < 0)
+ goto out;
+
+- ret = iscsi_thread_set_init();
+- if (ret < 0)
++ size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
++ iscsit_global->ts_bitmap = vzalloc(size);
++ if (!iscsit_global->ts_bitmap) {
++ pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
+ goto configfs_out;
+-
+- if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
+- TARGET_THREAD_SET_COUNT) {
+- pr_err("iscsi_allocate_thread_sets() returned"
+- " unexpected value!\n");
+- goto ts_out1;
+ }
+
+ lio_qr_cache = kmem_cache_create("lio_qr_cache",
+@@ -572,7 +569,7 @@ static int __init iscsi_target_init_module(void)
+ if (!lio_qr_cache) {
+ pr_err("nable to kmem_cache_create() for"
+ " lio_qr_cache\n");
+- goto ts_out2;
++ goto bitmap_out;
+ }
+
+ lio_dr_cache = kmem_cache_create("lio_dr_cache",
+@@ -617,10 +614,8 @@ dr_out:
+ kmem_cache_destroy(lio_dr_cache);
+ qr_out:
+ kmem_cache_destroy(lio_qr_cache);
+-ts_out2:
+- iscsi_deallocate_thread_sets();
+-ts_out1:
+- iscsi_thread_set_free();
++bitmap_out:
++ vfree(iscsit_global->ts_bitmap);
+ configfs_out:
+ iscsi_target_deregister_configfs();
+ out:
+@@ -630,8 +625,6 @@ out:
+
+ static void __exit iscsi_target_cleanup_module(void)
+ {
+- iscsi_deallocate_thread_sets();
+- iscsi_thread_set_free();
+ iscsit_release_discovery_tpg();
+ iscsit_unregister_transport(&iscsi_target_transport);
+ kmem_cache_destroy(lio_qr_cache);
+@@ -641,6 +634,7 @@ static void __exit iscsi_target_cleanup_module(void)
+
+ iscsi_target_deregister_configfs();
+
++ vfree(iscsit_global->ts_bitmap);
+ kfree(iscsit_global);
+ }
+
+@@ -3715,17 +3709,16 @@ static int iscsit_send_reject(
+
+ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+ {
+- struct iscsi_thread_set *ts = conn->thread_set;
+ int ord, cpu;
+ /*
+- * thread_id is assigned from iscsit_global->ts_bitmap from
+- * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
++ * bitmap_id is assigned from iscsit_global->ts_bitmap from
++ * within iscsit_start_kthreads()
+ *
+- * Here we use thread_id to determine which CPU that this
+- * iSCSI connection's iscsi_thread_set will be scheduled to
++ * Here we use bitmap_id to determine which CPU that this
++ * iSCSI connection's RX/TX threads will be scheduled to
+ * execute upon.
+ */
+- ord = ts->thread_id % cpumask_weight(cpu_online_mask);
++ ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
+ for_each_online_cpu(cpu) {
+ if (ord-- == 0) {
+ cpumask_set_cpu(cpu, conn->conn_cpumask);
+@@ -3914,7 +3907,7 @@ check_rsp_state:
+ switch (state) {
+ case ISTATE_SEND_LOGOUTRSP:
+ if (!iscsit_logout_post_handler(cmd, conn))
+- goto restart;
++ return -ECONNRESET;
+ /* fall through */
+ case ISTATE_SEND_STATUS:
+ case ISTATE_SEND_ASYNCMSG:
+@@ -3942,8 +3935,6 @@ check_rsp_state:
+
+ err:
+ return -1;
+-restart:
+- return -EAGAIN;
+ }
+
+ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
+@@ -3970,21 +3961,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
+ int iscsi_target_tx_thread(void *arg)
+ {
+ int ret = 0;
+- struct iscsi_conn *conn;
+- struct iscsi_thread_set *ts = arg;
++ struct iscsi_conn *conn = arg;
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+ */
+ allow_signal(SIGINT);
+
+-restart:
+- conn = iscsi_tx_thread_pre_handler(ts);
+- if (!conn)
+- goto out;
+-
+- ret = 0;
+-
+ while (!kthread_should_stop()) {
+ /*
+ * Ensure that both TX and RX per connection kthreads
+@@ -3993,11 +3976,9 @@ restart:
+ iscsit_thread_check_cpumask(conn, current, 1);
+
+ wait_event_interruptible(conn->queues_wq,
+- !iscsit_conn_all_queues_empty(conn) ||
+- ts->status == ISCSI_THREAD_SET_RESET);
++ !iscsit_conn_all_queues_empty(conn));
+
+- if ((ts->status == ISCSI_THREAD_SET_RESET) ||
+- signal_pending(current))
++ if (signal_pending(current))
+ goto transport_err;
+
+ get_immediate:
+@@ -4008,15 +3989,14 @@ get_immediate:
+ ret = iscsit_handle_response_queue(conn);
+ if (ret == 1)
+ goto get_immediate;
+- else if (ret == -EAGAIN)
+- goto restart;
++ else if (ret == -ECONNRESET)
++ goto out;
+ else if (ret < 0)
+ goto transport_err;
+ }
+
+ transport_err:
+ iscsit_take_action_for_connection_exit(conn);
+- goto restart;
+ out:
+ return 0;
+ }
+@@ -4111,8 +4091,7 @@ int iscsi_target_rx_thread(void *arg)
+ int ret;
+ u8 buffer[ISCSI_HDR_LEN], opcode;
+ u32 checksum = 0, digest = 0;
+- struct iscsi_conn *conn = NULL;
+- struct iscsi_thread_set *ts = arg;
++ struct iscsi_conn *conn = arg;
+ struct kvec iov;
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+@@ -4120,11 +4099,6 @@ int iscsi_target_rx_thread(void *arg)
+ */
+ allow_signal(SIGINT);
+
+-restart:
+- conn = iscsi_rx_thread_pre_handler(ts);
+- if (!conn)
+- goto out;
+-
+ if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+ struct completion comp;
+ int rc;
+@@ -4134,7 +4108,7 @@ restart:
+ if (rc < 0)
+ goto transport_err;
+
+- goto out;
++ goto transport_err;
+ }
+
+ while (!kthread_should_stop()) {
+@@ -4210,8 +4184,6 @@ transport_err:
+ if (!signal_pending(current))
+ atomic_set(&conn->transport_failed, 1);
+ iscsit_take_action_for_connection_exit(conn);
+- goto restart;
+-out:
+ return 0;
+ }
+
+@@ -4273,7 +4245,24 @@ int iscsit_close_connection(
+ if (conn->conn_transport->transport_type == ISCSI_TCP)
+ complete(&conn->conn_logout_comp);
+
+- iscsi_release_thread_set(conn);
++ if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
++ if (conn->tx_thread &&
++ cmpxchg(&conn->tx_thread_active, true, false)) {
++ send_sig(SIGINT, conn->tx_thread, 1);
++ kthread_stop(conn->tx_thread);
++ }
++ } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
++ if (conn->rx_thread &&
++ cmpxchg(&conn->rx_thread_active, true, false)) {
++ send_sig(SIGINT, conn->rx_thread, 1);
++ kthread_stop(conn->rx_thread);
++ }
++ }
++
++ spin_lock(&iscsit_global->ts_bitmap_lock);
++ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
++ get_order(1));
++ spin_unlock(&iscsit_global->ts_bitmap_lock);
+
+ iscsit_stop_timers_for_cmds(conn);
+ iscsit_stop_nopin_response_timer(conn);
+@@ -4551,15 +4540,13 @@ static void iscsit_logout_post_handler_closesession(
+ struct iscsi_conn *conn)
+ {
+ struct iscsi_session *sess = conn->sess;
+-
+- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
++ int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ complete(&conn->conn_logout_comp);
+
+ iscsit_dec_conn_usage_count(conn);
+- iscsit_stop_session(sess, 1, 1);
++ iscsit_stop_session(sess, sleep, sleep);
+ iscsit_dec_session_usage_count(sess);
+ target_put_session(sess->se_sess);
+ }
+@@ -4567,13 +4554,12 @@ static void iscsit_logout_post_handler_closesession(
+ static void iscsit_logout_post_handler_samecid(
+ struct iscsi_conn *conn)
+ {
+- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
++ int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ complete(&conn->conn_logout_comp);
+
+- iscsit_cause_connection_reinstatement(conn, 1);
++ iscsit_cause_connection_reinstatement(conn, sleep);
+ iscsit_dec_conn_usage_count(conn);
+ }
+
+diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
+index bdd8731..e008ed2 100644
+--- a/drivers/target/iscsi/iscsi_target_erl0.c
++++ b/drivers/target/iscsi/iscsi_target_erl0.c
+@@ -860,7 +860,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
+ }
+ spin_unlock_bh(&conn->state_lock);
+
+- iscsi_thread_set_force_reinstatement(conn);
++ if (conn->tx_thread && conn->tx_thread_active)
++ send_sig(SIGINT, conn->tx_thread, 1);
++ if (conn->rx_thread && conn->rx_thread_active)
++ send_sig(SIGINT, conn->rx_thread, 1);
+
+ sleep:
+ wait_for_completion(&conn->conn_wait_rcfr_comp);
+@@ -885,10 +888,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
+ return;
+ }
+
+- if (iscsi_thread_set_force_reinstatement(conn) < 0) {
+- spin_unlock_bh(&conn->state_lock);
+- return;
+- }
++ if (conn->tx_thread && conn->tx_thread_active)
++ send_sig(SIGINT, conn->tx_thread, 1);
++ if (conn->rx_thread && conn->rx_thread_active)
++ send_sig(SIGINT, conn->rx_thread, 1);
+
+ atomic_set(&conn->connection_reinstatement, 1);
+ if (!sleep) {
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 153fb66..345f073 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -699,6 +699,51 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
+ iscsit_start_nopin_timer(conn);
+ }
+
++int iscsit_start_kthreads(struct iscsi_conn *conn)
++{
++ int ret = 0;
++
++ spin_lock(&iscsit_global->ts_bitmap_lock);
++ conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
++ ISCSIT_BITMAP_BITS, get_order(1));
++ spin_unlock(&iscsit_global->ts_bitmap_lock);
++
++ if (conn->bitmap_id < 0) {
++ pr_err("bitmap_find_free_region() failed for"
++ " iscsit_start_kthreads()\n");
++ return -ENOMEM;
++ }
++
++ conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
++ "%s", ISCSI_TX_THREAD_NAME);
++ if (IS_ERR(conn->tx_thread)) {
++ pr_err("Unable to start iscsi_target_tx_thread\n");
++ ret = PTR_ERR(conn->tx_thread);
++ goto out_bitmap;
++ }
++ conn->tx_thread_active = true;
++
++ conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
++ "%s", ISCSI_RX_THREAD_NAME);
++ if (IS_ERR(conn->rx_thread)) {
++ pr_err("Unable to start iscsi_target_rx_thread\n");
++ ret = PTR_ERR(conn->rx_thread);
++ goto out_tx;
++ }
++ conn->rx_thread_active = true;
++
++ return 0;
++out_tx:
++ kthread_stop(conn->tx_thread);
++ conn->tx_thread_active = false;
++out_bitmap:
++ spin_lock(&iscsit_global->ts_bitmap_lock);
++ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
++ get_order(1));
++ spin_unlock(&iscsit_global->ts_bitmap_lock);
++ return ret;
++}
++
+ int iscsi_post_login_handler(
+ struct iscsi_np *np,
+ struct iscsi_conn *conn,
+@@ -709,7 +754,7 @@ int iscsi_post_login_handler(
+ struct se_session *se_sess = sess->se_sess;
+ struct iscsi_portal_group *tpg = sess->tpg;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+- struct iscsi_thread_set *ts;
++ int rc;
+
+ iscsit_inc_conn_usage_count(conn);
+
+@@ -724,7 +769,6 @@ int iscsi_post_login_handler(
+ /*
+ * SCSI Initiator -> SCSI Target Port Mapping
+ */
+- ts = iscsi_get_thread_set();
+ if (!zero_tsih) {
+ iscsi_set_session_parameters(sess->sess_ops,
+ conn->param_list, 0);
+@@ -751,9 +795,11 @@ int iscsi_post_login_handler(
+ sess->sess_ops->InitiatorName);
+ spin_unlock_bh(&sess->conn_lock);
+
+- iscsi_post_login_start_timers(conn);
++ rc = iscsit_start_kthreads(conn);
++ if (rc)
++ return rc;
+
+- iscsi_activate_thread_set(conn, ts);
++ iscsi_post_login_start_timers(conn);
+ /*
+ * Determine CPU mask to ensure connection's RX and TX kthreads
+ * are scheduled on the same CPU.
+@@ -810,8 +856,11 @@ int iscsi_post_login_handler(
+ " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
+ spin_unlock_bh(&se_tpg->session_lock);
+
++ rc = iscsit_start_kthreads(conn);
++ if (rc)
++ return rc;
++
+ iscsi_post_login_start_timers(conn);
+- iscsi_activate_thread_set(conn, ts);
+ /*
+ * Determine CPU mask to ensure connection's RX and TX kthreads
+ * are scheduled on the same CPU.
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 44620fb..cbb0cc2 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -264,40 +264,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+ struct se_device *se_dev = cmd->se_dev;
+ struct fd_dev *dev = FD_DEV(se_dev);
+ struct file *prot_fd = dev->fd_prot_file;
+- struct scatterlist *sg;
+ loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
+ unsigned char *buf;
+- u32 prot_size, len, size;
+- int rc, ret = 1, i;
++ u32 prot_size;
++ int rc, ret = 1;
+
+ prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
+ se_dev->prot_length;
+
+ if (!is_write) {
+- fd_prot->prot_buf = vzalloc(prot_size);
++ fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
+ if (!fd_prot->prot_buf) {
+ pr_err("Unable to allocate fd_prot->prot_buf\n");
+ return -ENOMEM;
+ }
+ buf = fd_prot->prot_buf;
+
+- fd_prot->prot_sg_nents = cmd->t_prot_nents;
+- fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
+- fd_prot->prot_sg_nents, GFP_KERNEL);
++ fd_prot->prot_sg_nents = 1;
++ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
++ GFP_KERNEL);
+ if (!fd_prot->prot_sg) {
+ pr_err("Unable to allocate fd_prot->prot_sg\n");
+- vfree(fd_prot->prot_buf);
++ kfree(fd_prot->prot_buf);
+ return -ENOMEM;
+ }
+- size = prot_size;
+-
+- for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
+-
+- len = min_t(u32, PAGE_SIZE, size);
+- sg_set_buf(sg, buf, len);
+- size -= len;
+- buf += len;
+- }
++ sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
++ sg_set_buf(fd_prot->prot_sg, buf, prot_size);
+ }
+
+ if (is_write) {
+@@ -318,7 +310,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+
+ if (is_write || ret < 0) {
+ kfree(fd_prot->prot_sg);
+- vfree(fd_prot->prot_buf);
++ kfree(fd_prot->prot_buf);
+ }
+
+ return ret;
+@@ -549,6 +541,56 @@ fd_execute_write_same(struct se_cmd *cmd)
+ return 0;
+ }
+
++static int
++fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
++ void *buf, size_t bufsize)
++{
++ struct fd_dev *fd_dev = FD_DEV(se_dev);
++ struct file *prot_fd = fd_dev->fd_prot_file;
++ sector_t prot_length, prot;
++ loff_t pos = lba * se_dev->prot_length;
++
++ if (!prot_fd) {
++ pr_err("Unable to locate fd_dev->fd_prot_file\n");
++ return -ENODEV;
++ }
++
++ prot_length = nolb * se_dev->prot_length;
++
++ for (prot = 0; prot < prot_length;) {
++ sector_t len = min_t(sector_t, bufsize, prot_length - prot);
++ ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);
++
++ if (ret != len) {
++ pr_err("vfs_write to prot file failed: %zd\n", ret);
++ return ret < 0 ? ret : -ENODEV;
++ }
++ prot += ret;
++ }
++
++ return 0;
++}
++
++static int
++fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
++{
++ void *buf;
++ int rc;
++
++ buf = (void *)__get_free_page(GFP_KERNEL);
++ if (!buf) {
++ pr_err("Unable to allocate FILEIO prot buf\n");
++ return -ENOMEM;
++ }
++ memset(buf, 0xff, PAGE_SIZE);
++
++ rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
++
++ free_page((unsigned long)buf);
++
++ return rc;
++}
++
+ static sense_reason_t
+ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+ {
+@@ -556,6 +598,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+ struct inode *inode = file->f_mapping->host;
+ int ret;
+
++ if (cmd->se_dev->dev_attrib.pi_prot_type) {
++ ret = fd_do_prot_unmap(cmd, lba, nolb);
++ if (ret)
++ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++ }
++
+ if (S_ISBLK(inode->i_mode)) {
+ /* The backend is block device, use discard */
+ struct block_device *bdev = inode->i_bdev;
+@@ -658,11 +706,11 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ return rc;
+ }
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ }
+ } else {
+ memset(&fd_prot, 0, sizeof(struct fd_prot));
+@@ -678,7 +726,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ return rc;
+ }
+ }
+@@ -714,7 +762,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+
+ if (ret < 0) {
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+@@ -878,48 +926,28 @@ static int fd_init_prot(struct se_device *dev)
+
+ static int fd_format_prot(struct se_device *dev)
+ {
+- struct fd_dev *fd_dev = FD_DEV(dev);
+- struct file *prot_fd = fd_dev->fd_prot_file;
+- sector_t prot_length, prot;
+ unsigned char *buf;
+- loff_t pos = 0;
+ int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
+- int rc, ret = 0, size, len;
++ int ret;
+
+ if (!dev->dev_attrib.pi_prot_type) {
+ pr_err("Unable to format_prot while pi_prot_type == 0\n");
+ return -ENODEV;
+ }
+- if (!prot_fd) {
+- pr_err("Unable to locate fd_dev->fd_prot_file\n");
+- return -ENODEV;
+- }
+
+ buf = vzalloc(unit_size);
+ if (!buf) {
+ pr_err("Unable to allocate FILEIO prot buf\n");
+ return -ENOMEM;
+ }
+- prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
+- size = prot_length;
+
+ pr_debug("Using FILEIO prot_length: %llu\n",
+- (unsigned long long)prot_length);
++ (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
++ dev->prot_length);
+
+ memset(buf, 0xff, unit_size);
+- for (prot = 0; prot < prot_length; prot += unit_size) {
+- len = min(unit_size, size);
+- rc = kernel_write(prot_fd, buf, len, pos);
+- if (rc != len) {
+- pr_err("vfs_write to prot file failed: %d\n", rc);
+- ret = -ENODEV;
+- goto out;
+- }
+- pos += len;
+- size -= len;
+- }
+-
+-out:
++ ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
++ buf, unit_size);
+ vfree(buf);
+ return ret;
+ }
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 3e72974..755bd9b3 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -312,7 +312,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ return 0;
+ }
+
+-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
+ {
+ unsigned char *buf, *addr;
+ struct scatterlist *sg;
+@@ -376,7 +376,7 @@ sbc_execute_rw(struct se_cmd *cmd)
+ cmd->data_direction);
+ }
+
+-static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ {
+ struct se_device *dev = cmd->se_dev;
+
+@@ -399,7 +399,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
+ return TCM_NO_SENSE;
+ }
+
+-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
+ {
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *write_sg = NULL, *sg;
+@@ -414,11 +414,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
+
+ /*
+ * Handle early failure in transport_generic_request_failure(),
+- * which will not have taken ->caw_mutex yet..
++ * which will not have taken ->caw_sem yet..
+ */
+- if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
++ if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
+ return TCM_NO_SENSE;
+ /*
++ * Handle special case for zero-length COMPARE_AND_WRITE
++ */
++ if (!cmd->data_length)
++ goto out;
++ /*
+ * Immediately exit + release dev->caw_sem if command has already
+ * been failed with a non-zero SCSI status.
+ */
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index ac3cbab..f786de0 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1615,11 +1615,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ transport_complete_task_attr(cmd);
+ /*
+ * Handle special case for COMPARE_AND_WRITE failure, where the
+- * callback is expected to drop the per device ->caw_mutex.
++ * callback is expected to drop the per device ->caw_sem.
+ */
+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ cmd->transport_complete_callback)
+- cmd->transport_complete_callback(cmd);
++ cmd->transport_complete_callback(cmd, false);
+
+ switch (sense_reason) {
+ case TCM_NON_EXISTENT_LUN:
+@@ -1975,8 +1975,12 @@ static void target_complete_ok_work(struct work_struct *work)
+ if (cmd->transport_complete_callback) {
+ sense_reason_t rc;
+
+- rc = cmd->transport_complete_callback(cmd);
++ rc = cmd->transport_complete_callback(cmd, true);
+ if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
++ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
++ !cmd->data_length)
++ goto queue_rsp;
++
+ return;
+ } else if (rc) {
+ ret = transport_send_check_condition_and_sense(cmd,
+@@ -1990,6 +1994,7 @@ static void target_complete_ok_work(struct work_struct *work)
+ }
+ }
+
++queue_rsp:
+ switch (cmd->data_direction) {
+ case DMA_FROM_DEVICE:
+ spin_lock(&cmd->se_lun->lun_sep_lock);
+@@ -2094,6 +2099,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
+ static inline void transport_free_pages(struct se_cmd *cmd)
+ {
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
++ /*
++ * Release special case READ buffer payload required for
++ * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
++ */
++ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
++ transport_free_sgl(cmd->t_bidi_data_sg,
++ cmd->t_bidi_data_nents);
++ cmd->t_bidi_data_sg = NULL;
++ cmd->t_bidi_data_nents = 0;
++ }
+ transport_reset_sgl_orig(cmd);
+ return;
+ }
+@@ -2246,6 +2261,7 @@ sense_reason_t
+ transport_generic_new_cmd(struct se_cmd *cmd)
+ {
+ int ret = 0;
++ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+
+ /*
+ * Determine is the TCM fabric module has already allocated physical
+@@ -2254,7 +2270,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ */
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
+ cmd->data_length) {
+- bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+
+ if ((cmd->se_cmd_flags & SCF_BIDI) ||
+ (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
+@@ -2285,6 +2300,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ cmd->data_length, zero_flag);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++ } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
++ cmd->data_length) {
++ /*
++ * Special case for COMPARE_AND_WRITE with fabrics
++ * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
++ */
++ u32 caw_length = cmd->t_task_nolb *
++ cmd->se_dev->dev_attrib.block_size;
++
++ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
++ &cmd->t_bidi_data_nents,
++ caw_length, zero_flag);
++ if (ret < 0)
++ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ /*
+ * If this command is not a write we can execute it right here,
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index deae122..d465ace 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -3444,7 +3444,8 @@ void serial8250_suspend_port(int line)
+ port->type != PORT_8250) {
+ unsigned char canary = 0xa5;
+ serial_out(up, UART_SCR, canary);
+- up->canary = canary;
++ if (serial_in(up, UART_SCR) == canary)
++ up->canary = canary;
+ }
+
+ uart_suspend_port(&serial8250_reg, port);
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 6ae5b85..7a80250 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -629,6 +629,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
+ { "80860F0A", 0 },
+ { "8086228A", 0 },
+ { "APMC0D08", 0},
++ { "AMD0020", 0 },
+ { },
+ };
+ MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 0eb29b1..2306191 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -818,7 +818,7 @@ static irqreturn_t imx_int(int irq, void *dev_id)
+ if (sts2 & USR2_ORE) {
+ dev_err(sport->port.dev, "Rx FIFO overrun\n");
+ sport->port.icount.overrun++;
+- writel(sts2 | USR2_ORE, sport->port.membase + USR2);
++ writel(USR2_ORE, sport->port.membase + USR2);
+ }
+
+ return IRQ_HANDLED;
+@@ -1181,10 +1181,12 @@ static int imx_startup(struct uart_port *port)
+ imx_uart_dma_init(sport);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
++
+ /*
+ * Finally, clear and enable interrupts
+ */
+ writel(USR1_RTSD, sport->port.membase + USR1);
++ writel(USR2_ORE, sport->port.membase + USR2);
+
+ if (sport->dma_is_inited && !sport->dma_is_enabled)
+ imx_enable_dma(sport);
+@@ -1199,10 +1201,6 @@ static int imx_startup(struct uart_port *port)
+
+ writel(temp, sport->port.membase + UCR1);
+
+- /* Clear any pending ORE flag before enabling interrupt */
+- temp = readl(sport->port.membase + USR2);
+- writel(temp | USR2_ORE, sport->port.membase + USR2);
+-
+ temp = readl(sport->port.membase + UCR4);
+ temp |= UCR4_OREN;
+ writel(temp, sport->port.membase + UCR4);
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index a051a7a..a81f9dd 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -245,7 +245,7 @@ static void wdm_int_callback(struct urb *urb)
+ case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
+ dev_dbg(&desc->intf->dev,
+ "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
+- dr->wIndex, dr->wLength);
++ le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
+ break;
+
+ case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+@@ -262,7 +262,9 @@ static void wdm_int_callback(struct urb *urb)
+ clear_bit(WDM_POLL_RUNNING, &desc->flags);
+ dev_err(&desc->intf->dev,
+ "unknown notification %d received: index %d len %d\n",
+- dr->bNotificationType, dr->wIndex, dr->wLength);
++ dr->bNotificationType,
++ le16_to_cpu(dr->wIndex),
++ le16_to_cpu(dr->wLength));
+ goto exit;
+ }
+
+@@ -408,7 +410,7 @@ static ssize_t wdm_write
+ USB_RECIP_INTERFACE);
+ req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
+ req->wValue = 0;
+- req->wIndex = desc->inum;
++ req->wIndex = desc->inum; /* already converted */
+ req->wLength = cpu_to_le16(count);
+ set_bit(WDM_IN_USE, &desc->flags);
+ desc->outbuf = buf;
+@@ -422,7 +424,7 @@ static ssize_t wdm_write
+ rv = usb_translate_errors(rv);
+ } else {
+ dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
+- req->wIndex);
++ le16_to_cpu(req->wIndex));
+ }
+ out:
+ usb_autopm_put_interface(desc->intf);
+@@ -820,7 +822,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
+ desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
+ desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
+ desc->irq->wValue = 0;
+- desc->irq->wIndex = desc->inum;
++ desc->irq->wIndex = desc->inum; /* already converted */
+ desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);
+
+ usb_fill_control_urb(
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index d7c3d5a..3b71516 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3406,10 +3406,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ if (status) {
+ dev_dbg(&port_dev->dev, "can't resume, status %d\n", status);
+ } else {
+- /* drive resume for at least 20 msec */
++ /* drive resume for USB_RESUME_TIMEOUT msec */
+ dev_dbg(&udev->dev, "usb %sresume\n",
+ (PMSG_IS_AUTO(msg) ? "auto-" : ""));
+- msleep(25);
++ msleep(USB_RESUME_TIMEOUT);
+
+ /* Virtual root hubs can trigger on GET_PORT_STATUS to
+ * stop resume signaling. Then finish the resume
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index c78c874..758b7e0 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -1521,7 +1521,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
+ writel(0, hsotg->regs + PCGCTL);
+- usleep_range(20000, 40000);
++ msleep(USB_RESUME_TIMEOUT);
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_RES;
+diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
+index 9054598..6385c19 100644
+--- a/drivers/usb/gadget/legacy/printer.c
++++ b/drivers/usb/gadget/legacy/printer.c
+@@ -1031,6 +1031,15 @@ unknown:
+ break;
+ }
+ /* host either stalls (value < 0) or reports success */
++ if (value >= 0) {
++ req->length = value;
++ req->zero = value < wLength;
++ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
++ if (value < 0) {
++ ERROR(dev, "%s:%d Error!\n", __func__, __LINE__);
++ req->status = 0;
++ }
++ }
+ return value;
+ }
+
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 85e56d1..f4d88df 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -792,12 +792,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+ ehci->reset_done[i] == 0))
+ continue;
+
+- /* start 20 msec resume signaling from this port,
+- * and make hub_wq collect PORT_STAT_C_SUSPEND to
+- * stop that signaling. Use 5 ms extra for safety,
+- * like usb_port_resume() does.
++ /* start USB_RESUME_TIMEOUT msec resume signaling from
++ * this port, and make hub_wq collect
++ * PORT_STAT_C_SUSPEND to stop that signaling.
+ */
+- ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
++ ehci->reset_done[i] = jiffies +
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ set_bit(i, &ehci->resuming_ports);
+ ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
+ usb_hcd_start_port_resume(&hcd->self, i);
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 87cf86f..7354d01 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -471,10 +471,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
+ ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
+ }
+
+- /* msleep for 20ms only if code is trying to resume port */
++ /*
++ * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume
++ * port
++ */
+ if (resume_needed) {
+ spin_unlock_irq(&ehci->lock);
+- msleep(20);
++ msleep(USB_RESUME_TIMEOUT);
+ spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto shutdown;
+@@ -942,7 +945,7 @@ int ehci_hub_control(
+ temp &= ~PORT_WAKE_BITS;
+ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+ ehci->reset_done[wIndex] = jiffies
+- + msecs_to_jiffies(20);
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ set_bit(wIndex, &ehci->resuming_ports);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
+ break;
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index 475b21f..7a6681f 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -1595,7 +1595,7 @@ static int fotg210_hub_control(
+ /* resume signaling for 20 msec */
+ fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
+ fotg210->reset_done[wIndex] = jiffies
+- + msecs_to_jiffies(20);
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fotg210->port_c_suspend);
+diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
+index a83eefe..ba77e2e 100644
+--- a/drivers/usb/host/fusbh200-hcd.c
++++ b/drivers/usb/host/fusbh200-hcd.c
+@@ -1550,10 +1550,9 @@ static int fusbh200_hub_control (
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+- /* resume signaling for 20 msec */
+ fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
+ fusbh200->reset_done[wIndex] = jiffies
+- + msecs_to_jiffies(20);
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fusbh200->port_c_suspend);
+diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
+index 113d0cc..9ef5644 100644
+--- a/drivers/usb/host/isp116x-hcd.c
++++ b/drivers/usb/host/isp116x-hcd.c
+@@ -1490,7 +1490,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd)
+ spin_unlock_irq(&isp116x->lock);
+
+ hcd->state = HC_STATE_RESUMING;
+- msleep(20);
++ msleep(USB_RESUME_TIMEOUT);
+
+ /* Go operational */
+ spin_lock_irq(&isp116x->lock);
+diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
+index ef7efb2..28a2866 100644
+--- a/drivers/usb/host/oxu210hp-hcd.c
++++ b/drivers/usb/host/oxu210hp-hcd.c
+@@ -2500,11 +2500,12 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
+ || oxu->reset_done[i] != 0)
+ continue;
+
+- /* start 20 msec resume signaling from this port,
+- * and make hub_wq collect PORT_STAT_C_SUSPEND to
++ /* start USB_RESUME_TIMEOUT resume signaling from this
++ * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
+ * stop that signaling.
+ */
+- oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
++ oxu->reset_done[i] = jiffies +
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
+ mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
+ }
+diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
+index bdc82fe..54a4170 100644
+--- a/drivers/usb/host/r8a66597-hcd.c
++++ b/drivers/usb/host/r8a66597-hcd.c
+@@ -2301,7 +2301,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
+ rh->port &= ~USB_PORT_STAT_SUSPEND;
+ rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
+ r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
+- msleep(50);
++ msleep(USB_RESUME_TIMEOUT);
+ r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
+ }
+
+diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
+index 4f4ba1e..9118cd8 100644
+--- a/drivers/usb/host/sl811-hcd.c
++++ b/drivers/usb/host/sl811-hcd.c
+@@ -1259,7 +1259,7 @@ sl811h_hub_control(
+ sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+
+ mod_timer(&sl811->timer, jiffies
+- + msecs_to_jiffies(20));
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ break;
+ case USB_PORT_FEAT_POWER:
+ port_power(sl811, 0);
+diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
+index 19ba5ea..7b3d1af 100644
+--- a/drivers/usb/host/uhci-hub.c
++++ b/drivers/usb/host/uhci-hub.c
+@@ -166,7 +166,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
+ /* Port received a wakeup request */
+ set_bit(port, &uhci->resuming_ports);
+ uhci->ports_timeout = jiffies +
+- msecs_to_jiffies(25);
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ usb_hcd_start_port_resume(
+ &uhci_to_hcd(uhci)->self, port);
+
+@@ -338,7 +338,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ uhci_finish_suspend(uhci, port, port_addr);
+
+ /* USB v2.0 7.1.7.5 */
+- uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
++ uhci->ports_timeout = jiffies +
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ break;
+ case USB_PORT_FEAT_POWER:
+ /* UHCI has no power switching */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 73485fa..eeedde8 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1574,7 +1574,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ } else {
+ xhci_dbg(xhci, "resume HS port %d\n", port_id);
+ bus_state->resume_done[faked_port_index] = jiffies +
+- msecs_to_jiffies(20);
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ set_bit(faked_port_index, &bus_state->resuming_ports);
+ mod_timer(&hcd->rh_timer,
+ bus_state->resume_done[faked_port_index]);
+diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
+index 3cb98b1..7911b6b 100644
+--- a/drivers/usb/isp1760/isp1760-hcd.c
++++ b/drivers/usb/isp1760/isp1760-hcd.c
+@@ -1869,7 +1869,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp | PORT_RESUME);
+ priv->reset_done = jiffies +
+- msecs_to_jiffies(20);
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ }
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 067920f..ec0ee3b 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -99,6 +99,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
+ #include <linux/dma-mapping.h>
++#include <linux/usb.h>
+
+ #include "musb_core.h"
+
+@@ -562,7 +563,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ (USB_PORT_STAT_C_SUSPEND << 16)
+ | MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies
+- + msecs_to_jiffies(20);
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ musb->need_finish_resume = 1;
+
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
+@@ -1597,16 +1598,30 @@ irqreturn_t musb_interrupt(struct musb *musb)
+ is_host_active(musb) ? "host" : "peripheral",
+ musb->int_usb, musb->int_tx, musb->int_rx);
+
+- /* the core can interrupt us for multiple reasons; docs have
+- * a generic interrupt flowchart to follow
++ /**
++ * According to Mentor Graphics' documentation, flowchart on page 98,
++ * IRQ should be handled as follows:
++ *
++ * . Resume IRQ
++ * . Session Request IRQ
++ * . VBUS Error IRQ
++ * . Suspend IRQ
++ * . Connect IRQ
++ * . Disconnect IRQ
++ * . Reset/Babble IRQ
++ * . SOF IRQ (we're not using this one)
++ * . Endpoint 0 IRQ
++ * . TX Endpoints
++ * . RX Endpoints
++ *
++ * We will be following that flowchart in order to avoid any problems
++ * that might arise with internal Finite State Machine.
+ */
++
+ if (musb->int_usb)
+ retval |= musb_stage0_irq(musb, musb->int_usb,
+ devctl);
+
+- /* "stage 1" is handling endpoint irqs */
+-
+- /* handle endpoint 0 first */
+ if (musb->int_tx & 1) {
+ if (is_host_active(musb))
+ retval |= musb_h_ep0_irq(musb);
+@@ -1614,37 +1629,31 @@ irqreturn_t musb_interrupt(struct musb *musb)
+ retval |= musb_g_ep0_irq(musb);
+ }
+
+- /* RX on endpoints 1-15 */
+- reg = musb->int_rx >> 1;
++ reg = musb->int_tx >> 1;
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+- /* musb_ep_select(musb->mregs, ep_num); */
+- /* REVISIT just retval = ep->rx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (is_host_active(musb))
+- musb_host_rx(musb, ep_num);
++ musb_host_tx(musb, ep_num);
+ else
+- musb_g_rx(musb, ep_num);
++ musb_g_tx(musb, ep_num);
+ }
+-
+ reg >>= 1;
+ ep_num++;
+ }
+
+- /* TX on endpoints 1-15 */
+- reg = musb->int_tx >> 1;
++ reg = musb->int_rx >> 1;
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+- /* musb_ep_select(musb->mregs, ep_num); */
+- /* REVISIT just retval |= ep->tx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (is_host_active(musb))
+- musb_host_tx(musb, ep_num);
++ musb_host_rx(musb, ep_num);
+ else
+- musb_g_tx(musb, ep_num);
++ musb_g_rx(musb, ep_num);
+ }
++
+ reg >>= 1;
+ ep_num++;
+ }
+@@ -2463,7 +2472,7 @@ static int musb_resume(struct device *dev)
+ if (musb->need_finish_resume) {
+ musb->need_finish_resume = 0;
+ schedule_delayed_work(&musb->finish_resume_work,
+- msecs_to_jiffies(20));
++ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ }
+
+ /*
+@@ -2506,7 +2515,7 @@ static int musb_runtime_resume(struct device *dev)
+ if (musb->need_finish_resume) {
+ musb->need_finish_resume = 0;
+ schedule_delayed_work(&musb->finish_resume_work,
+- msecs_to_jiffies(20));
++ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ }
+
+ return 0;
+diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
+index 294e159..5428ed1 100644
+--- a/drivers/usb/musb/musb_virthub.c
++++ b/drivers/usb/musb/musb_virthub.c
+@@ -136,7 +136,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
+ /* later, GetPortStatus will stop RESUME signaling */
+ musb->port1_status |= MUSB_PORT_STAT_RESUME;
+ schedule_delayed_work(&musb->finish_resume_work,
+- msecs_to_jiffies(20));
++ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ }
+ }
+
+diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
+index 2f9735b..d1cd6b5 100644
+--- a/drivers/usb/phy/phy.c
++++ b/drivers/usb/phy/phy.c
+@@ -81,7 +81,9 @@ static void devm_usb_phy_release(struct device *dev, void *res)
+
+ static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
+ {
+- return res == match_data;
++ struct usb_phy **phy = res;
++
++ return *phy == match_data;
+ }
+
+ /**
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 995986b..d925f55 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -862,6 +862,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+ int elf_prot = 0, elf_flags;
+ unsigned long k, vaddr;
++ unsigned long total_size = 0;
+
+ if (elf_ppnt->p_type != PT_LOAD)
+ continue;
+@@ -924,10 +925,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ #else
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++ total_size = total_mapping_size(elf_phdata,
++ loc->elf_ex.e_phnum);
++ if (!total_size) {
++ error = -EINVAL;
++ goto out_free_dentry;
++ }
+ }
+
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+- elf_prot, elf_flags, 0);
++ elf_prot, elf_flags, total_size);
+ if (BAD_ADDR(error)) {
+ retval = IS_ERR((void *)error) ?
+ PTR_ERR((void*)error) : -EINVAL;
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 8b353ad..0a795c9 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6956,12 +6956,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+ return -ENOSPC;
+ }
+
+- if (btrfs_test_opt(root, DISCARD))
+- ret = btrfs_discard_extent(root, start, len, NULL);
+-
+ if (pin)
+ pin_down_extent(root, cache, start, len, 1);
+ else {
++ if (btrfs_test_opt(root, DISCARD))
++ ret = btrfs_discard_extent(root, start, len, NULL);
+ btrfs_add_free_space(cache, start, len);
+ btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
+ }
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 74609b9..f23d4be 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2897,6 +2897,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
+ if (src == dst)
+ return -EINVAL;
+
++ if (len == 0)
++ return 0;
++
+ btrfs_double_lock(src, loff, dst, dst_loff, len);
+
+ ret = extent_same_check_offsets(src, loff, len);
+@@ -3626,6 +3629,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
+ if (off + len == src->i_size)
+ len = ALIGN(src->i_size, bs) - off;
+
++ if (len == 0) {
++ ret = 0;
++ goto out_unlock;
++ }
++
+ /* verify the end result is block aligned */
+ if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
+ !IS_ALIGNED(destoff, bs))
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index 883b936..45ea704 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -364,22 +364,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
+ /*
+ * Check if the attribute is in a supported namespace.
+ *
+- * This applied after the check for the synthetic attributes in the system
++ * This is applied after the check for the synthetic attributes in the system
+ * namespace.
+ */
+-static bool btrfs_is_valid_xattr(const char *name)
++static int btrfs_is_valid_xattr(const char *name)
+ {
+- return !strncmp(name, XATTR_SECURITY_PREFIX,
+- XATTR_SECURITY_PREFIX_LEN) ||
+- !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
+- !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
+- !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) ||
+- !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN);
++ int len = strlen(name);
++ int prefixlen = 0;
++
++ if (!strncmp(name, XATTR_SECURITY_PREFIX,
++ XATTR_SECURITY_PREFIX_LEN))
++ prefixlen = XATTR_SECURITY_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
++ prefixlen = XATTR_SYSTEM_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
++ prefixlen = XATTR_TRUSTED_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
++ prefixlen = XATTR_USER_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
++ prefixlen = XATTR_BTRFS_PREFIX_LEN;
++ else
++ return -EOPNOTSUPP;
++
++ /*
++	 * The name cannot consist of just a prefix
++ */
++ if (len <= prefixlen)
++ return -EINVAL;
++
++ return 0;
+ }
+
+ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size)
+ {
++ int ret;
++
+ /*
+ * If this is a request for a synthetic attribute in the system.*
+ * namespace use the generic infrastructure to resolve a handler
+@@ -388,8 +408,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return generic_getxattr(dentry, name, buffer, size);
+
+- if (!btrfs_is_valid_xattr(name))
+- return -EOPNOTSUPP;
++ ret = btrfs_is_valid_xattr(name);
++ if (ret)
++ return ret;
+ return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
+ }
+
+@@ -397,6 +418,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
+ {
+ struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
++ int ret;
+
+ /*
+ * The permission on security.* and system.* is not checked
+@@ -413,8 +435,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return generic_setxattr(dentry, name, value, size, flags);
+
+- if (!btrfs_is_valid_xattr(name))
+- return -EOPNOTSUPP;
++ ret = btrfs_is_valid_xattr(name);
++ if (ret)
++ return ret;
+
+ if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+ return btrfs_set_prop(dentry->d_inode, name,
+@@ -430,6 +453,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ int btrfs_removexattr(struct dentry *dentry, const char *name)
+ {
+ struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
++ int ret;
+
+ /*
+ * The permission on security.* and system.* is not checked
+@@ -446,8 +470,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return generic_removexattr(dentry, name);
+
+- if (!btrfs_is_valid_xattr(name))
+- return -EOPNOTSUPP;
++ ret = btrfs_is_valid_xattr(name);
++ if (ret)
++ return ret;
+
+ if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+ return btrfs_set_prop(dentry->d_inode, name,
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 28fe71a..aae7011 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1865,7 +1865,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ struct inode *inode)
+ {
+ struct inode *dir = dentry->d_parent->d_inode;
+- struct buffer_head *bh;
++ struct buffer_head *bh = NULL;
+ struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_tail *t;
+ struct super_block *sb;
+@@ -1889,14 +1889,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ return retval;
+ if (retval == 1) {
+ retval = 0;
+- return retval;
++ goto out;
+ }
+ }
+
+ if (is_dx(dir)) {
+ retval = ext4_dx_add_entry(handle, dentry, inode);
+ if (!retval || (retval != ERR_BAD_DX_DIR))
+- return retval;
++ goto out;
+ ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+ dx_fallback++;
+ ext4_mark_inode_dirty(handle, dir);
+@@ -1908,14 +1908,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ return PTR_ERR(bh);
+
+ retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+- if (retval != -ENOSPC) {
+- brelse(bh);
+- return retval;
+- }
++ if (retval != -ENOSPC)
++ goto out;
+
+ if (blocks == 1 && !dx_fallback &&
+- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
+- return make_indexed_dir(handle, dentry, inode, bh);
++ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
++ retval = make_indexed_dir(handle, dentry, inode, bh);
++ bh = NULL; /* make_indexed_dir releases bh */
++ goto out;
++ }
+ brelse(bh);
+ }
+ bh = ext4_append(handle, dir, &block);
+@@ -1931,6 +1932,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ }
+
+ retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
++out:
+ brelse(bh);
+ if (retval == 0)
+ ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
+diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
+index 665ef5a..a563ddb 100644
+--- a/fs/lockd/svcsubs.c
++++ b/fs/lockd/svcsubs.c
+@@ -31,7 +31,7 @@
+ static struct hlist_head nlm_files[FILE_NRHASH];
+ static DEFINE_MUTEX(nlm_file_mutex);
+
+-#ifdef NFSD_DEBUG
++#ifdef CONFIG_SUNRPC_DEBUG
+ static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
+ {
+ u32 *fhp = (u32*)f->data;
+diff --git a/fs/namei.c b/fs/namei.c
+index c83145a..caa38a2 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1591,7 +1591,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
+
+ if (should_follow_link(path->dentry, follow)) {
+ if (nd->flags & LOOKUP_RCU) {
+- if (unlikely(unlazy_walk(nd, path->dentry))) {
++ if (unlikely(nd->path.mnt != path->mnt ||
++ unlazy_walk(nd, path->dentry))) {
+ err = -ECHILD;
+ goto out_err;
+ }
+@@ -3047,7 +3048,8 @@ finish_lookup:
+
+ if (should_follow_link(path->dentry, !symlink_ok)) {
+ if (nd->flags & LOOKUP_RCU) {
+- if (unlikely(unlazy_walk(nd, path->dentry))) {
++ if (unlikely(nd->path.mnt != path->mnt ||
++ unlazy_walk(nd, path->dentry))) {
+ error = -ECHILD;
+ goto out;
+ }
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 82ef140..4622ee3 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -632,14 +632,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
+ */
+ struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
+ {
+- struct mount *p, *res;
+- res = p = __lookup_mnt(mnt, dentry);
++ struct mount *p, *res = NULL;
++ p = __lookup_mnt(mnt, dentry);
+ if (!p)
+ goto out;
++ if (!(p->mnt.mnt_flags & MNT_UMOUNT))
++ res = p;
+ hlist_for_each_entry_continue(p, mnt_hash) {
+ if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
+ break;
+- res = p;
++ if (!(p->mnt.mnt_flags & MNT_UMOUNT))
++ res = p;
+ }
+ out:
+ return res;
+@@ -795,10 +798,8 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
+ /*
+ * vfsmount lock must be held for write
+ */
+-static void detach_mnt(struct mount *mnt, struct path *old_path)
++static void unhash_mnt(struct mount *mnt)
+ {
+- old_path->dentry = mnt->mnt_mountpoint;
+- old_path->mnt = &mnt->mnt_parent->mnt;
+ mnt->mnt_parent = mnt;
+ mnt->mnt_mountpoint = mnt->mnt.mnt_root;
+ list_del_init(&mnt->mnt_child);
+@@ -811,6 +812,26 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
+ /*
+ * vfsmount lock must be held for write
+ */
++static void detach_mnt(struct mount *mnt, struct path *old_path)
++{
++ old_path->dentry = mnt->mnt_mountpoint;
++ old_path->mnt = &mnt->mnt_parent->mnt;
++ unhash_mnt(mnt);
++}
++
++/*
++ * vfsmount lock must be held for write
++ */
++static void umount_mnt(struct mount *mnt)
++{
++ /* old mountpoint will be dropped when we can do that */
++ mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
++ unhash_mnt(mnt);
++}
++
++/*
++ * vfsmount lock must be held for write
++ */
+ void mnt_set_mountpoint(struct mount *mnt,
+ struct mountpoint *mp,
+ struct mount *child_mnt)
+@@ -1078,6 +1099,13 @@ static void mntput_no_expire(struct mount *mnt)
+ rcu_read_unlock();
+
+ list_del(&mnt->mnt_instance);
++
++ if (unlikely(!list_empty(&mnt->mnt_mounts))) {
++ struct mount *p, *tmp;
++ list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
++ umount_mnt(p);
++ }
++ }
+ unlock_mount_hash();
+
+ if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
+@@ -1319,49 +1347,63 @@ static inline void namespace_lock(void)
+ down_write(&namespace_sem);
+ }
+
++enum umount_tree_flags {
++ UMOUNT_SYNC = 1,
++ UMOUNT_PROPAGATE = 2,
++ UMOUNT_CONNECTED = 4,
++};
+ /*
+ * mount_lock must be held
+ * namespace_sem must be held for write
+- * how = 0 => just this tree, don't propagate
+- * how = 1 => propagate; we know that nobody else has reference to any victims
+- * how = 2 => lazy umount
+ */
+-void umount_tree(struct mount *mnt, int how)
++static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
+ {
+- HLIST_HEAD(tmp_list);
++ LIST_HEAD(tmp_list);
+ struct mount *p;
+
++ if (how & UMOUNT_PROPAGATE)
++ propagate_mount_unlock(mnt);
++
++ /* Gather the mounts to umount */
+ for (p = mnt; p; p = next_mnt(p, mnt)) {
+- hlist_del_init_rcu(&p->mnt_hash);
+- hlist_add_head(&p->mnt_hash, &tmp_list);
++ p->mnt.mnt_flags |= MNT_UMOUNT;
++ list_move(&p->mnt_list, &tmp_list);
+ }
+
+- hlist_for_each_entry(p, &tmp_list, mnt_hash)
++ /* Hide the mounts from mnt_mounts */
++ list_for_each_entry(p, &tmp_list, mnt_list) {
+ list_del_init(&p->mnt_child);
++ }
+
+- if (how)
++	/* Add propagated mounts to the tmp_list */
++ if (how & UMOUNT_PROPAGATE)
+ propagate_umount(&tmp_list);
+
+- while (!hlist_empty(&tmp_list)) {
+- p = hlist_entry(tmp_list.first, struct mount, mnt_hash);
+- hlist_del_init_rcu(&p->mnt_hash);
++ while (!list_empty(&tmp_list)) {
++ bool disconnect;
++ p = list_first_entry(&tmp_list, struct mount, mnt_list);
+ list_del_init(&p->mnt_expire);
+ list_del_init(&p->mnt_list);
+ __touch_mnt_namespace(p->mnt_ns);
+ p->mnt_ns = NULL;
+- if (how < 2)
++ if (how & UMOUNT_SYNC)
+ p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
+
+- pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted);
++ disconnect = !(((how & UMOUNT_CONNECTED) &&
++ mnt_has_parent(p) &&
++ (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
++ IS_MNT_LOCKED_AND_LAZY(p));
++
++ pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
++ disconnect ? &unmounted : NULL);
+ if (mnt_has_parent(p)) {
+- hlist_del_init(&p->mnt_mp_list);
+- put_mountpoint(p->mnt_mp);
+ mnt_add_count(p->mnt_parent, -1);
+- /* old mountpoint will be dropped when we can do that */
+- p->mnt_ex_mountpoint = p->mnt_mountpoint;
+- p->mnt_mountpoint = p->mnt.mnt_root;
+- p->mnt_parent = p;
+- p->mnt_mp = NULL;
++ if (!disconnect) {
++ /* Don't forget about p */
++ list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
++ } else {
++ umount_mnt(p);
++ }
+ }
+ change_mnt_propagation(p, MS_PRIVATE);
+ }
+@@ -1447,14 +1489,14 @@ static int do_umount(struct mount *mnt, int flags)
+
+ if (flags & MNT_DETACH) {
+ if (!list_empty(&mnt->mnt_list))
+- umount_tree(mnt, 2);
++ umount_tree(mnt, UMOUNT_PROPAGATE);
+ retval = 0;
+ } else {
+ shrink_submounts(mnt);
+ retval = -EBUSY;
+ if (!propagate_mount_busy(mnt, 2)) {
+ if (!list_empty(&mnt->mnt_list))
+- umount_tree(mnt, 1);
++ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
+ retval = 0;
+ }
+ }
+@@ -1480,13 +1522,20 @@ void __detach_mounts(struct dentry *dentry)
+
+ namespace_lock();
+ mp = lookup_mountpoint(dentry);
+- if (!mp)
++ if (IS_ERR_OR_NULL(mp))
+ goto out_unlock;
+
+ lock_mount_hash();
+ while (!hlist_empty(&mp->m_list)) {
+ mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
+- umount_tree(mnt, 2);
++ if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
++ struct mount *p, *tmp;
++ list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
++ hlist_add_head(&p->mnt_umount.s_list, &unmounted);
++ umount_mnt(p);
++ }
++ }
++ else umount_tree(mnt, UMOUNT_CONNECTED);
+ }
+ unlock_mount_hash();
+ put_mountpoint(mp);
+@@ -1648,7 +1697,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
+ out:
+ if (res) {
+ lock_mount_hash();
+- umount_tree(res, 0);
++ umount_tree(res, UMOUNT_SYNC);
+ unlock_mount_hash();
+ }
+ return q;
+@@ -1672,7 +1721,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
+ {
+ namespace_lock();
+ lock_mount_hash();
+- umount_tree(real_mount(mnt), 0);
++ umount_tree(real_mount(mnt), UMOUNT_SYNC);
+ unlock_mount_hash();
+ namespace_unlock();
+ }
+@@ -1855,7 +1904,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
+ out_cleanup_ids:
+ while (!hlist_empty(&tree_list)) {
+ child = hlist_entry(tree_list.first, struct mount, mnt_hash);
+- umount_tree(child, 0);
++ umount_tree(child, UMOUNT_SYNC);
+ }
+ unlock_mount_hash();
+ cleanup_group_ids(source_mnt, NULL);
+@@ -2035,7 +2084,7 @@ static int do_loopback(struct path *path, const char *old_name,
+ err = graft_tree(mnt, parent, mp);
+ if (err) {
+ lock_mount_hash();
+- umount_tree(mnt, 0);
++ umount_tree(mnt, UMOUNT_SYNC);
+ unlock_mount_hash();
+ }
+ out2:
+@@ -2406,7 +2455,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
+ while (!list_empty(&graveyard)) {
+ mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
+ touch_mnt_namespace(mnt->mnt_ns);
+- umount_tree(mnt, 1);
++ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
+ }
+ unlock_mount_hash();
+ namespace_unlock();
+@@ -2477,7 +2526,7 @@ static void shrink_submounts(struct mount *mnt)
+ m = list_first_entry(&graveyard, struct mount,
+ mnt_expire);
+ touch_mnt_namespace(m->mnt_ns);
+- umount_tree(m, 1);
++ umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
+ }
+ }
+ }
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 351be920..8d129bb 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -128,7 +128,7 @@ nfs41_callback_svc(void *vrqstp)
+ if (try_to_freeze())
+ continue;
+
+- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
++ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
+ spin_lock_bh(&serv->sv_cb_lock);
+ if (!list_empty(&serv->sv_cb_list)) {
+ req = list_first_entry(&serv->sv_cb_list,
+@@ -142,10 +142,10 @@ nfs41_callback_svc(void *vrqstp)
+ error);
+ } else {
+ spin_unlock_bh(&serv->sv_cb_lock);
+- /* schedule_timeout to game the hung task watchdog */
+- schedule_timeout(60 * HZ);
++ schedule();
+ finish_wait(&serv->sv_cb_waitq, &wq);
+ }
++ flush_signals(current);
+ }
+ return 0;
+ }
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index e907c8c..ab21ef1 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -129,22 +129,25 @@ nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
+ int i;
+ ssize_t count;
+
+- WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count);
+-
+- count = dreq->mirrors[hdr->pgio_mirror_idx].count;
+- if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
+- count = hdr->io_start + hdr->good_bytes - dreq->io_start;
+- dreq->mirrors[hdr->pgio_mirror_idx].count = count;
+- }
+-
+- /* update the dreq->count by finding the minimum agreed count from all
+- * mirrors */
+- count = dreq->mirrors[0].count;
++ if (dreq->mirror_count == 1) {
++ dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
++ dreq->count += hdr->good_bytes;
++ } else {
++ /* mirrored writes */
++ count = dreq->mirrors[hdr->pgio_mirror_idx].count;
++ if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
++ count = hdr->io_start + hdr->good_bytes - dreq->io_start;
++ dreq->mirrors[hdr->pgio_mirror_idx].count = count;
++ }
++ /* update the dreq->count by finding the minimum agreed count from all
++ * mirrors */
++ count = dreq->mirrors[0].count;
+
+- for (i = 1; i < dreq->mirror_count; i++)
+- count = min(count, dreq->mirrors[i].count);
++ for (i = 1; i < dreq->mirror_count; i++)
++ count = min(count, dreq->mirrors[i].count);
+
+- dreq->count = count;
++ dreq->count = count;
++ }
+ }
+
+ /*
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 5c399ec..d494ea2 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -7365,6 +7365,11 @@ nfs4_stat_to_errno(int stat)
+ .p_name = #proc, \
+ }
+
++#define STUB(proc) \
++[NFSPROC4_CLNT_##proc] = { \
++ .p_name = #proc, \
++}
++
+ struct rpc_procinfo nfs4_procedures[] = {
+ PROC(READ, enc_read, dec_read),
+ PROC(WRITE, enc_write, dec_write),
+@@ -7417,6 +7422,7 @@ struct rpc_procinfo nfs4_procedures[] = {
+ PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
+ PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
+ PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
++ STUB(GETDEVICELIST),
+ PROC(BIND_CONN_TO_SESSION,
+ enc_bind_conn_to_session, dec_bind_conn_to_session),
+ PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index 568ecf0..848d8b1 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -284,7 +284,7 @@ int nfs_readpage(struct file *file, struct page *page)
+ dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
+ page, PAGE_CACHE_SIZE, page_file_index(page));
+ nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
+- nfs_inc_stats(inode, NFSIOS_READPAGES);
++ nfs_add_stats(inode, NFSIOS_READPAGES, 1);
+
+ /*
+ * Try to flush any pending writes to the file..
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 849ed78..41b3f1096 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -580,7 +580,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
+ int ret;
+
+ nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+- nfs_inc_stats(inode, NFSIOS_WRITEPAGES);
++ nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
+
+ nfs_pageio_cond_complete(pgio, page_file_index(page));
+ ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 92b9d97..5416968 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1030,6 +1030,8 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
+ return status;
+ }
++ if (!file)
++ return nfserr_bad_stateid;
+
+ status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
+ fallocate->falloc_offset,
+@@ -1069,6 +1071,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
+ return status;
+ }
++ if (!file)
++ return nfserr_bad_stateid;
+
+ switch (seek->seek_whence) {
+ case NFS4_CONTENT_DATA:
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 8ba1d88..ee1cccd 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1139,7 +1139,7 @@ hash_sessionid(struct nfs4_sessionid *sessionid)
+ return sid->sequence % SESSION_HASH_SIZE;
+ }
+
+-#ifdef NFSD_DEBUG
++#ifdef CONFIG_SUNRPC_DEBUG
+ static inline void
+ dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
+ {
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 5fb7e78..5b33ce1 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3422,6 +3422,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
+ unsigned long maxcount;
+ struct xdr_stream *xdr = &resp->xdr;
+ struct file *file = read->rd_filp;
++ struct svc_fh *fhp = read->rd_fhp;
+ int starting_len = xdr->buf->len;
+ struct raparms *ra;
+ __be32 *p;
+@@ -3445,12 +3446,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
+ maxcount = min_t(unsigned long, maxcount, (xdr->buf->buflen - xdr->buf->len));
+ maxcount = min_t(unsigned long, maxcount, read->rd_length);
+
+- if (!read->rd_filp) {
++ if (read->rd_filp)
++ err = nfsd_permission(resp->rqstp, fhp->fh_export,
++ fhp->fh_dentry,
++ NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE);
++ else
+ err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp,
+ &file, &ra);
+- if (err)
+- goto err_truncate;
+- }
++ if (err)
++ goto err_truncate;
+
+ if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
+ err = nfsd4_encode_splice_read(resp, read, file, maxcount);
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index aa47d75..9690cb4 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1250,15 +1250,15 @@ static int __init init_nfsd(void)
+ int retval;
+ printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
+
+- retval = register_cld_notifier();
+- if (retval)
+- return retval;
+ retval = register_pernet_subsys(&nfsd_net_ops);
+ if (retval < 0)
+- goto out_unregister_notifier;
+- retval = nfsd4_init_slabs();
++ return retval;
++ retval = register_cld_notifier();
+ if (retval)
+ goto out_unregister_pernet;
++ retval = nfsd4_init_slabs();
++ if (retval)
++ goto out_unregister_notifier;
+ retval = nfsd4_init_pnfs();
+ if (retval)
+ goto out_free_slabs;
+@@ -1290,10 +1290,10 @@ out_exit_pnfs:
+ nfsd4_exit_pnfs();
+ out_free_slabs:
+ nfsd4_free_slabs();
+-out_unregister_pernet:
+- unregister_pernet_subsys(&nfsd_net_ops);
+ out_unregister_notifier:
+ unregister_cld_notifier();
++out_unregister_pernet:
++ unregister_pernet_subsys(&nfsd_net_ops);
+ return retval;
+ }
+
+@@ -1308,8 +1308,8 @@ static void __exit exit_nfsd(void)
+ nfsd4_exit_pnfs();
+ nfsd_fault_inject_cleanup();
+ unregister_filesystem(&nfsd_fs_type);
+- unregister_pernet_subsys(&nfsd_net_ops);
+ unregister_cld_notifier();
++ unregister_pernet_subsys(&nfsd_net_ops);
+ }
+
+ MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 565c4da..cf98052 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -24,7 +24,7 @@
+ #include "export.h"
+
+ #undef ifdebug
+-#ifdef NFSD_DEBUG
++#ifdef CONFIG_SUNRPC_DEBUG
+ # define ifdebug(flag) if (nfsd_debug & NFSDDBG_##flag)
+ #else
+ # define ifdebug(flag) if (0)
+diff --git a/fs/open.c b/fs/open.c
+index 33f9cbf..44a3be1 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -570,6 +570,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+ uid = make_kuid(current_user_ns(), user);
+ gid = make_kgid(current_user_ns(), group);
+
++retry_deleg:
+ newattrs.ia_valid = ATTR_CTIME;
+ if (user != (uid_t) -1) {
+ if (!uid_valid(uid))
+@@ -586,7 +587,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+ if (!S_ISDIR(inode->i_mode))
+ newattrs.ia_valid |=
+ ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+-retry_deleg:
+ mutex_lock(&inode->i_mutex);
+ error = security_path_chown(path, uid, gid);
+ if (!error)
+diff --git a/fs/pnode.c b/fs/pnode.c
+index 260ac8f..6367e1e 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -362,6 +362,46 @@ int propagate_mount_busy(struct mount *mnt, int refcnt)
+ }
+
+ /*
++ * Clear MNT_LOCKED when it can be shown to be safe.
++ *
++ * mount_lock must be held for write
++ */
++void propagate_mount_unlock(struct mount *mnt)
++{
++ struct mount *parent = mnt->mnt_parent;
++ struct mount *m, *child;
++
++ BUG_ON(parent == mnt);
++
++ for (m = propagation_next(parent, parent); m;
++ m = propagation_next(m, parent)) {
++ child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
++ if (child)
++ child->mnt.mnt_flags &= ~MNT_LOCKED;
++ }
++}
++
++/*
++ * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
++ */
++static void mark_umount_candidates(struct mount *mnt)
++{
++ struct mount *parent = mnt->mnt_parent;
++ struct mount *m;
++
++ BUG_ON(parent == mnt);
++
++ for (m = propagation_next(parent, parent); m;
++ m = propagation_next(m, parent)) {
++ struct mount *child = __lookup_mnt_last(&m->mnt,
++ mnt->mnt_mountpoint);
++ if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
++ SET_MNT_MARK(child);
++ }
++ }
++}
++
++/*
+ * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
+ * parent propagates to.
+ */
+@@ -378,13 +418,16 @@ static void __propagate_umount(struct mount *mnt)
+ struct mount *child = __lookup_mnt_last(&m->mnt,
+ mnt->mnt_mountpoint);
+ /*
+- * umount the child only if the child has no
+- * other children
++ * umount the child only if the child has no children
++ * and the child is marked safe to unmount.
+ */
+- if (child && list_empty(&child->mnt_mounts)) {
++ if (!child || !IS_MNT_MARKED(child))
++ continue;
++ CLEAR_MNT_MARK(child);
++ if (list_empty(&child->mnt_mounts)) {
+ list_del_init(&child->mnt_child);
+- hlist_del_init_rcu(&child->mnt_hash);
+- hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
++ child->mnt.mnt_flags |= MNT_UMOUNT;
++ list_move_tail(&child->mnt_list, &mnt->mnt_list);
+ }
+ }
+ }
+@@ -396,11 +439,14 @@ static void __propagate_umount(struct mount *mnt)
+ *
+ * vfsmount lock must be held for write
+ */
+-int propagate_umount(struct hlist_head *list)
++int propagate_umount(struct list_head *list)
+ {
+ struct mount *mnt;
+
+- hlist_for_each_entry(mnt, list, mnt_hash)
++ list_for_each_entry_reverse(mnt, list, mnt_list)
++ mark_umount_candidates(mnt);
++
++ list_for_each_entry(mnt, list, mnt_list)
+ __propagate_umount(mnt);
+ return 0;
+ }
+diff --git a/fs/pnode.h b/fs/pnode.h
+index 4a24635..7114ce6 100644
+--- a/fs/pnode.h
++++ b/fs/pnode.h
+@@ -19,6 +19,9 @@
+ #define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
+ #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
+ #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
++#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
++#define IS_MNT_LOCKED_AND_LAZY(m) \
++ (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
+
+ #define CL_EXPIRE 0x01
+ #define CL_SLAVE 0x02
+@@ -40,14 +43,14 @@ static inline void set_mnt_shared(struct mount *mnt)
+ void change_mnt_propagation(struct mount *, int);
+ int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
+ struct hlist_head *);
+-int propagate_umount(struct hlist_head *);
++int propagate_umount(struct list_head *);
+ int propagate_mount_busy(struct mount *, int);
++void propagate_mount_unlock(struct mount *);
+ void mnt_release_group_id(struct mount *);
+ int get_dominating_id(struct mount *mnt, const struct path *root);
+ unsigned int mnt_get_count(struct mount *mnt);
+ void mnt_set_mountpoint(struct mount *, struct mountpoint *,
+ struct mount *);
+-void umount_tree(struct mount *, int);
+ struct mount *copy_tree(struct mount *, struct dentry *, int);
+ bool is_path_reachable(struct mount *, struct dentry *,
+ const struct path *root);
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index b034f10..0d58525 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -199,9 +199,29 @@ typedef int s32;
+ typedef s32 acpi_native_int;
+
+ typedef u32 acpi_size;
++
++#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
++
++/*
++ * OSPMs can define this to shrink the size of the structures for a 32-bit
++ * non-PAE environment. The ASL compiler may always define this to generate
++ * 32-bit OSPM compliant tables.
++ */
+ typedef u32 acpi_io_address;
+ typedef u32 acpi_physical_address;
+
++#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
++
++/*
++ * It is reported that, after some calculations, the physical addresses can
++ * wrap over the 32-bit boundary on 32-bit PAE environment.
++ * wrap over the 32-bit boundary in a 32-bit PAE environment.
++ */
++typedef u64 acpi_io_address;
++typedef u64 acpi_physical_address;
++
++#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
++
+ #define ACPI_MAX_PTR ACPI_UINT32_MAX
+ #define ACPI_SIZE_MAX ACPI_UINT32_MAX
+
+@@ -736,10 +756,6 @@ typedef u32 acpi_event_status;
+ #define ACPI_GPE_ENABLE 0
+ #define ACPI_GPE_DISABLE 1
+ #define ACPI_GPE_CONDITIONAL_ENABLE 2
+-#define ACPI_GPE_SAVE_MASK 4
+-
+-#define ACPI_GPE_ENABLE_SAVE (ACPI_GPE_ENABLE | ACPI_GPE_SAVE_MASK)
+-#define ACPI_GPE_DISABLE_SAVE (ACPI_GPE_DISABLE | ACPI_GPE_SAVE_MASK)
+
+ /*
+ * GPE info flags - Per GPE
+diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
+index ad74dc5..ecdf940 100644
+--- a/include/acpi/platform/acenv.h
++++ b/include/acpi/platform/acenv.h
+@@ -76,6 +76,7 @@
+ #define ACPI_LARGE_NAMESPACE_NODE
+ #define ACPI_DATA_TABLE_DISASSEMBLY
+ #define ACPI_SINGLE_THREADED
++#define ACPI_32BIT_PHYSICAL_ADDRESS
+ #endif
+
+ /* acpi_exec configuration. Multithreaded with full AML debugger */
+diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
+index ae2eb17..a215609 100644
+--- a/include/dt-bindings/clock/tegra124-car-common.h
++++ b/include/dt-bindings/clock/tegra124-car-common.h
+@@ -297,7 +297,7 @@
+ #define TEGRA124_CLK_PLL_C4 270
+ #define TEGRA124_CLK_PLL_DP 271
+ #define TEGRA124_CLK_PLL_E_MUX 272
+-#define TEGRA124_CLK_PLLD_DSI 273
++#define TEGRA124_CLK_PLL_D_DSI_OUT 273
+ /* 274 */
+ /* 275 */
+ /* 276 */
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index bbfceb7..33b52fb 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -48,7 +48,7 @@ struct bpf_map *bpf_map_get(struct fd f);
+
+ /* function argument constraints */
+ enum bpf_arg_type {
+- ARG_ANYTHING = 0, /* any argument is ok */
++ ARG_DONTCARE = 0, /* unused argument in helper function */
+
+ /* the following constraints used to prototype
+ * bpf_map_lookup/update/delete_elem() functions
+@@ -62,6 +62,8 @@ enum bpf_arg_type {
+ */
+ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
+ ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
++
++ ARG_ANYTHING, /* any (initialized) argument is ok */
+ };
+
+ /* type of values returned from helper functions */
+diff --git a/include/linux/mount.h b/include/linux/mount.h
+index c2c561d..564beee 100644
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -61,6 +61,7 @@ struct mnt_namespace;
+ #define MNT_DOOMED 0x1000000
+ #define MNT_SYNC_UMOUNT 0x2000000
+ #define MNT_MARKED 0x4000000
++#define MNT_UMOUNT 0x8000000
+
+ struct vfsmount {
+ struct dentry *mnt_root; /* root of the mounted tree */
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index a419b65..51348f7 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -176,6 +176,14 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
+ extern void calc_global_load(unsigned long ticks);
+ extern void update_cpu_load_nohz(void);
+
++/* Notifier for when a task gets migrated to a new CPU */
++struct task_migration_notifier {
++ struct task_struct *task;
++ int from_cpu;
++ int to_cpu;
++};
++extern void register_task_migration_notifier(struct notifier_block *n);
++
+ extern unsigned long get_parent_ip(unsigned long addr);
+
+ extern void dump_cpu_task(int cpu);
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index f54d665..bdccc4b 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -769,6 +769,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+
+ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+ int node);
++struct sk_buff *__build_skb(void *data, unsigned int frag_size);
+ struct sk_buff *build_skb(void *data, unsigned int frag_size);
+ static inline struct sk_buff *alloc_skb(unsigned int size,
+ gfp_t priority)
+@@ -3013,6 +3014,18 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
+ */
+ #define CHECKSUM_BREAK 76
+
++/* Unset checksum-complete
++ *
++ * Unsetting checksum-complete can be done when the packet is being
++ * modified (uncompressed, for instance) and the checksum-complete value
++ * is invalidated.
++ */
++static inline void skb_checksum_complete_unset(struct sk_buff *skb)
++{
++ if (skb->ip_summed == CHECKSUM_COMPLETE)
++ skb->ip_summed = CHECKSUM_NONE;
++}
++
+ /* Validate (init) checksum based on checksum complete.
+ *
+ * Return values:
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 7ee1b5c..447fe29 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -205,6 +205,32 @@ void usb_put_intf(struct usb_interface *intf);
+ #define USB_MAXINTERFACES 32
+ #define USB_MAXIADS (USB_MAXINTERFACES/2)
+
++/*
++ * USB Resume Timer: Every Host controller driver should drive the resume
++ * signalling on the bus for the amount of time defined by this macro.
++ *
++ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
++ *
++ * Note that the USB Specification states we should drive resume for *at least*
++ * 20 ms, but it doesn't give an upper bound. This creates two possible
++ * situations which we want to avoid:
++ *
++ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
++ * us to fail USB Electrical Tests, thus failing Certification
++ *
++ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
++ * and while we can argue that's against the USB Specification, we don't have
++ * control over which devices a certification laboratory will be using for
++ * certification. If CertLab uses a device which was tested against Windows and
++ * that happens to have relaxed resume signalling rules, we might fall into
++ * situations where we fail interoperability and electrical tests.
++ *
++ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
++ * should cope with both LPJ calibration errors and devices not following every
++ * detail of the USB Specification.
++ */
++#define USB_RESUME_TIMEOUT 40 /* ms */
++
+ /**
+ * struct usb_interface_cache - long-term representation of a device interface
+ * @num_altsetting: number of altsettings defined.
+diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
+index d3583d3..dd0f3ab 100644
+--- a/include/target/iscsi/iscsi_target_core.h
++++ b/include/target/iscsi/iscsi_target_core.h
+@@ -602,6 +602,11 @@ struct iscsi_conn {
+ struct iscsi_session *sess;
+ /* Pointer to thread_set in use for this conn's threads */
+ struct iscsi_thread_set *thread_set;
++ int bitmap_id;
++ int rx_thread_active;
++ struct task_struct *rx_thread;
++ int tx_thread_active;
++ struct task_struct *tx_thread;
+ /* list_head for session connection list */
+ struct list_head conn_list;
+ } ____cacheline_aligned;
+@@ -871,10 +876,12 @@ struct iscsit_global {
+ /* Unique identifier used for the authentication daemon */
+ u32 auth_id;
+ u32 inactive_ts;
++#define ISCSIT_BITMAP_BITS 262144
+ /* Thread Set bitmap count */
+ int ts_bitmap_count;
+ /* Thread Set bitmap pointer */
+ unsigned long *ts_bitmap;
++ spinlock_t ts_bitmap_lock;
+ /* Used for iSCSI discovery session authentication */
+ struct iscsi_node_acl discovery_acl;
+ struct iscsi_portal_group *discovery_tpg;
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 672150b..985ca4c 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -524,7 +524,7 @@ struct se_cmd {
+ sense_reason_t (*execute_cmd)(struct se_cmd *);
+ sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
+ u32, enum dma_data_direction);
+- sense_reason_t (*transport_complete_callback)(struct se_cmd *);
++ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
+
+ unsigned char *t_task_cdb;
+ unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
+diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h
+index 0bf130a..28ec6c9 100644
+--- a/include/uapi/linux/nfsd/debug.h
++++ b/include/uapi/linux/nfsd/debug.h
+@@ -12,14 +12,6 @@
+ #include <linux/sunrpc/debug.h>
+
+ /*
+- * Enable debugging for nfsd.
+- * Requires RPC_DEBUG.
+- */
+-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+-# define NFSD_DEBUG 1
+-#endif
+-
+-/*
+ * knfsd debug flags
+ */
+ #define NFSDDBG_SOCK 0x0001
+diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
+index a20e4a3..847a0a2 100644
+--- a/include/video/samsung_fimd.h
++++ b/include/video/samsung_fimd.h
+@@ -436,6 +436,12 @@
+ #define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
+ #define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
+
++/* Display port clock control */
++#define DP_MIE_CLKCON 0x27c
++#define DP_MIE_CLK_DISABLE 0x0
++#define DP_MIE_CLK_DP_ENABLE 0x2
++#define DP_MIE_CLK_MIE_ENABLE 0x3
++
+ /* Notes on per-window bpp settings
+ *
+ * Value Win0 Win1 Win2 Win3 Win 4
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 36508e6..5d8ea3d 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -755,7 +755,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
+ enum bpf_reg_type expected_type;
+ int err = 0;
+
+- if (arg_type == ARG_ANYTHING)
++ if (arg_type == ARG_DONTCARE)
+ return 0;
+
+ if (reg->type == NOT_INIT) {
+@@ -763,6 +763,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
+ return -EACCES;
+ }
+
++ if (arg_type == ARG_ANYTHING)
++ return 0;
++
+ if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
+ arg_type == ARG_PTR_TO_MAP_VALUE) {
+ expected_type = PTR_TO_STACK;
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 227fec3..9a34bd8 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -697,6 +697,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
+ static int ptrace_resume(struct task_struct *child, long request,
+ unsigned long data)
+ {
++ bool need_siglock;
++
+ if (!valid_signal(data))
+ return -EIO;
+
+@@ -724,8 +726,26 @@ static int ptrace_resume(struct task_struct *child, long request,
+ user_disable_single_step(child);
+ }
+
++ /*
++ * Change ->exit_code and ->state under siglock to avoid the race
++ * with wait_task_stopped() in between; a non-zero ->exit_code will
++ * wrongly look like another report from tracee.
++ *
++ * Note that we need siglock even if ->exit_code == data and/or this
++ * status was not reported yet, the new status must not be cleared by
++ * wait_task_stopped() after resume.
++ *
++ * If data == 0 we do not care if wait_task_stopped() reports the old
++ * status and clears the code too; this can't race with the tracee, it
++ * takes siglock after resume.
++ */
++ need_siglock = data && !thread_group_empty(current);
++ if (need_siglock)
++ spin_lock_irq(&child->sighand->siglock);
+ child->exit_code = data;
+ wake_up_state(child, __TASK_TRACED);
++ if (need_siglock)
++ spin_unlock_irq(&child->sighand->siglock);
+
+ return 0;
+ }
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 62671f5..3d5f6f6 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -996,6 +996,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+ rq_clock_skip_update(rq, true);
+ }
+
++static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
++
++void register_task_migration_notifier(struct notifier_block *n)
++{
++ atomic_notifier_chain_register(&task_migration_notifier, n);
++}
++
+ #ifdef CONFIG_SMP
+ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+ {
+@@ -1026,10 +1033,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+ trace_sched_migrate_task(p, new_cpu);
+
+ if (task_cpu(p) != new_cpu) {
++ struct task_migration_notifier tmn;
++
+ if (p->sched_class->migrate_task_rq)
+ p->sched_class->migrate_task_rq(p, new_cpu);
+ p->se.nr_migrations++;
+ perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
++
++ tmn.task = p;
++ tmn.from_cpu = task_cpu(p);
++ tmn.to_cpu = new_cpu;
++
++ atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
+ }
+
+ __set_task_cpu(p, new_cpu);
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 3fa8fa6..f670cbb 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -514,7 +514,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
+ unsigned long flags;
+ struct rq *rq;
+
+- rq = task_rq_lock(current, &flags);
++ rq = task_rq_lock(p, &flags);
+
+ /*
+ * We need to take care of several possible races here:
+@@ -569,7 +569,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
+ push_dl_task(rq);
+ #endif
+ unlock:
+- task_rq_unlock(rq, current, &flags);
++ task_rq_unlock(rq, p, &flags);
+
+ return HRTIMER_NORESTART;
+ }
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 5040d44..922048a 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2679,7 +2679,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
+
+ static __always_inline int trace_recursive_lock(void)
+ {
+- unsigned int val = this_cpu_read(current_context);
++ unsigned int val = __this_cpu_read(current_context);
+ int bit;
+
+ if (in_interrupt()) {
+@@ -2696,18 +2696,17 @@ static __always_inline int trace_recursive_lock(void)
+ return 1;
+
+ val |= (1 << bit);
+- this_cpu_write(current_context, val);
++ __this_cpu_write(current_context, val);
+
+ return 0;
+ }
+
+ static __always_inline void trace_recursive_unlock(void)
+ {
+- unsigned int val = this_cpu_read(current_context);
++ unsigned int val = __this_cpu_read(current_context);
+
+- val--;
+- val &= this_cpu_read(current_context);
+- this_cpu_write(current_context, val);
++ val &= val & (val - 1);
++ __this_cpu_write(current_context, val);
+ }
+
+ #else
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index db54dda..a9c10a3 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -565,6 +565,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
+ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
+ {
+ char *event = NULL, *sub = NULL, *match;
++ int ret;
+
+ /*
+ * The buf format can be <subsystem>:<event-name>
+@@ -590,7 +591,13 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
+ event = NULL;
+ }
+
+- return __ftrace_set_clr_event(tr, match, sub, event, set);
++ ret = __ftrace_set_clr_event(tr, match, sub, event, set);
++
++ /* Put back the colon to allow this to be called again */
++ if (buf)
++ *(buf - 1) = ':';
++
++ return ret;
+ }
+
+ /**
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index 2d25ad1..b6fce36 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -1309,15 +1309,19 @@ void graph_trace_open(struct trace_iterator *iter)
+ {
+ /* pid and depth on the last trace processed */
+ struct fgraph_data *data;
++ gfp_t gfpflags;
+ int cpu;
+
+ iter->private = NULL;
+
+- data = kzalloc(sizeof(*data), GFP_KERNEL);
++ /* We can be called in atomic context via ftrace_dump() */
++ gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
++
++ data = kzalloc(sizeof(*data), gfpflags);
+ if (!data)
+ goto out_err;
+
+- data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
++ data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
+ if (!data->cpu_data)
+ goto out_err_free;
+
+diff --git a/lib/string.c b/lib/string.c
+index ce81aae..a579201 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
+ void memzero_explicit(void *s, size_t count)
+ {
+ memset(s, 0, count);
+- OPTIMIZER_HIDE_VAR(s);
++ barrier();
+ }
+ EXPORT_SYMBOL(memzero_explicit);
+
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 6817b03..956d4db 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2316,8 +2316,14 @@ static struct page
+ struct vm_area_struct *vma, unsigned long address,
+ int node)
+ {
++ gfp_t flags;
++
+ VM_BUG_ON_PAGE(*hpage, *hpage);
+
++ /* Only allocate from the target node */
++ flags = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
++ __GFP_THISNODE;
++
+ /*
+ * Before allocating the hugepage, release the mmap_sem read lock.
+ * The allocation can take potentially a long time if it involves
+@@ -2326,8 +2332,7 @@ static struct page
+ */
+ up_read(&mm->mmap_sem);
+
+- *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
+- khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
++ *hpage = alloc_pages_exact_node(node, flags, HPAGE_PMD_ORDER);
+ if (unlikely(!*hpage)) {
+ count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+ *hpage = ERR_PTR(-ENOMEM);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index c41b2a0..caad3c5 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3735,8 +3735,7 @@ retry:
+ if (!pmd_huge(*pmd))
+ goto out;
+ if (pmd_present(*pmd)) {
+- page = pte_page(*(pte_t *)pmd) +
+- ((address & ~PMD_MASK) >> PAGE_SHIFT);
++ page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ if (flags & FOLL_GET)
+ get_page(page);
+ } else {
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 4721046..de5dc5e 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1985,7 +1985,8 @@ retry_cpuset:
+ nmask = policy_nodemask(gfp, pol);
+ if (!nmask || node_isset(node, *nmask)) {
+ mpol_cond_put(pol);
+- page = alloc_pages_exact_node(node, gfp, order);
++ page = alloc_pages_exact_node(node,
++ gfp | __GFP_THISNODE, order);
+ goto out;
+ }
+ }
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index 0ee453f..f371cbf 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -651,6 +651,13 @@ static int br_nf_forward_finish(struct sk_buff *skb)
+ struct net_device *in;
+
+ if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
++ int frag_max_size;
++
++ if (skb->protocol == htons(ETH_P_IP)) {
++ frag_max_size = IPCB(skb)->frag_max_size;
++ BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
++ }
++
+ in = nf_bridge->physindev;
+ if (nf_bridge->mask & BRNF_PKT_TYPE) {
+ skb->pkt_type = PACKET_OTHERHOST;
+@@ -710,8 +717,14 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
+ nf_bridge->mask |= BRNF_PKT_TYPE;
+ }
+
+- if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
+- return NF_DROP;
++ if (pf == NFPROTO_IPV4) {
++ int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size;
++
++ if (br_parse_ip_options(skb))
++ return NF_DROP;
++
++ IPCB(skb)->frag_max_size = frag_max;
++ }
+
+ /* The physdev module checks on this */
+ nf_bridge->mask |= BRNF_BRIDGED;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 45109b7..22a53ac 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3041,7 +3041,7 @@ static struct rps_dev_flow *
+ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ struct rps_dev_flow *rflow, u16 next_cpu)
+ {
+- if (next_cpu != RPS_NO_CPU) {
++ if (next_cpu < nr_cpu_ids) {
+ #ifdef CONFIG_RFS_ACCEL
+ struct netdev_rx_queue *rxqueue;
+ struct rps_dev_flow_table *flow_table;
+@@ -3146,7 +3146,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ * If the desired CPU (where last recvmsg was done) is
+ * different from current CPU (one in the rx-queue flow
+ * table entry), switch if one of the following holds:
+- * - Current CPU is unset (equal to RPS_NO_CPU).
++ * - Current CPU is unset (>= nr_cpu_ids).
+ * - Current CPU is offline.
+ * - The current CPU's queue tail has advanced beyond the
+ * last packet that was enqueued using this table entry.
+@@ -3154,14 +3154,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ * have been dequeued, thus preserving in order delivery.
+ */
+ if (unlikely(tcpu != next_cpu) &&
+- (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
++ (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
+ ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
+ rflow->last_qtail)) >= 0)) {
+ tcpu = next_cpu;
+ rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+ }
+
+- if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
++ if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
+ *rflowp = rflow;
+ cpu = tcpu;
+ goto done;
+@@ -3202,14 +3202,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+ struct rps_dev_flow_table *flow_table;
+ struct rps_dev_flow *rflow;
+ bool expire = true;
+- int cpu;
++ unsigned int cpu;
+
+ rcu_read_lock();
+ flow_table = rcu_dereference(rxqueue->rps_flow_table);
+ if (flow_table && flow_id <= flow_table->mask) {
+ rflow = &flow_table->flows[flow_id];
+ cpu = ACCESS_ONCE(rflow->cpu);
+- if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
++ if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
+ ((int)(per_cpu(softnet_data, cpu).input_queue_head -
+ rflow->last_qtail) <
+ (int)(10 * flow_table->mask)))
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 98d45fe..e9f9a15 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -280,13 +280,14 @@ nodata:
+ EXPORT_SYMBOL(__alloc_skb);
+
+ /**
+- * build_skb - build a network buffer
++ * __build_skb - build a network buffer
+ * @data: data buffer provided by caller
+- * @frag_size: size of fragment, or 0 if head was kmalloced
++ * @frag_size: size of data, or 0 if head was kmalloced
+ *
+ * Allocate a new &sk_buff. Caller provides space holding head and
+ * skb_shared_info. @data must have been allocated by kmalloc() only if
+- * @frag_size is 0, otherwise data should come from the page allocator.
++ * @frag_size is 0, otherwise data should come from the page allocator
++ * or vmalloc()
+ * The return is the new skb buffer.
+ * On a failure the return is %NULL, and @data is not freed.
+ * Notes :
+@@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb);
+ * before giving packet to stack.
+ * RX rings only contains data buffers, not full skbs.
+ */
+-struct sk_buff *build_skb(void *data, unsigned int frag_size)
++struct sk_buff *__build_skb(void *data, unsigned int frag_size)
+ {
+ struct skb_shared_info *shinfo;
+ struct sk_buff *skb;
+@@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->truesize = SKB_TRUESIZE(size);
+- skb->head_frag = frag_size != 0;
+ atomic_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+@@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+
+ return skb;
+ }
++
++/* build_skb() is a wrapper over __build_skb() that specifically
++ * takes care of skb->head and skb->pfmemalloc
++ * This means that if @frag_size is not zero, then @data must be backed
++ * by a page fragment, not kmalloc() or vmalloc()
++ */
++struct sk_buff *build_skb(void *data, unsigned int frag_size)
++{
++ struct sk_buff *skb = __build_skb(data, frag_size);
++
++ if (skb && frag_size) {
++ skb->head_frag = 1;
++ if (virt_to_head_page(data)->pfmemalloc)
++ skb->pfmemalloc = 1;
++ }
++ return skb;
++}
+ EXPORT_SYMBOL(build_skb);
+
+ struct netdev_alloc_cache {
+@@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
+ gfp_t gfp = gfp_mask;
+
+ if (order) {
+- gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
++ gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
++ __GFP_NOMEMALLOC;
+ page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+ nc->frag.size = PAGE_SIZE << (page ? order : 0);
+ }
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index d9bc28a..53bd53f 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -82,6 +82,9 @@ int ip_forward(struct sk_buff *skb)
+ if (skb->pkt_type != PACKET_HOST)
+ goto drop;
+
++ if (unlikely(skb->sk))
++ goto drop;
++
+ if (skb_warn_if_lro(skb))
+ goto drop;
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index d520492..9d48dc4 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2751,39 +2751,65 @@ begin_fwd:
+ }
+ }
+
+-/* Send a fin. The caller locks the socket for us. This cannot be
+- * allowed to fail queueing a FIN frame under any circumstances.
++/* We allow memory limits to be exceeded for FIN packets to expedite
++ * connection tear down and (memory) recovery.
++ * Otherwise tcp_send_fin() could be tempted to either delay FIN
++ * or even be forced to close flow without any FIN.
++ */
++static void sk_forced_wmem_schedule(struct sock *sk, int size)
++{
++ int amt, status;
++
++ if (size <= sk->sk_forward_alloc)
++ return;
++ amt = sk_mem_pages(size);
++ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
++ sk_memory_allocated_add(sk, amt, &status);
++}
++
++/* Send a FIN. The caller locks the socket for us.
++ * We should try to send a FIN packet really hard, but eventually give up.
+ */
+ void tcp_send_fin(struct sock *sk)
+ {
++ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+- struct sk_buff *skb = tcp_write_queue_tail(sk);
+- int mss_now;
+
+- /* Optimization, tack on the FIN if we have a queue of
+- * unsent frames. But be careful about outgoing SACKS
+- * and IP options.
++ /* Optimization, tack on the FIN if we have one skb in write queue and
++ * this skb was not yet sent, or we are under memory pressure.
++ * Note: in the latter case, FIN packet will be sent after a timeout,
++ * as TCP stack thinks it has already been transmitted.
+ */
+- mss_now = tcp_current_mss(sk);
+-
+- if (tcp_send_head(sk) != NULL) {
+- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
+- TCP_SKB_CB(skb)->end_seq++;
++ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
++coalesce:
++ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
++ TCP_SKB_CB(tskb)->end_seq++;
+ tp->write_seq++;
++ if (!tcp_send_head(sk)) {
++ /* This means tskb was already sent.
++ * Pretend we included the FIN on previous transmit.
++ * We need to set tp->snd_nxt to the value it would have
++ * if FIN had been sent. This is because retransmit path
++ * does not change tp->snd_nxt.
++ */
++ tp->snd_nxt++;
++ return;
++ }
+ } else {
+- /* Socket is locked, keep trying until memory is available. */
+- for (;;) {
+- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+- if (skb)
+- break;
+- yield();
++ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
++ if (unlikely(!skb)) {
++ if (tskb)
++ goto coalesce;
++ return;
+ }
++ skb_reserve(skb, MAX_TCP_HEADER);
++ sk_forced_wmem_schedule(sk, skb->truesize);
+ /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
+ tcp_init_nondata_skb(skb, tp->write_seq,
+ TCPHDR_ACK | TCPHDR_FIN);
+ tcp_queue_skb(sk, skb);
+ }
+- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
++ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
+ }
+
+ /* We get here when a process closes a file descriptor (either due to
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 142f66a..0ca013d 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2260,7 +2260,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
+ else
+ ssid_len = ssid[1];
+
+- ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL,
++ ieee80211_send_probe_req(sdata, sdata->vif.addr, dst,
+ ssid + 2, ssid_len, NULL,
+ 0, (u32) -1, true, 0,
+ ifmgd->associated->channel, false);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 05919bf..d1d7a81 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1616,13 +1616,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
+ if (data == NULL)
+ return NULL;
+
+- skb = build_skb(data, size);
++ skb = __build_skb(data, size);
+ if (skb == NULL)
+ vfree(data);
+- else {
+- skb->head_frag = 0;
++ else
+ skb->destructor = netlink_skb_destructor;
+- }
+
+ return skb;
+ }
+diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
+index 2ca9f2e..53745f4 100644
+--- a/sound/pci/emu10k1/emuproc.c
++++ b/sound/pci/emu10k1/emuproc.c
+@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
+ struct snd_emu10k1 *emu = entry->private_data;
+ u32 value;
+ u32 value2;
+- unsigned long flags;
+ u32 rate;
+
+ if (emu->card_capabilities->emu_model) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x38, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ if ((value & 0x1) == 0) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x2a, &value);
+ snd_emu1010_fpga_read(emu, 0x2b, &value2);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ rate = 0x1770000 / (((value << 5) | value2)+1);
+ snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
+ } else {
+ snd_iprintf(buffer, "ADAT Unlocked\n");
+ }
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x20, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ if ((value & 0x4) == 0) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x28, &value);
+ snd_emu1010_fpga_read(emu, 0x29, &value2);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ rate = 0x1770000 / (((value << 5) | value2)+1);
+ snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
+ } else {
+@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
+ {
+ struct snd_emu10k1 *emu = entry->private_data;
+ u32 value;
+- unsigned long flags;
+ int i;
+ snd_iprintf(buffer, "EMU1010 Registers:\n\n");
+
+ for(i = 0; i < 0x40; i+=1) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, i, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
+ }
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f9d12c0..2fd490b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5047,12 +5047,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++ SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++ SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+@@ -5142,6 +5144,16 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {0x1b, 0x411111f0}, \
+ {0x1e, 0x411111f0}
+
++#define ALC256_STANDARD_PINS \
++ {0x12, 0x90a60140}, \
++ {0x14, 0x90170110}, \
++ {0x19, 0x411111f0}, \
++ {0x1a, 0x411111f0}, \
++ {0x1b, 0x411111f0}, \
++ {0x1d, 0x40700001}, \
++ {0x1e, 0x411111f0}, \
++ {0x21, 0x02211020}
++
+ #define ALC282_STANDARD_PINS \
+ {0x14, 0x90170110}, \
+ {0x18, 0x411111f0}, \
+@@ -5235,15 +5247,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x1d, 0x40700001},
+ {0x21, 0x02211050}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+- {0x12, 0x90a60140},
+- {0x13, 0x40000000},
+- {0x14, 0x90170110},
+- {0x19, 0x411111f0},
+- {0x1a, 0x411111f0},
+- {0x1b, 0x411111f0},
+- {0x1d, 0x40700001},
+- {0x1e, 0x411111f0},
+- {0x21, 0x02211020}),
++ ALC256_STANDARD_PINS,
++ {0x13, 0x40000000}),
++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC256_STANDARD_PINS,
++ {0x13, 0x411111f0}),
+ SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
+ {0x12, 0x90a60130},
+ {0x13, 0x40000000},
+@@ -5563,6 +5571,8 @@ static int patch_alc269(struct hda_codec *codec)
+ break;
+ case 0x10ec0256:
+ spec->codec_variant = ALC269_TYPE_ALC256;
++ spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
++ alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
+ break;
+ }
+
+@@ -5576,8 +5586,8 @@ static int patch_alc269(struct hda_codec *codec)
+ if (err < 0)
+ goto error;
+
+- if (!spec->gen.no_analog && spec->gen.beep_nid)
+- set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
++ if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid)
++ set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT);
+
+ codec->patch_ops = alc_patch_ops;
+ #ifdef CONFIG_PM
+diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
+index 7d3a6ac..e770ee6 100644
+--- a/sound/soc/codecs/cs4271.c
++++ b/sound/soc/codecs/cs4271.c
+@@ -561,10 +561,10 @@ static int cs4271_codec_probe(struct snd_soc_codec *codec)
+ if (gpio_is_valid(cs4271->gpio_nreset)) {
+ /* Reset codec */
+ gpio_direction_output(cs4271->gpio_nreset, 0);
+- udelay(1);
++ mdelay(1);
+ gpio_set_value(cs4271->gpio_nreset, 1);
+ /* Give the codec time to wake up */
+- udelay(1);
++ mdelay(1);
+ }
+
+ ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
+diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
+index 474cae8..8c09e3f 100644
+--- a/sound/soc/codecs/pcm512x.c
++++ b/sound/soc/codecs/pcm512x.c
+@@ -304,9 +304,9 @@ static const struct soc_enum pcm512x_veds =
+ static const struct snd_kcontrol_new pcm512x_controls[] = {
+ SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2,
+ PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv),
+-SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
++SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
+ PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv),
+-SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
++SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
+ PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv),
+ SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
+ PCM512x_RQMR_SHIFT, 1, 1),
+@@ -576,8 +576,8 @@ static int pcm512x_find_pll_coeff(struct snd_soc_dai *dai,
+
+ /* pllin_rate / P (or here, den) cannot be greater than 20 MHz */
+ if (pllin_rate / den > 20000000 && num < 8) {
+- num *= 20000000 / (pllin_rate / den);
+- den *= 20000000 / (pllin_rate / den);
++ num *= DIV_ROUND_UP(pllin_rate / den, 20000000);
++ den *= DIV_ROUND_UP(pllin_rate / den, 20000000);
+ }
+ dev_dbg(dev, "num / den = %lu / %lu\n", num, den);
+
+diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
+index 31bb480..9e71c76 100644
+--- a/sound/soc/codecs/wm8741.c
++++ b/sound/soc/codecs/wm8741.c
+@@ -123,7 +123,7 @@ static struct {
+ };
+
+ static const unsigned int rates_11289[] = {
+- 44100, 88235,
++ 44100, 88200,
+ };
+
+ static const struct snd_pcm_hw_constraint_list constraints_11289 = {
+@@ -150,7 +150,7 @@ static const struct snd_pcm_hw_constraint_list constraints_16384 = {
+ };
+
+ static const unsigned int rates_16934[] = {
+- 44100, 88235,
++ 44100, 88200,
+ };
+
+ static const struct snd_pcm_hw_constraint_list constraints_16934 = {
+@@ -168,7 +168,7 @@ static const struct snd_pcm_hw_constraint_list constraints_18432 = {
+ };
+
+ static const unsigned int rates_22579[] = {
+- 44100, 88235, 1764000
++ 44100, 88200, 176400
+ };
+
+ static const struct snd_pcm_hw_constraint_list constraints_22579 = {
+@@ -186,7 +186,7 @@ static const struct snd_pcm_hw_constraint_list constraints_24576 = {
+ };
+
+ static const unsigned int rates_36864[] = {
+- 48000, 96000, 19200
++ 48000, 96000, 192000
+ };
+
+ static const struct snd_pcm_hw_constraint_list constraints_36864 = {
+diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
+index b6bb594..8c2b9be 100644
+--- a/sound/soc/davinci/davinci-evm.c
++++ b/sound/soc/davinci/davinci-evm.c
+@@ -425,18 +425,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int davinci_evm_remove(struct platform_device *pdev)
+-{
+- struct snd_soc_card *card = platform_get_drvdata(pdev);
+-
+- snd_soc_unregister_card(card);
+-
+- return 0;
+-}
+-
+ static struct platform_driver davinci_evm_driver = {
+ .probe = davinci_evm_probe,
+- .remove = davinci_evm_remove,
+ .driver = {
+ .name = "davinci_evm",
+ .pm = &snd_soc_pm_ops,
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 9a28365..32631a8 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1115,6 +1115,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ {
+ /* devices which do not support reading the sample rate. */
+ switch (chip->usb_id) {
++ case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
+ case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
+ case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+ return true;
+diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
+index dcc6652..deb3569 100644
+--- a/tools/lib/traceevent/kbuffer-parse.c
++++ b/tools/lib/traceevent/kbuffer-parse.c
+@@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
+ switch (type_len) {
+ case KBUFFER_TYPE_PADDING:
+ *length = read_4(kbuf, data);
+- data += *length;
+ break;
+
+ case KBUFFER_TYPE_TIME_EXTEND:
+diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
+index cc22408..0884d31 100644
+--- a/tools/perf/config/Makefile
++++ b/tools/perf/config/Makefile
+@@ -651,7 +651,7 @@ ifeq (${IS_64_BIT}, 1)
+ NO_PERF_READ_VDSO32 := 1
+ endif
+ endif
+- ifneq (${IS_X86_64}, 1)
++ ifneq ($(ARCH), x86)
+ NO_PERF_READ_VDSOX32 := 1
+ endif
+ ifndef NO_PERF_READ_VDSOX32
+@@ -699,7 +699,7 @@ sysconfdir = $(prefix)/etc
+ ETC_PERFCONFIG = etc/perfconfig
+ endif
+ ifndef lib
+-ifeq ($(IS_X86_64),1)
++ifeq ($(ARCH)$(IS_64_BIT), x861)
+ lib = lib64
+ else
+ lib = lib
+diff --git a/tools/perf/tests/make b/tools/perf/tests/make
+index 75709d2..bff8532 100644
+--- a/tools/perf/tests/make
++++ b/tools/perf/tests/make
+@@ -5,7 +5,7 @@ include config/Makefile.arch
+
+ # FIXME looks like x86 is the only arch running tests ;-)
+ # we need some IS_(32/64) flag to make this generic
+-ifeq ($(IS_X86_64),1)
++ifeq ($(ARCH)$(IS_64_BIT), x861)
+ lib = lib64
+ else
+ lib = lib
+diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
+index 6da965b..85b5238 100644
+--- a/tools/perf/util/cloexec.c
++++ b/tools/perf/util/cloexec.c
+@@ -7,6 +7,12 @@
+
+ static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
+
++int __weak sched_getcpu(void)
++{
++ errno = ENOSYS;
++ return -1;
++}
++
+ static int perf_flag_probe(void)
+ {
+ /* use 'safest' configuration as used in perf_evsel__fallback() */
+diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h
+index 94a5a7d..68888c2 100644
+--- a/tools/perf/util/cloexec.h
++++ b/tools/perf/util/cloexec.h
+@@ -3,4 +3,10 @@
+
+ unsigned long perf_event_open_cloexec_flag(void);
+
++#ifdef __GLIBC_PREREQ
++#if !__GLIBC_PREREQ(2, 6)
++extern int sched_getcpu(void) __THROW;
++#endif
++#endif
++
+ #endif /* __PERF_CLOEXEC_H */
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 33b7a2a..9bdf007 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -74,6 +74,10 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
+ return GELF_ST_TYPE(sym->st_info);
+ }
+
++#ifndef STT_GNU_IFUNC
++#define STT_GNU_IFUNC 10
++#endif
++
+ static inline int elf_sym__is_function(const GElf_Sym *sym)
+ {
+ return (elf_sym__type(sym) == STT_FUNC ||
+diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
+index d1b3a36..4039854 100644
+--- a/tools/power/x86/turbostat/Makefile
++++ b/tools/power/x86/turbostat/Makefile
+@@ -1,8 +1,12 @@
+ CC = $(CROSS_COMPILE)gcc
+-BUILD_OUTPUT := $(PWD)
++BUILD_OUTPUT := $(CURDIR)
+ PREFIX := /usr
+ DESTDIR :=
+
++ifeq ("$(origin O)", "command line")
++ BUILD_OUTPUT := $(O)
++endif
++
+ turbostat : turbostat.c
+ CFLAGS += -Wall
+ CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index c9f60f5..e5abe7c 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1371,6 +1371,9 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+ goto out;
+ }
+
++ if (irq_num >= kvm->arch.vgic.nr_irqs)
++ return -EINVAL;
++
+ vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+ if (vcpu_id >= 0) {
+ /* kick the specified vcpu */
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index cc6a25d..f8f3f5f 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1653,8 +1653,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ ghc->generation = slots->generation;
+ ghc->len = len;
+ ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+- if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
++ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
++ if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
+ ghc->hva += offset;
+ } else {
+ /*
diff --git a/1002_linux-4.0.3.patch b/1002_linux-4.0.3.patch
new file mode 100644
index 0000000..d137bf2
--- /dev/null
+++ b/1002_linux-4.0.3.patch
@@ -0,0 +1,2827 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index bfcb1a62a7b4..4d68ec841304 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -3746,6 +3746,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ READ_CAPACITY_16 command);
+ f = NO_REPORT_OPCODES (don't use report opcodes
+ command, uas only);
++ g = MAX_SECTORS_240 (don't transfer more than
++ 240 sectors at a time, uas only);
+ h = CAPACITY_HEURISTICS (decrease the
+ reported device capacity by one
+ sector if the number is odd);
+diff --git a/Makefile b/Makefile
+index 0649a6011a76..dc9f43a019d6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 0
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma sheep
+
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index ef7d112f5ce0..b0bd4e5fd5cf 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -67,8 +67,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
+
+ *ret_page = phys_to_page(phys);
+ ptr = (void *)val;
+- if (flags & __GFP_ZERO)
+- memset(ptr, 0, size);
++ memset(ptr, 0, size);
+ }
+
+ return ptr;
+@@ -105,7 +104,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
+ struct page *page;
+ void *addr;
+
+- size = PAGE_ALIGN(size);
+ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+ get_order(size));
+ if (!page)
+@@ -113,8 +111,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
+
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ addr = page_address(page);
+- if (flags & __GFP_ZERO)
+- memset(addr, 0, size);
++ memset(addr, 0, size);
+ return addr;
+ } else {
+ return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+@@ -195,6 +192,8 @@ static void __dma_free(struct device *dev, size_t size,
+ {
+ void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
+
++ size = PAGE_ALIGN(size);
++
+ if (!is_device_dma_coherent(dev)) {
+ if (__free_from_pool(vaddr, size))
+ return;
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index c7a16904cd03..1a313c468d65 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2072,7 +2072,7 @@ config MIPSR2_TO_R6_EMULATOR
+ help
+ Choose this option if you want to run non-R6 MIPS userland code.
+ Even if you say 'Y' here, the emulator will still be disabled by
+- default. You can enable it using the 'mipsr2emul' kernel option.
++ default. You can enable it using the 'mipsr2emu' kernel option.
+ The only reason this is a build-time option is to save ~14K from the
+ final kernel image.
+ comment "MIPS R2-to-R6 emulator is only available for UP kernels"
+@@ -2142,7 +2142,7 @@ config MIPS_CMP
+
+ config MIPS_CPS
+ bool "MIPS Coherent Processing System support"
+- depends on SYS_SUPPORTS_MIPS_CPS
++ depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
+ select MIPS_CM
+ select MIPS_CPC
+ select MIPS_CPS_PM if HOTPLUG_CPU
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index 8f57fc72d62c..1b4dab1e6ab8 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -197,11 +197,17 @@ endif
+ # Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
+ # Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
+ # been fixed properly.
+-mips-cflags := "$(cflags-y)"
+-cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
+-cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,$(mips-cflags),-mmicromips)
++mips-cflags := $(cflags-y)
++ifeq ($(CONFIG_CPU_HAS_SMARTMIPS),y)
++smartmips-ase := $(call cc-option-yn,$(mips-cflags) -msmartmips)
++cflags-$(smartmips-ase) += -msmartmips -Wa,--no-warn
++endif
++ifeq ($(CONFIG_CPU_MICROMIPS),y)
++micromips-ase := $(call cc-option-yn,$(mips-cflags) -mmicromips)
++cflags-$(micromips-ase) += -mmicromips
++endif
+ ifeq ($(CONFIG_CPU_HAS_MSA),y)
+-toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
++toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(comma)-mmsa)
+ cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
+ endif
+
+diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
+index b3ae068ca4fa..3fd369d74444 100644
+--- a/arch/mips/bcm47xx/board.c
++++ b/arch/mips/bcm47xx/board.c
+@@ -247,8 +247,8 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
+ }
+
+ if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0 &&
+- bcm47xx_nvram_getenv("boardtype", buf2, sizeof(buf2)) >= 0) {
+- for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) {
++ bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0) {
++ for (e2 = bcm47xx_board_list_hw_version_num; e2->value1; e2++) {
+ if (!strstarts(buf1, e2->value1) &&
+ !strcmp(buf2, e2->value2))
+ return &e2->board;
+diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
+index e1f27d653f60..7019e2967009 100644
+--- a/arch/mips/bcm63xx/prom.c
++++ b/arch/mips/bcm63xx/prom.c
+@@ -17,7 +17,6 @@
+ #include <bcm63xx_cpu.h>
+ #include <bcm63xx_io.h>
+ #include <bcm63xx_regs.h>
+-#include <bcm63xx_gpio.h>
+
+ void __init prom_init(void)
+ {
+@@ -53,9 +52,6 @@ void __init prom_init(void)
+ reg &= ~mask;
+ bcm_perf_writel(reg, PERF_CKCTL_REG);
+
+- /* register gpiochip */
+- bcm63xx_gpio_init();
+-
+ /* do low level board init */
+ board_prom_init();
+
+diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
+index 6660c7ddf87b..240fb4ffa55c 100644
+--- a/arch/mips/bcm63xx/setup.c
++++ b/arch/mips/bcm63xx/setup.c
+@@ -20,6 +20,7 @@
+ #include <bcm63xx_cpu.h>
+ #include <bcm63xx_regs.h>
+ #include <bcm63xx_io.h>
++#include <bcm63xx_gpio.h>
+
+ void bcm63xx_machine_halt(void)
+ {
+@@ -160,6 +161,9 @@ void __init plat_mem_setup(void)
+
+ int __init bcm63xx_register_devices(void)
+ {
++ /* register gpiochip */
++ bcm63xx_gpio_init();
++
+ return board_register_devices();
+ }
+
+diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
+index 7d8987818ccf..d8960d46417b 100644
+--- a/arch/mips/cavium-octeon/dma-octeon.c
++++ b/arch/mips/cavium-octeon/dma-octeon.c
+@@ -306,7 +306,7 @@ void __init plat_swiotlb_setup(void)
+ swiotlbsize = 64 * (1<<20);
+ }
+ #endif
+-#ifdef CONFIG_USB_OCTEON_OHCI
++#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
+ /* OCTEON II ohci is only 32-bit. */
+ if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
+ swiotlbsize = 64 * (1<<20);
+diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
+index a42110e7edbc..a7f40820e567 100644
+--- a/arch/mips/cavium-octeon/setup.c
++++ b/arch/mips/cavium-octeon/setup.c
+@@ -413,7 +413,10 @@ static void octeon_restart(char *command)
+
+ mb();
+ while (1)
+- cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
++ if (OCTEON_IS_OCTEON3())
++ cvmx_write_csr(CVMX_RST_SOFT_RST, 1);
++ else
++ cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
+ }
+
+
+diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
+index e08381a37f8b..723229f4cf27 100644
+--- a/arch/mips/include/asm/cacheflush.h
++++ b/arch/mips/include/asm/cacheflush.h
+@@ -29,6 +29,20 @@
+ * - flush_icache_all() flush the entire instruction cache
+ * - flush_data_cache_page() flushes a page from the data cache
+ */
++
++ /*
++ * This flag is used to indicate that the page pointed to by a pte
++ * is dirty and requires cleaning before returning it to the user.
++ */
++#define PG_dcache_dirty PG_arch_1
++
++#define Page_dcache_dirty(page) \
++ test_bit(PG_dcache_dirty, &(page)->flags)
++#define SetPageDcacheDirty(page) \
++ set_bit(PG_dcache_dirty, &(page)->flags)
++#define ClearPageDcacheDirty(page) \
++ clear_bit(PG_dcache_dirty, &(page)->flags)
++
+ extern void (*flush_cache_all)(void);
+ extern void (*__flush_cache_all)(void);
+ extern void (*flush_cache_mm)(struct mm_struct *mm);
+@@ -37,13 +51,15 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+ extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
+ extern void __flush_dcache_page(struct page *page);
++extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
+
+ #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+ static inline void flush_dcache_page(struct page *page)
+ {
+- if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
++ if (cpu_has_dc_aliases)
+ __flush_dcache_page(page);
+-
++ else if (!cpu_has_ic_fills_f_dc)
++ SetPageDcacheDirty(page);
+ }
+
+ #define flush_dcache_mmap_lock(mapping) do { } while (0)
+@@ -61,6 +77,11 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
+ static inline void flush_icache_page(struct vm_area_struct *vma,
+ struct page *page)
+ {
++ if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
++ Page_dcache_dirty(page)) {
++ __flush_icache_page(vma, page);
++ ClearPageDcacheDirty(page);
++ }
+ }
+
+ extern void (*flush_icache_range)(unsigned long start, unsigned long end);
+@@ -95,19 +116,6 @@ extern void (*flush_icache_all)(void);
+ extern void (*local_flush_data_cache_page)(void * addr);
+ extern void (*flush_data_cache_page)(unsigned long addr);
+
+-/*
+- * This flag is used to indicate that the page pointed to by a pte
+- * is dirty and requires cleaning before returning it to the user.
+- */
+-#define PG_dcache_dirty PG_arch_1
+-
+-#define Page_dcache_dirty(page) \
+- test_bit(PG_dcache_dirty, &(page)->flags)
+-#define SetPageDcacheDirty(page) \
+- set_bit(PG_dcache_dirty, &(page)->flags)
+-#define ClearPageDcacheDirty(page) \
+- clear_bit(PG_dcache_dirty, &(page)->flags)
+-
+ /* Run kernel code uncached, useful for cache probing functions. */
+ unsigned long run_uncached(void *func);
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 0d8208de9a3f..345fd7f80730 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -235,8 +235,39 @@
+ /* MIPSR2 and MIPSR6 have a lot of similarities */
+ #define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6)
+
++/*
++ * cpu_has_mips_r2_exec_hazard - return if IHB is required on current processor
++ *
++ * Returns non-zero value if the current processor implementation requires
++ * an IHB instruction to deal with an instruction hazard as per MIPS R2
++ * architecture specification, zero otherwise.
++ */
+ #ifndef cpu_has_mips_r2_exec_hazard
+-#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
++#define cpu_has_mips_r2_exec_hazard \
++({ \
++ int __res; \
++ \
++ switch (current_cpu_type()) { \
++ case CPU_M14KC: \
++ case CPU_74K: \
++ case CPU_1074K: \
++ case CPU_PROAPTIV: \
++ case CPU_P5600: \
++ case CPU_M5150: \
++ case CPU_QEMU_GENERIC: \
++ case CPU_CAVIUM_OCTEON: \
++ case CPU_CAVIUM_OCTEON_PLUS: \
++ case CPU_CAVIUM_OCTEON2: \
++ case CPU_CAVIUM_OCTEON3: \
++ __res = 0; \
++ break; \
++ \
++ default: \
++ __res = 1; \
++ } \
++ \
++ __res; \
++})
+ #endif
+
+ /*
+diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
+index 535f196ffe02..694925a26924 100644
+--- a/arch/mips/include/asm/elf.h
++++ b/arch/mips/include/asm/elf.h
+@@ -294,6 +294,9 @@ do { \
+ if (personality(current->personality) != PER_LINUX) \
+ set_personality(PER_LINUX); \
+ \
++ clear_thread_flag(TIF_HYBRID_FPREGS); \
++ set_thread_flag(TIF_32BIT_FPREGS); \
++ \
+ mips_set_personality_fp(state); \
+ \
+ current->thread.abi = &mips_abi; \
+@@ -319,6 +322,8 @@ do { \
+ do { \
+ set_thread_flag(TIF_32BIT_REGS); \
+ set_thread_flag(TIF_32BIT_ADDR); \
++ clear_thread_flag(TIF_HYBRID_FPREGS); \
++ set_thread_flag(TIF_32BIT_FPREGS); \
+ \
+ mips_set_personality_fp(state); \
+ \
+diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+index fa1f3cfbae8d..d68e685cde60 100644
+--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
++++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+@@ -50,7 +50,6 @@
+ #define cpu_has_mips32r2 0
+ #define cpu_has_mips64r1 0
+ #define cpu_has_mips64r2 1
+-#define cpu_has_mips_r2_exec_hazard 0
+ #define cpu_has_dsp 0
+ #define cpu_has_dsp2 0
+ #define cpu_has_mipsmt 0
+diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
+index 33db1c806b01..774bb45834cb 100644
+--- a/arch/mips/include/asm/octeon/cvmx.h
++++ b/arch/mips/include/asm/octeon/cvmx.h
+@@ -436,14 +436,6 @@ static inline uint64_t cvmx_get_cycle_global(void)
+
+ /***************************************************************************/
+
+-static inline void cvmx_reset_octeon(void)
+-{
+- union cvmx_ciu_soft_rst ciu_soft_rst;
+- ciu_soft_rst.u64 = 0;
+- ciu_soft_rst.s.soft_rst = 1;
+- cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
+-}
+-
+ /* Return the number of cores available in the chip */
+ static inline uint32_t cvmx_octeon_num_cores(void)
+ {
+diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
+index 64ba56a02843..1884609741a8 100644
+--- a/arch/mips/include/asm/octeon/pci-octeon.h
++++ b/arch/mips/include/asm/octeon/pci-octeon.h
+@@ -11,9 +11,6 @@
+
+ #include <linux/pci.h>
+
+-/* Some PCI cards require delays when accessing config space. */
+-#define PCI_CONFIG_SPACE_DELAY 10000
+-
+ /*
+ * The physical memory base mapped by BAR1. 256MB at the end of the
+ * first 4GB.
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index bef782c4a44b..f8f809fd6c6d 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -127,10 +127,6 @@ do { \
+ } \
+ } while(0)
+
+-
+-extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+- pte_t pteval);
+-
+ #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+ #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
+@@ -154,6 +150,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
+ }
+ }
+ }
++#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+
+ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+@@ -192,6 +189,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
+ }
+ #endif
+ }
++#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+
+ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+@@ -407,12 +405,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+
+ extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
+ pte_t pte);
++extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
++ pte_t pte);
+
+ static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+ {
+ pte_t pte = *ptep;
+ __update_tlb(vma, address, pte);
++ __update_cache(vma, address, pte);
+ }
+
+ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
+index 1b22d2da88a1..38902bf97adc 100644
+--- a/arch/mips/include/asm/r4kcache.h
++++ b/arch/mips/include/asm/r4kcache.h
+@@ -12,6 +12,8 @@
+ #ifndef _ASM_R4KCACHE_H
+ #define _ASM_R4KCACHE_H
+
++#include <linux/stringify.h>
++
+ #include <asm/asm.h>
+ #include <asm/cacheops.h>
+ #include <asm/compiler.h>
+@@ -344,7 +346,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
+ " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
+ " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
+ " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
+- " addiu $1, $0, 0x100 \n" \
++ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x010($1)\n" \
+ " cache %1, 0x020($1); cache %1, 0x030($1)\n" \
+ " cache %1, 0x040($1); cache %1, 0x050($1)\n" \
+@@ -368,17 +370,17 @@ static inline void invalidate_tcache_page(unsigned long addr)
+ " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
+ " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
+ " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
++ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
+ " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
+- " addiu $1, $1, 0x100\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
+ " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
+- " addiu $1, $1, 0x100\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \
+ " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
+ " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
+@@ -396,25 +398,25 @@ static inline void invalidate_tcache_page(unsigned long addr)
+ " .set noat\n" \
+ " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
+ " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
++ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+- " addiu $1, %0, 0x100\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+- " addiu $1, %0, 0x100\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+- " addiu $1, %0, 0x100\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+- " addiu $1, %0, 0x100\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+- " addiu $1, %0, 0x100\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+- " addiu $1, %0, 0x100\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+ " .set pop\n" \
+@@ -429,39 +431,38 @@ static inline void invalidate_tcache_page(unsigned long addr)
+ " .set mips64r6\n" \
+ " .set noat\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
+- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+- " addiu $1, %0, 0x100\n" \
++ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
++ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
++ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " .set pop\n" \
+ : \
+ : "r" (base), \
+diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
+index b4548690ade9..1fca2e0793dc 100644
+--- a/arch/mips/include/asm/spinlock.h
++++ b/arch/mips/include/asm/spinlock.h
+@@ -263,7 +263,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
+ if (R10000_LLSC_WAR) {
+ __asm__ __volatile__(
+ "1: ll %1, %2 # arch_read_unlock \n"
+- " addiu %1, 1 \n"
++ " addiu %1, -1 \n"
+ " sc %1, %0 \n"
+ " beqzl %1, 1b \n"
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
+index af41ba6db960..7791840cf22c 100644
+--- a/arch/mips/kernel/entry.S
++++ b/arch/mips/kernel/entry.S
+@@ -10,6 +10,7 @@
+
+ #include <asm/asm.h>
+ #include <asm/asmmacro.h>
++#include <asm/compiler.h>
+ #include <asm/regdef.h>
+ #include <asm/mipsregs.h>
+ #include <asm/stackframe.h>
+@@ -185,7 +186,7 @@ syscall_exit_work:
+ * For C code use the inline version named instruction_hazard().
+ */
+ LEAF(mips_ihb)
+- .set mips32r2
++ .set MIPS_ISA_LEVEL_RAW
+ jr.hb ra
+ nop
+ END(mips_ihb)
+diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
+index bed7590e475f..d5589bedd0a4 100644
+--- a/arch/mips/kernel/smp-cps.c
++++ b/arch/mips/kernel/smp-cps.c
+@@ -88,6 +88,12 @@ static void __init cps_smp_setup(void)
+
+ /* Make core 0 coherent with everything */
+ write_gcr_cl_coherence(0xff);
++
++#ifdef CONFIG_MIPS_MT_FPAFF
++ /* If we have an FPU, enroll ourselves in the FPU-full mask */
++ if (cpu_has_fpu)
++ cpu_set(0, mt_fpu_cpumask);
++#endif /* CONFIG_MIPS_MT_FPAFF */
+ }
+
+ static void __init cps_prepare_cpus(unsigned int max_cpus)
+diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
+index 7e3ea7766822..77d96db8253c 100644
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -119,36 +119,37 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
+
+ EXPORT_SYMBOL(__flush_anon_page);
+
+-static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
++void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
++{
++ unsigned long addr;
++
++ if (PageHighMem(page))
++ return;
++
++ addr = (unsigned long) page_address(page);
++ flush_data_cache_page(addr);
++}
++EXPORT_SYMBOL_GPL(__flush_icache_page);
++
++void __update_cache(struct vm_area_struct *vma, unsigned long address,
++ pte_t pte)
+ {
+ struct page *page;
+- unsigned long pfn = pte_pfn(pteval);
++ unsigned long pfn, addr;
++ int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
+
++ pfn = pte_pfn(pte);
+ if (unlikely(!pfn_valid(pfn)))
+ return;
+-
+ page = pfn_to_page(pfn);
+ if (page_mapping(page) && Page_dcache_dirty(page)) {
+- unsigned long page_addr = (unsigned long) page_address(page);
+-
+- if (!cpu_has_ic_fills_f_dc ||
+- pages_do_alias(page_addr, address & PAGE_MASK))
+- flush_data_cache_page(page_addr);
++ addr = (unsigned long) page_address(page);
++ if (exec || pages_do_alias(addr, address & PAGE_MASK))
++ flush_data_cache_page(addr);
+ ClearPageDcacheDirty(page);
+ }
+ }
+
+-void set_pte_at(struct mm_struct *mm, unsigned long addr,
+- pte_t *ptep, pte_t pteval)
+-{
+- if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
+- if (pte_present(pteval))
+- mips_flush_dcache_from_pte(pteval, addr);
+- }
+-
+- set_pte(ptep, pteval);
+-}
+-
+ unsigned long _page_cachable_default;
+ EXPORT_SYMBOL(_page_cachable_default);
+
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index d75ff73a2012..a79fd0af0224 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -501,26 +501,9 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+ case tlb_indexed: tlbw = uasm_i_tlbwi; break;
+ }
+
+- if (cpu_has_mips_r2_exec_hazard) {
+- /*
+- * The architecture spec says an ehb is required here,
+- * but a number of cores do not have the hazard and
+- * using an ehb causes an expensive pipeline stall.
+- */
+- switch (current_cpu_type()) {
+- case CPU_M14KC:
+- case CPU_74K:
+- case CPU_1074K:
+- case CPU_PROAPTIV:
+- case CPU_P5600:
+- case CPU_M5150:
+- case CPU_QEMU_GENERIC:
+- break;
+-
+- default:
++ if (cpu_has_mips_r2_r6) {
++ if (cpu_has_mips_r2_exec_hazard)
+ uasm_i_ehb(p);
+- break;
+- }
+ tlbw(p);
+ return;
+ }
+diff --git a/arch/mips/netlogic/xlp/ahci-init-xlp2.c b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
+index c83dbf3689e2..7b066a44e679 100644
+--- a/arch/mips/netlogic/xlp/ahci-init-xlp2.c
++++ b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
+@@ -203,6 +203,7 @@ static u8 read_phy_reg(u64 regbase, u32 addr, u32 physel)
+ static void config_sata_phy(u64 regbase)
+ {
+ u32 port, i, reg;
++ u8 val;
+
+ for (port = 0; port < 2; port++) {
+ for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
+@@ -210,6 +211,18 @@ static void config_sata_phy(u64 regbase)
+
+ for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
+ write_phy_reg(regbase, reg, port, sata_phy_config2[i]);
++
++ /* Fix for PHY link up failures at lower temperatures */
++ write_phy_reg(regbase, 0x800F, port, 0x1f);
++
++ val = read_phy_reg(regbase, 0x0029, port);
++ write_phy_reg(regbase, 0x0029, port, val | (0x7 << 1));
++
++ val = read_phy_reg(regbase, 0x0056, port);
++ write_phy_reg(regbase, 0x0056, port, val & ~(1 << 3));
++
++ val = read_phy_reg(regbase, 0x0018, port);
++ write_phy_reg(regbase, 0x0018, port, val & ~(0x7 << 0));
+ }
+ }
+
+diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
+index 300591c6278d..2eda01e6e08f 100644
+--- a/arch/mips/pci/Makefile
++++ b/arch/mips/pci/Makefile
+@@ -43,7 +43,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o
+ obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o
+ obj-$(CONFIG_LANTIQ) += fixup-lantiq.o
+ obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
+-obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o
++obj-$(CONFIG_SOC_RT288X) += pci-rt2880.o
+ obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o
+ obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
+ obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
+diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
+index a04af55d89f1..c258cd406fbb 100644
+--- a/arch/mips/pci/pci-octeon.c
++++ b/arch/mips/pci/pci-octeon.c
+@@ -214,6 +214,8 @@ const char *octeon_get_pci_interrupts(void)
+ return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ return "AABCD";
++ case CVMX_BOARD_TYPE_CUST_DSR1000N:
++ return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
+ case CVMX_BOARD_TYPE_THUNDER:
+ case CVMX_BOARD_TYPE_EBH3000:
+ default:
+@@ -271,9 +273,6 @@ static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
+ pci_addr.s.func = devfn & 0x7;
+ pci_addr.s.reg = reg;
+
+-#if PCI_CONFIG_SPACE_DELAY
+- udelay(PCI_CONFIG_SPACE_DELAY);
+-#endif
+ switch (size) {
+ case 4:
+ *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
+@@ -308,9 +307,6 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
+ pci_addr.s.func = devfn & 0x7;
+ pci_addr.s.reg = reg;
+
+-#if PCI_CONFIG_SPACE_DELAY
+- udelay(PCI_CONFIG_SPACE_DELAY);
+-#endif
+ switch (size) {
+ case 4:
+ cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
+diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
+index 1bb0b2bf8d6e..99f3db4f0a9b 100644
+--- a/arch/mips/pci/pcie-octeon.c
++++ b/arch/mips/pci/pcie-octeon.c
+@@ -1762,14 +1762,6 @@ static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus,
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+-#if PCI_CONFIG_SPACE_DELAY
+- /*
+- * Delay on writes so that devices have time to come up. Some
+- * bridges need this to allow time for the secondary busses to
+- * work
+- */
+- udelay(PCI_CONFIG_SPACE_DELAY);
+-#endif
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
+index b1c52ca580f9..e9bc8c96174e 100644
+--- a/arch/mips/ralink/Kconfig
++++ b/arch/mips/ralink/Kconfig
+@@ -7,6 +7,11 @@ config CLKEVT_RT3352
+ select CLKSRC_OF
+ select CLKSRC_MMIO
+
++config RALINK_ILL_ACC
++ bool
++ depends on SOC_RT305X
++ default y
++
+ choice
+ prompt "Ralink SoC selection"
+ default SOC_RT305X
+diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
+index a7a3edd28beb..f23179e84128 100644
+--- a/drivers/acpi/sbs.c
++++ b/drivers/acpi/sbs.c
+@@ -670,7 +670,7 @@ static int acpi_sbs_add(struct acpi_device *device)
+ if (!sbs_manager_broken) {
+ result = acpi_manager_get_info(sbs);
+ if (!result) {
+- sbs->manager_present = 0;
++ sbs->manager_present = 1;
+ for (id = 0; id < MAX_SBS_BAT; ++id)
+ if ((sbs->batteries_supported & (1 << id)))
+ acpi_battery_add(sbs, id);
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index b40af3203089..b67066d0d9a6 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -2264,6 +2264,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
+ result, xferred);
+ if (!img_request->result)
+ img_request->result = result;
++ /*
++ * Need to end I/O on the entire obj_request worth of
++ * bytes in case of error.
++ */
++ xferred = obj_request->length;
+ }
+
+ /* Image object requests don't own their page array */
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 9bd56116fd5a..1afc0b419da2 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -580,6 +580,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ else
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
++ /* if there is no audio, set MINM_OVER_MAXP */
++ if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
++ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+ if (rdev->family < CHIP_RV770)
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+ /* use frac fb div on APUs */
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index c39c1d0d9d4e..f20eb32406d1 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1729,17 +1729,15 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int encoder_mode = atombios_get_encoder_mode(encoder);
+
+ DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
+ radeon_encoder->encoder_id, mode, radeon_encoder->devices,
+ radeon_encoder->active_device);
+
+- if (connector && (radeon_audio != 0) &&
++ if ((radeon_audio != 0) &&
+ ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+- (ENCODER_MODE_IS_DP(encoder_mode) &&
+- drm_detect_monitor_audio(radeon_connector_edid(connector)))))
++ ENCODER_MODE_IS_DP(encoder_mode)))
+ radeon_audio_dpms(encoder, mode);
+
+ switch (radeon_encoder->encoder_id) {
+diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
+index 3adc2afe32aa..68fd9fc677e3 100644
+--- a/drivers/gpu/drm/radeon/dce6_afmt.c
++++ b/drivers/gpu/drm/radeon/dce6_afmt.c
+@@ -295,28 +295,3 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+ }
+ }
+-
+-void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
+-{
+- struct drm_device *dev = encoder->dev;
+- struct radeon_device *rdev = dev->dev_private;
+- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+-
+- if (!dig || !dig->afmt)
+- return;
+-
+- if (enable) {
+- WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
+- EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
+- EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
+- EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
+- EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
+- EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
+- } else {
+- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
+- }
+-
+- dig->afmt->enabled = enable;
+-}
+diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+index c18d4ecbd95d..0926739c9fa7 100644
+--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
++++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+@@ -219,13 +219,9 @@ void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
+ WREG32(AFMT_AVI_INFO3 + offset,
+ frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
+
+- WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
+- HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+- HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
+-
+ WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
+- HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
+- ~HDMI_AVI_INFO_LINE_MASK);
++ HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
++ ~HDMI_AVI_INFO_LINE_MASK);
+ }
+
+ void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
+@@ -370,9 +366,13 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
+ WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
+ AFMT_AUDIO_CHANNEL_ENABLE(0xff));
+
++ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
++ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
++ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
++
+ /* allow 60958 channel status and send audio packets fields to be updated */
+- WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+- AFMT_AUDIO_SAMPLE_SEND | AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
++ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
++ AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
+ }
+
+
+@@ -398,17 +398,26 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
+ return;
+
+ if (enable) {
+- WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset,
+- HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+-
+- WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+- HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+- HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
++ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+- WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
+- HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+- HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
++ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
++ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
++ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
++ HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
++ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
++ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
++ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
++ AFMT_AUDIO_SAMPLE_SEND);
++ } else {
++ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
++ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
++ HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
++ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
++ ~AFMT_AUDIO_SAMPLE_SEND);
++ }
+ } else {
++ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
++ ~AFMT_AUDIO_SAMPLE_SEND);
+ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
+ }
+
+@@ -424,20 +433,24 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
++ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+ if (!dig || !dig->afmt)
+ return;
+
+- if (enable) {
++ if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
+ uint32_t val;
+
++ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
++ AFMT_AUDIO_SAMPLE_SEND);
++
+ WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
+ EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+
+- if (radeon_connector->con_priv) {
++ if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) {
+ dig_connector = radeon_connector->con_priv;
+ val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
+ val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
+@@ -457,6 +470,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
+ EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
+ } else {
+ WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
++ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
++ ~AFMT_AUDIO_SAMPLE_SEND);
+ }
+
+ dig->afmt->enabled = enable;
+diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
+index dd6606b8e23c..e85894ade95c 100644
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -228,12 +228,13 @@ void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
+ WREG32(HDMI0_AVI_INFO3 + offset,
+ frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
+
++ WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
++ HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
++
+ WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
+- HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+- HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
++ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
++ HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
+
+- WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
+- HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index b21ef69a34ac..b7d33a13db9f 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -102,7 +102,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
+ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
+ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
+-void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
+
+ static const u32 pin_offsets[7] =
+ {
+@@ -240,7 +239,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
+ .set_avi_packet = evergreen_set_avi_packet,
+ .set_audio_packet = dce4_set_audio_packet,
+ .mode_set = radeon_audio_dp_mode_set,
+- .dpms = dce6_dp_enable,
++ .dpms = evergreen_dp_enable,
+ };
+
+ static void radeon_audio_interface_init(struct radeon_device *rdev)
+@@ -461,30 +460,33 @@ void radeon_audio_detect(struct drm_connector *connector,
+ if (!connector || !connector->encoder)
+ return;
+
++ if (!radeon_encoder_is_digital(connector->encoder))
++ return;
++
+ rdev = connector->encoder->dev->dev_private;
+ radeon_encoder = to_radeon_encoder(connector->encoder);
+ dig = radeon_encoder->enc_priv;
+
+- if (status == connector_status_connected) {
+- struct radeon_connector *radeon_connector;
+- int sink_type;
+-
+- if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+- radeon_encoder->audio = NULL;
+- return;
+- }
++ if (!dig->afmt)
++ return;
+
+- radeon_connector = to_radeon_connector(connector);
+- sink_type = radeon_dp_getsinktype(radeon_connector);
++ if (status == connector_status_connected) {
++ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
+- sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
++ radeon_dp_getsinktype(radeon_connector) ==
++ CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ radeon_encoder->audio = rdev->audio.dp_funcs;
+ else
+ radeon_encoder->audio = rdev->audio.hdmi_funcs;
+
+ dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
+- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
++ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
++ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
++ } else {
++ radeon_audio_enable(rdev, dig->afmt->pin, 0);
++ dig->afmt->pin = NULL;
++ }
+ } else {
+ radeon_audio_enable(rdev, dig->afmt->pin, 0);
+ dig->afmt->pin = NULL;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 27def67cb6be..27973e3faf0e 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1333,8 +1333,10 @@ out:
+ /* updated in get modes as well since we need to know if it's analog or digital */
+ radeon_connector_update_scratch_regs(connector, ret);
+
+- if (radeon_audio != 0)
++ if (radeon_audio != 0) {
++ radeon_connector_get_edid(connector);
+ radeon_audio_detect(connector, ret);
++ }
+
+ exit:
+ pm_runtime_mark_last_busy(connector->dev->dev);
+@@ -1659,8 +1661,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+
+ radeon_connector_update_scratch_regs(connector, ret);
+
+- if (radeon_audio != 0)
++ if (radeon_audio != 0) {
++ radeon_connector_get_edid(connector);
+ radeon_audio_detect(connector, ret);
++ }
+
+ out:
+ pm_runtime_mark_last_busy(connector->dev->dev);
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 4d0f96cc3da4..ab39b85e0f76 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -88,7 +88,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+ p->dma_reloc_idx = 0;
+ /* FIXME: we assume that each relocs use 4 dwords */
+ p->nrelocs = chunk->length_dw / 4;
+- p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
++ p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
+ if (p->relocs == NULL) {
+ return -ENOMEM;
+ }
+@@ -428,7 +428,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
+ }
+ }
+ kfree(parser->track);
+- kfree(parser->relocs);
++ drm_free_large(parser->relocs);
+ drm_free_large(parser->vm_bos);
+ for (i = 0; i < parser->nchunks; i++)
+ drm_free_large(parser->chunks[i].kdata);
+diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
+index 2a5a4a9e772d..de42fc4a22b8 100644
+--- a/drivers/gpu/drm/radeon/radeon_vm.c
++++ b/drivers/gpu/drm/radeon/radeon_vm.c
+@@ -473,6 +473,23 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ }
+
+ mutex_lock(&vm->mutex);
++ soffset /= RADEON_GPU_PAGE_SIZE;
++ eoffset /= RADEON_GPU_PAGE_SIZE;
++ if (soffset || eoffset) {
++ struct interval_tree_node *it;
++ it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
++ if (it && it != &bo_va->it) {
++ struct radeon_bo_va *tmp;
++ tmp = container_of(it, struct radeon_bo_va, it);
++ /* bo and tmp overlap, invalid offset */
++ dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
++ "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
++ soffset, tmp->bo, tmp->it.start, tmp->it.last);
++ mutex_unlock(&vm->mutex);
++ return -EINVAL;
++ }
++ }
++
+ if (bo_va->it.start || bo_va->it.last) {
+ if (bo_va->addr) {
+ /* add a clone of the bo_va to clear the old address */
+@@ -490,6 +507,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ spin_lock(&vm->status_lock);
+ list_add(&tmp->vm_status, &vm->freed);
+ spin_unlock(&vm->status_lock);
++
++ bo_va->addr = 0;
+ }
+
+ interval_tree_remove(&bo_va->it, &vm->va);
+@@ -497,21 +516,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ bo_va->it.last = 0;
+ }
+
+- soffset /= RADEON_GPU_PAGE_SIZE;
+- eoffset /= RADEON_GPU_PAGE_SIZE;
+ if (soffset || eoffset) {
+- struct interval_tree_node *it;
+- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
+- if (it) {
+- struct radeon_bo_va *tmp;
+- tmp = container_of(it, struct radeon_bo_va, it);
+- /* bo and tmp overlap, invalid offset */
+- dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
+- "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
+- soffset, tmp->bo, tmp->it.start, tmp->it.last);
+- mutex_unlock(&vm->mutex);
+- return -EINVAL;
+- }
+ bo_va->it.start = soffset;
+ bo_va->it.last = eoffset - 1;
+ interval_tree_insert(&bo_va->it, &vm->va);
+@@ -1107,7 +1112,8 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
+ list_del(&bo_va->bo_list);
+
+ mutex_lock(&vm->mutex);
+- interval_tree_remove(&bo_va->it, &vm->va);
++ if (bo_va->it.start || bo_va->it.last)
++ interval_tree_remove(&bo_va->it, &vm->va);
+ spin_lock(&vm->status_lock);
+ list_del(&bo_va->vm_status);
+
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 7be11651b7e6..9dbb3154d559 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2924,6 +2924,7 @@ struct si_dpm_quirk {
+ static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
++ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { 0, 0, 0, 0 },
+ };
+
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 3736f71bdec5..18def3022f6e 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -787,7 +787,7 @@ int vmbus_request_offers(void)
+ {
+ struct vmbus_channel_message_header *msg;
+ struct vmbus_channel_msginfo *msginfo;
+- int ret, t;
++ int ret;
+
+ msginfo = kmalloc(sizeof(*msginfo) +
+ sizeof(struct vmbus_channel_message_header),
+@@ -795,8 +795,6 @@ int vmbus_request_offers(void)
+ if (!msginfo)
+ return -ENOMEM;
+
+- init_completion(&msginfo->waitevent);
+-
+ msg = (struct vmbus_channel_message_header *)msginfo->msg;
+
+ msg->msgtype = CHANNELMSG_REQUESTOFFERS;
+@@ -810,14 +808,6 @@ int vmbus_request_offers(void)
+ goto cleanup;
+ }
+
+- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+- if (t == 0) {
+- ret = -ETIMEDOUT;
+- goto cleanup;
+- }
+-
+-
+-
+ cleanup:
+ kfree(msginfo);
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index ee394dc68303..ec1ea8ba7aac 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
+ memoffset = (mtype * (edc_size * 1024 * 1024));
+ else {
+ mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
+- MA_EXT_MEMORY1_BAR_A));
++ MA_EXT_MEMORY0_BAR_A));
+ memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 3485acf03014..2f1324bed7b3 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work)
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_ptp_overflow_check(mdev);
+
++ mlx4_en_recover_from_oom(priv);
+ queue_delayed_work(mdev->workqueue, &priv->service_task,
+ SERVICE_TASK_DELAY);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index 698d60de1255..05ec5e151ded 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
+ return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
+ }
+
++static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
++{
++ BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
++ return ring->prod == ring->cons;
++}
++
+ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
+ {
+ *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
+@@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
+ ring->cons, ring->prod);
+
+ /* Unmap and free Rx buffers */
+- BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
+- while (ring->cons != ring->prod) {
++ while (!mlx4_en_is_ring_empty(ring)) {
+ index = ring->cons & ring->size_mask;
+ en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+ mlx4_en_free_rx_desc(priv, ring, index);
+@@ -491,6 +496,23 @@ err_allocator:
+ return err;
+ }
+
++/* We recover from out of memory by scheduling our napi poll
++ * function (mlx4_en_process_cq), which tries to allocate
++ * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
++ */
++void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
++{
++ int ring;
++
++ if (!priv->port_up)
++ return;
++
++ for (ring = 0; ring < priv->rx_ring_num; ring++) {
++ if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
++ napi_reschedule(&priv->rx_cq[ring]->napi);
++ }
++}
++
+ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride)
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index 55f9f5c5344e..8c234ec1d8aa 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+ ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+ ring->queue_index = queue_index;
+
+- if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
+- cpumask_set_cpu(queue_index, &ring->affinity_mask);
++ if (queue_index < priv->num_tx_rings_p_up)
++ cpumask_set_cpu_local_first(queue_index,
++ priv->mdev->dev->numa_node,
++ &ring->affinity_mask);
+
+ *pring = ring;
+ return 0;
+@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+
+ err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
+ &ring->qp, &ring->qp_state);
+- if (!user_prio && cpu_online(ring->queue_index))
++ if (!cpumask_empty(&ring->affinity_mask))
+ netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+ ring->queue_index);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index ebbe244e80dd..8687c8d54227 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -790,6 +790,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring);
+ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
++void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
+ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride, int node);
+diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
+index 7600639db4c4..add419d6ff34 100644
+--- a/drivers/scsi/3w-9xxx.c
++++ b/drivers/scsi/3w-9xxx.c
+@@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
+ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
+ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
+ static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
+-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
+
+ /* Functions */
+
+@@ -1340,11 +1339,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
+ }
+
+ /* Now complete the io */
++ scsi_dma_unmap(cmd);
++ cmd->scsi_done(cmd);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twa_free_request_id(tw_dev, request_id);
+ tw_dev->posted_request_count--;
+- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+- twa_unmap_scsi_data(tw_dev, request_id);
+ }
+
+ /* Check for valid status after each drain */
+@@ -1402,26 +1401,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
+ }
+ } /* End twa_load_sgl() */
+
+-/* This function will perform a pci-dma mapping for a scatter gather list */
+-static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
+-{
+- int use_sg;
+- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+-
+- use_sg = scsi_dma_map(cmd);
+- if (!use_sg)
+- return 0;
+- else if (use_sg < 0) {
+- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
+- return 0;
+- }
+-
+- cmd->SCp.phase = TW_PHASE_SGLIST;
+- cmd->SCp.have_data_in = use_sg;
+-
+- return use_sg;
+-} /* End twa_map_scsi_sg_data() */
+-
+ /* This function will poll for a response interrupt of a request */
+ static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
+ {
+@@ -1600,9 +1579,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
+ (tw_dev->state[i] != TW_S_INITIAL) &&
+ (tw_dev->state[i] != TW_S_COMPLETED)) {
+ if (tw_dev->srb[i]) {
+- tw_dev->srb[i]->result = (DID_RESET << 16);
+- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
+- twa_unmap_scsi_data(tw_dev, i);
++ struct scsi_cmnd *cmd = tw_dev->srb[i];
++
++ cmd->result = (DID_RESET << 16);
++ scsi_dma_unmap(cmd);
++ cmd->scsi_done(cmd);
+ }
+ }
+ }
+@@ -1781,21 +1762,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
+ /* Save the scsi command for use by the ISR */
+ tw_dev->srb[request_id] = SCpnt;
+
+- /* Initialize phase to zero */
+- SCpnt->SCp.phase = TW_PHASE_INITIAL;
+-
+ retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
+ switch (retval) {
+ case SCSI_MLQUEUE_HOST_BUSY:
++ scsi_dma_unmap(SCpnt);
+ twa_free_request_id(tw_dev, request_id);
+- twa_unmap_scsi_data(tw_dev, request_id);
+ break;
+ case 1:
+- tw_dev->state[request_id] = TW_S_COMPLETED;
+- twa_free_request_id(tw_dev, request_id);
+- twa_unmap_scsi_data(tw_dev, request_id);
+ SCpnt->result = (DID_ERROR << 16);
++ scsi_dma_unmap(SCpnt);
+ done(SCpnt);
++ tw_dev->state[request_id] = TW_S_COMPLETED;
++ twa_free_request_id(tw_dev, request_id);
+ retval = 0;
+ }
+ out:
+@@ -1863,8 +1841,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+ command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
+ command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
+ } else {
+- sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
+- if (sg_count == 0)
++ sg_count = scsi_dma_map(srb);
++ if (sg_count < 0)
+ goto out;
+
+ scsi_for_each_sg(srb, sg, sg_count, i) {
+@@ -1979,15 +1957,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
+ return(table[index].text);
+ } /* End twa_string_lookup() */
+
+-/* This function will perform a pci-dma unmap */
+-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
+-{
+- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+-
+- if (cmd->SCp.phase == TW_PHASE_SGLIST)
+- scsi_dma_unmap(cmd);
+-} /* End twa_unmap_scsi_data() */
+-
+ /* This function gets called when a disk is coming on-line */
+ static int twa_slave_configure(struct scsi_device *sdev)
+ {
+diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
+index 040f7214e5b7..0fdc83cfa0e1 100644
+--- a/drivers/scsi/3w-9xxx.h
++++ b/drivers/scsi/3w-9xxx.h
+@@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = {
+ #define TW_CURRENT_DRIVER_BUILD 0
+ #define TW_CURRENT_DRIVER_BRANCH 0
+
+-/* Phase defines */
+-#define TW_PHASE_INITIAL 0
+-#define TW_PHASE_SINGLE 1
+-#define TW_PHASE_SGLIST 2
+-
+ /* Misc defines */
+ #define TW_9550SX_DRAIN_COMPLETED 0xFFFF
+ #define TW_SECTOR_SIZE 512
+diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
+index 2361772d5909..f8374850f714 100644
+--- a/drivers/scsi/3w-sas.c
++++ b/drivers/scsi/3w-sas.c
+@@ -290,26 +290,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
+ return 0;
+ } /* End twl_post_command_packet() */
+
+-/* This function will perform a pci-dma mapping for a scatter gather list */
+-static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
+-{
+- int use_sg;
+- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+-
+- use_sg = scsi_dma_map(cmd);
+- if (!use_sg)
+- return 0;
+- else if (use_sg < 0) {
+- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
+- return 0;
+- }
+-
+- cmd->SCp.phase = TW_PHASE_SGLIST;
+- cmd->SCp.have_data_in = use_sg;
+-
+- return use_sg;
+-} /* End twl_map_scsi_sg_data() */
+-
+ /* This function hands scsi cdb's to the firmware */
+ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
+ {
+@@ -357,8 +337,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+ if (!sglistarg) {
+ /* Map sglist from scsi layer to cmd packet */
+ if (scsi_sg_count(srb)) {
+- sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
+- if (sg_count == 0)
++ sg_count = scsi_dma_map(srb);
++ if (sg_count <= 0)
+ goto out;
+
+ scsi_for_each_sg(srb, sg, sg_count, i) {
+@@ -1102,15 +1082,6 @@ out:
+ return retval;
+ } /* End twl_initialize_device_extension() */
+
+-/* This function will perform a pci-dma unmap */
+-static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
+-{
+- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+-
+- if (cmd->SCp.phase == TW_PHASE_SGLIST)
+- scsi_dma_unmap(cmd);
+-} /* End twl_unmap_scsi_data() */
+-
+ /* This function will handle attention interrupts */
+ static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
+ {
+@@ -1251,11 +1222,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
+ }
+
+ /* Now complete the io */
++ scsi_dma_unmap(cmd);
++ cmd->scsi_done(cmd);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twl_free_request_id(tw_dev, request_id);
+ tw_dev->posted_request_count--;
+- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+- twl_unmap_scsi_data(tw_dev, request_id);
+ }
+
+ /* Check for another response interrupt */
+@@ -1400,10 +1371,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
+ if ((tw_dev->state[i] != TW_S_FINISHED) &&
+ (tw_dev->state[i] != TW_S_INITIAL) &&
+ (tw_dev->state[i] != TW_S_COMPLETED)) {
+- if (tw_dev->srb[i]) {
+- tw_dev->srb[i]->result = (DID_RESET << 16);
+- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
+- twl_unmap_scsi_data(tw_dev, i);
++ struct scsi_cmnd *cmd = tw_dev->srb[i];
++
++ if (cmd) {
++ cmd->result = (DID_RESET << 16);
++ scsi_dma_unmap(cmd);
++ cmd->scsi_done(cmd);
+ }
+ }
+ }
+@@ -1507,9 +1480,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
+ /* Save the scsi command for use by the ISR */
+ tw_dev->srb[request_id] = SCpnt;
+
+- /* Initialize phase to zero */
+- SCpnt->SCp.phase = TW_PHASE_INITIAL;
+-
+ retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
+ if (retval) {
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
+index d474892701d4..fec6449c7595 100644
+--- a/drivers/scsi/3w-sas.h
++++ b/drivers/scsi/3w-sas.h
+@@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] =
+ #define TW_CURRENT_DRIVER_BUILD 0
+ #define TW_CURRENT_DRIVER_BRANCH 0
+
+-/* Phase defines */
+-#define TW_PHASE_INITIAL 0
+-#define TW_PHASE_SGLIST 2
+-
+ /* Misc defines */
+ #define TW_SECTOR_SIZE 512
+ #define TW_MAX_UNITS 32
+diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
+index c75f2048319f..2940bd769936 100644
+--- a/drivers/scsi/3w-xxxx.c
++++ b/drivers/scsi/3w-xxxx.c
+@@ -1271,32 +1271,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev)
+ return 0;
+ } /* End tw_initialize_device_extension() */
+
+-static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
+-{
+- int use_sg;
+-
+- dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
+-
+- use_sg = scsi_dma_map(cmd);
+- if (use_sg < 0) {
+- printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
+- return 0;
+- }
+-
+- cmd->SCp.phase = TW_PHASE_SGLIST;
+- cmd->SCp.have_data_in = use_sg;
+-
+- return use_sg;
+-} /* End tw_map_scsi_sg_data() */
+-
+-static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
+-{
+- dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
+-
+- if (cmd->SCp.phase == TW_PHASE_SGLIST)
+- scsi_dma_unmap(cmd);
+-} /* End tw_unmap_scsi_data() */
+-
+ /* This function will reset a device extension */
+ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
+ {
+@@ -1319,8 +1293,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
+ srb = tw_dev->srb[i];
+ if (srb != NULL) {
+ srb->result = (DID_RESET << 16);
+- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
+- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]);
++ scsi_dma_unmap(srb);
++ srb->scsi_done(srb);
+ }
+ }
+ }
+@@ -1767,8 +1741,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
+ command_packet->byte8.io.lba = lba;
+ command_packet->byte6.block_count = num_sectors;
+
+- use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
+- if (!use_sg)
++ use_sg = scsi_dma_map(srb);
++ if (use_sg <= 0)
+ return 1;
+
+ scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
+@@ -1955,9 +1929,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
+ /* Save the scsi command for use by the ISR */
+ tw_dev->srb[request_id] = SCpnt;
+
+- /* Initialize phase to zero */
+- SCpnt->SCp.phase = TW_PHASE_INITIAL;
+-
+ switch (*command) {
+ case READ_10:
+ case READ_6:
+@@ -2185,12 +2156,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
+
+ /* Now complete the io */
+ if ((error != TW_ISR_DONT_COMPLETE)) {
++ scsi_dma_unmap(tw_dev->srb[request_id]);
++ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+ tw_dev->posted_request_count--;
+- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+-
+- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
+ }
+ }
+
+diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
+index 29b0b84ed69e..6f65e663d393 100644
+--- a/drivers/scsi/3w-xxxx.h
++++ b/drivers/scsi/3w-xxxx.h
+@@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] =
+ #define TW_AEN_SMART_FAIL 0x000F
+ #define TW_AEN_SBUF_FAIL 0x0024
+
+-/* Phase defines */
+-#define TW_PHASE_INITIAL 0
+-#define TW_PHASE_SINGLE 1
+-#define TW_PHASE_SGLIST 2
+-
+ /* Misc defines */
+ #define TW_ALIGNMENT_6000 64 /* 64 bytes */
+ #define TW_ALIGNMENT_7000 4 /* 4 bytes */
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index 262ab837a704..9f77d23239a2 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -226,6 +226,7 @@ static struct {
+ {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
+ {"Promise", "", NULL, BLIST_SPARSELUN},
++ {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
+ {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
+ {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
+ {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 9c0a520d933c..3e6142f61499 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -897,6 +897,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ */
+ if (*bflags & BLIST_MAX_512)
+ blk_queue_max_hw_sectors(sdev->request_queue, 512);
++ /*
++ * Max 1024 sector transfer length for targets that report incorrect
++ * max/optimal lengths and relied on the old block layer safe default
++ */
++ else if (*bflags & BLIST_MAX_1024)
++ blk_queue_max_hw_sectors(sdev->request_queue, 1024);
+
+ /*
+ * Some devices may not want to have a start command automatically
+diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
+index 75b3603906c1..f0d22cdb51cd 100644
+--- a/drivers/ssb/Kconfig
++++ b/drivers/ssb/Kconfig
+@@ -130,6 +130,7 @@ config SSB_DRIVER_MIPS
+ bool "SSB Broadcom MIPS core driver"
+ depends on SSB && MIPS
+ select SSB_SERIAL
++ select SSB_SFLASH
+ help
+ Driver for the Sonics Silicon Backplane attached
+ Broadcom MIPS core.
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 4e959c43f680..6afce7eb3d74 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -880,6 +880,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
+ config.direction = DMA_MEM_TO_DEV;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.dst_addr = port->mapbase + ATMEL_US_THR;
++ config.dst_maxburst = 1;
+
+ ret = dmaengine_slave_config(atmel_port->chan_tx,
+ &config);
+@@ -1059,6 +1060,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
+ config.direction = DMA_DEV_TO_MEM;
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.src_addr = port->mapbase + ATMEL_US_RHR;
++ config.src_maxburst = 1;
+
+ ret = dmaengine_slave_config(atmel_port->chan_rx,
+ &config);
+diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
+index 33fb94f78967..0a52c8b55a5f 100644
+--- a/drivers/tty/serial/of_serial.c
++++ b/drivers/tty/serial/of_serial.c
+@@ -344,7 +344,6 @@ static struct of_device_id of_platform_serial_table[] = {
+ { .compatible = "ibm,qpace-nwp-serial",
+ .data = (void *)PORT_NWPSERIAL, },
+ #endif
+- { .type = "serial", .data = (void *)PORT_UNKNOWN, },
+ { /* end of list */ },
+ };
+
+diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
+index 189f52e3111f..a0099a7f60d4 100644
+--- a/drivers/tty/serial/uartlite.c
++++ b/drivers/tty/serial/uartlite.c
+@@ -632,7 +632,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match);
+
+ static int ulite_probe(struct platform_device *pdev)
+ {
+- struct resource *res, *res2;
++ struct resource *res;
++ int irq;
+ int id = pdev->id;
+ #ifdef CONFIG_OF
+ const __be32 *prop;
+@@ -646,11 +647,11 @@ static int ulite_probe(struct platform_device *pdev)
+ if (!res)
+ return -ENODEV;
+
+- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+- if (!res2)
+- return -ENODEV;
++ irq = platform_get_irq(pdev, 0);
++ if (irq <= 0)
++ return -ENXIO;
+
+- return ulite_assign(&pdev->dev, id, res->start, res2->start);
++ return ulite_assign(&pdev->dev, id, res->start, irq);
+ }
+
+ static int ulite_remove(struct platform_device *pdev)
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
+index cff531a51a78..54853a02ce9e 100644
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -1325,9 +1325,9 @@ static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend,
+ */
+ static int cdns_uart_probe(struct platform_device *pdev)
+ {
+- int rc, id;
++ int rc, id, irq;
+ struct uart_port *port;
+- struct resource *res, *res2;
++ struct resource *res;
+ struct cdns_uart *cdns_uart_data;
+
+ cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
+@@ -1374,9 +1374,9 @@ static int cdns_uart_probe(struct platform_device *pdev)
+ goto err_out_clk_disable;
+ }
+
+- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+- if (!res2) {
+- rc = -ENODEV;
++ irq = platform_get_irq(pdev, 0);
++ if (irq <= 0) {
++ rc = -ENXIO;
+ goto err_out_clk_disable;
+ }
+
+@@ -1405,7 +1405,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
+ * and triggers invocation of the config_port() entry point.
+ */
+ port->mapbase = res->start;
+- port->irq = res2->start;
++ port->irq = irq;
+ port->dev = &pdev->dev;
+ port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
+ port->private_data = cdns_uart_data;
+diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
+index 562e581f6765..3770330a2201 100644
+--- a/drivers/usb/chipidea/otg_fsm.c
++++ b/drivers/usb/chipidea/otg_fsm.c
+@@ -537,7 +537,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
+ {
+ struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
+
+- mutex_unlock(&fsm->lock);
+ if (on) {
+ ci_role_stop(ci);
+ ci_role_start(ci, CI_ROLE_HOST);
+@@ -546,7 +545,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
+ hw_device_reset(ci);
+ ci_role_start(ci, CI_ROLE_GADGET);
+ }
+- mutex_lock(&fsm->lock);
+ return 0;
+ }
+
+@@ -554,12 +552,10 @@ static int ci_otg_start_gadget(struct otg_fsm *fsm, int on)
+ {
+ struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
+
+- mutex_unlock(&fsm->lock);
+ if (on)
+ usb_gadget_vbus_connect(&ci->gadget);
+ else
+ usb_gadget_vbus_disconnect(&ci->gadget);
+- mutex_lock(&fsm->lock);
+
+ return 0;
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 683617714e7c..220c0fd059bb 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1133,11 +1133,16 @@ static int acm_probe(struct usb_interface *intf,
+ }
+
+ while (buflen > 0) {
++ elength = buffer[0];
++ if (!elength) {
++ dev_err(&intf->dev, "skipping garbage byte\n");
++ elength = 1;
++ goto next_desc;
++ }
+ if (buffer[1] != USB_DT_CS_INTERFACE) {
+ dev_err(&intf->dev, "skipping garbage\n");
+ goto next_desc;
+ }
+- elength = buffer[0];
+
+ switch (buffer[2]) {
+ case USB_CDC_UNION_TYPE: /* we've found it */
+diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
+index 9893d696fc97..f58caa9e6a27 100644
+--- a/drivers/usb/storage/uas-detect.h
++++ b/drivers/usb/storage/uas-detect.h
+@@ -51,7 +51,8 @@ static int uas_find_endpoints(struct usb_host_interface *alt,
+ }
+
+ static int uas_use_uas_driver(struct usb_interface *intf,
+- const struct usb_device_id *id)
++ const struct usb_device_id *id,
++ unsigned long *flags_ret)
+ {
+ struct usb_host_endpoint *eps[4] = { };
+ struct usb_device *udev = interface_to_usbdev(intf);
+@@ -73,7 +74,7 @@ static int uas_use_uas_driver(struct usb_interface *intf,
+ * this writing the following versions exist:
+ * ASM1051 - no uas support version
+ * ASM1051 - with broken (*) uas support
+- * ASM1053 - with working uas support
++ * ASM1053 - with working uas support, but problems with large xfers
+ * ASM1153 - with working uas support
+ *
+ * Devices with these chips re-use a number of device-ids over the
+@@ -103,6 +104,9 @@ static int uas_use_uas_driver(struct usb_interface *intf,
+ } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
+ /* Possibly an ASM1051, disable uas */
+ flags |= US_FL_IGNORE_UAS;
++ } else {
++ /* ASM1053, these have issues with large transfers */
++ flags |= US_FL_MAX_SECTORS_240;
+ }
+ }
+
+@@ -132,5 +136,8 @@ static int uas_use_uas_driver(struct usb_interface *intf,
+ return 0;
+ }
+
++ if (flags_ret)
++ *flags_ret = flags;
++
+ return 1;
+ }
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 6cdabdc119a7..6d3122afeed3 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -759,7 +759,10 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
+
+ static int uas_slave_alloc(struct scsi_device *sdev)
+ {
+- sdev->hostdata = (void *)sdev->host->hostdata;
++ struct uas_dev_info *devinfo =
++ (struct uas_dev_info *)sdev->host->hostdata;
++
++ sdev->hostdata = devinfo;
+
+ /* USB has unusual DMA-alignment requirements: Although the
+ * starting address of each scatter-gather element doesn't matter,
+@@ -778,6 +781,11 @@ static int uas_slave_alloc(struct scsi_device *sdev)
+ */
+ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+
++ if (devinfo->flags & US_FL_MAX_SECTORS_64)
++ blk_queue_max_hw_sectors(sdev->request_queue, 64);
++ else if (devinfo->flags & US_FL_MAX_SECTORS_240)
++ blk_queue_max_hw_sectors(sdev->request_queue, 240);
++
+ return 0;
+ }
+
+@@ -887,8 +895,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ struct Scsi_Host *shost = NULL;
+ struct uas_dev_info *devinfo;
+ struct usb_device *udev = interface_to_usbdev(intf);
++ unsigned long dev_flags;
+
+- if (!uas_use_uas_driver(intf, id))
++ if (!uas_use_uas_driver(intf, id, &dev_flags))
+ return -ENODEV;
+
+ if (uas_switch_interface(udev, intf))
+@@ -910,8 +919,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ devinfo->udev = udev;
+ devinfo->resetting = 0;
+ devinfo->shutdown = 0;
+- devinfo->flags = id->driver_info;
+- usb_stor_adjust_quirks(udev, &devinfo->flags);
++ devinfo->flags = dev_flags;
+ init_usb_anchor(&devinfo->cmd_urbs);
+ init_usb_anchor(&devinfo->sense_urbs);
+ init_usb_anchor(&devinfo->data_urbs);
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index 5600c33fcadb..6c10c888f35f 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -479,7 +479,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+ US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
+ US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
+ US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
+- US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES);
++ US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
++ US_FL_MAX_SECTORS_240);
+
+ p = quirks;
+ while (*p) {
+@@ -520,6 +521,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+ case 'f':
+ f |= US_FL_NO_REPORT_OPCODES;
+ break;
++ case 'g':
++ f |= US_FL_MAX_SECTORS_240;
++ break;
+ case 'h':
+ f |= US_FL_CAPACITY_HEURISTICS;
+ break;
+@@ -1080,7 +1084,7 @@ static int storage_probe(struct usb_interface *intf,
+
+ /* If uas is enabled and this device can do uas then ignore it. */
+ #if IS_ENABLED(CONFIG_USB_UAS)
+- if (uas_use_uas_driver(intf, id))
++ if (uas_use_uas_driver(intf, id, NULL))
+ return -ENXIO;
+ #endif
+
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index f23d4be3280e..2b4c5423672d 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2403,7 +2403,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ "Attempt to delete subvolume %llu during send",
+ dest->root_key.objectid);
+ err = -EPERM;
+- goto out_dput;
++ goto out_unlock_inode;
+ }
+
+ d_invalidate(dentry);
+@@ -2498,6 +2498,7 @@ out_up_write:
+ root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
+ spin_unlock(&dest->root_item_lock);
+ }
++out_unlock_inode:
+ mutex_unlock(&inode->i_mutex);
+ if (!err) {
+ shrink_dcache_sb(root->fs_info->sb);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index bed43081720f..16f6365f65e7 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4934,13 +4934,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ if (ret)
+ return ret;
+
+- /*
+- * currently supporting (pre)allocate mode for extent-based
+- * files _only_
+- */
+- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+- return -EOPNOTSUPP;
+-
+ if (mode & FALLOC_FL_COLLAPSE_RANGE)
+ return ext4_collapse_range(inode, offset, len);
+
+@@ -4962,6 +4955,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+
+ mutex_lock(&inode->i_mutex);
+
++ /*
++ * We only support preallocation for extent-based files only
++ */
++ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
++ ret = -EOPNOTSUPP;
++ goto out;
++ }
++
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ offset + len > i_size_read(inode)) {
+ new_size = offset + len;
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index e04d45733976..9a0121376358 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -705,6 +705,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+
+ BUG_ON(end < lblk);
+
++ if ((status & EXTENT_STATUS_DELAYED) &&
++ (status & EXTENT_STATUS_WRITTEN)) {
++ ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
++ " delayed and written which can potentially "
++ " cause data loss.\n", lblk, len);
++ WARN_ON(1);
++ }
++
+ newes.es_lblk = lblk;
+ newes.es_len = len;
+ ext4_es_store_pblock_status(&newes, pblk, status);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 5cb9a212b86f..852cc521f327 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -534,6 +534,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
++ !(status & EXTENT_STATUS_WRITTEN) &&
+ ext4_find_delalloc_range(inode, map->m_lblk,
+ map->m_lblk + map->m_len - 1))
+ status |= EXTENT_STATUS_DELAYED;
+@@ -638,6 +639,7 @@ found:
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
++ !(status & EXTENT_STATUS_WRITTEN) &&
+ ext4_find_delalloc_range(inode, map->m_lblk,
+ map->m_lblk + map->m_len - 1))
+ status |= EXTENT_STATUS_DELAYED;
+diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
+index d98094a9f476..ff10f3decbc9 100644
+--- a/fs/hfsplus/xattr.c
++++ b/fs/hfsplus/xattr.c
+@@ -806,9 +806,6 @@ end_removexattr:
+ static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
+ {
+- char *xattr_name;
+- int res;
+-
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+@@ -818,24 +815,19 @@ static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
+ */
+ if (is_known_namespace(name))
+ return -EOPNOTSUPP;
+- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
+- + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
+- if (!xattr_name)
+- return -ENOMEM;
+- strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
+- strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
+
+- res = hfsplus_getxattr(dentry, xattr_name, buffer, size);
+- kfree(xattr_name);
+- return res;
++ /*
++ * osx is the namespace we use to indicate an unprefixed
++ * attribute on the filesystem (like the ones that OS X
++ * creates), so we pass the name through unmodified (after
++ * ensuring it doesn't conflict with another namespace).
++ */
++ return hfsplus_getxattr(dentry, name, buffer, size);
+ }
+
+ static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
+ const void *buffer, size_t size, int flags, int type)
+ {
+- char *xattr_name;
+- int res;
+-
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+@@ -845,16 +837,14 @@ static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
+ */
+ if (is_known_namespace(name))
+ return -EOPNOTSUPP;
+- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
+- + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
+- if (!xattr_name)
+- return -ENOMEM;
+- strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
+- strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
+
+- res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
+- kfree(xattr_name);
+- return res;
++ /*
++ * osx is the namespace we use to indicate an unprefixed
++ * attribute on the filesystem (like the ones that OS X
++ * creates), so we pass the name through unmodified (after
++ * ensuring it doesn't conflict with another namespace).
++ */
++ return hfsplus_setxattr(dentry, name, buffer, size, flags);
+ }
+
+ static size_t hfsplus_osx_listxattr(struct dentry *dentry, char *list,
+diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
+index a7f2604c5f25..7f5f78bd15ad 100644
+--- a/include/linux/usb_usual.h
++++ b/include/linux/usb_usual.h
+@@ -77,6 +77,8 @@
+ /* Cannot handle ATA_12 or ATA_16 CDBs */ \
+ US_FLAG(NO_REPORT_OPCODES, 0x04000000) \
+ /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
++ US_FLAG(MAX_SECTORS_240, 0x08000000) \
++ /* Sets max_sectors to 240 */ \
+
+ #define US_FLAG(name, value) US_FL_##name = value ,
+ enum { US_DO_ALL_FLAGS };
+diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
+index 183eaab7c380..96e3f56519e7 100644
+--- a/include/scsi/scsi_devinfo.h
++++ b/include/scsi/scsi_devinfo.h
+@@ -36,5 +36,6 @@
+ for sequential scan */
+ #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
+ #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
++#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
+
+ #endif
+diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
+index 0de95ccb92cf..5bd134651f5e 100644
+--- a/include/sound/emu10k1.h
++++ b/include/sound/emu10k1.h
+@@ -41,7 +41,8 @@
+
+ #define EMUPAGESIZE 4096
+ #define MAXREQVOICES 8
+-#define MAXPAGES 8192
++#define MAXPAGES0 4096 /* 32 bit mode */
++#define MAXPAGES1 8192 /* 31 bit mode */
+ #define RESERVED 0
+ #define NUM_MIDI 16
+ #define NUM_G 64 /* use all channels */
+@@ -50,8 +51,7 @@
+
+ /* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
+ #define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */
+-#define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */
+- /* See ALSA bug #1276 - rlrevell */
++#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */
+
+ #define TMEMSIZE 256*1024
+ #define TMEMSIZEREG 4
+@@ -466,8 +466,11 @@
+
+ #define MAPB 0x0d /* Cache map B */
+
+-#define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
+-#define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
++#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */
++#define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */
++
++#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
++#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
+
+ /* 0x0e, 0x0f: Not used */
+
+@@ -1704,6 +1707,7 @@ struct snd_emu10k1 {
+ unsigned short model; /* subsystem id */
+ unsigned int card_type; /* EMU10K1_CARD_* */
+ unsigned int ecard_ctrl; /* ecard control bits */
++ unsigned int address_mode; /* address mode */
+ unsigned long dma_mask; /* PCI DMA mask */
+ unsigned int delay_pcm_irq; /* in samples */
+ int max_cache_pages; /* max memory size / PAGE_SIZE */
+diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
+index 8d7416e46861..15355892a0ff 100644
+--- a/include/sound/soc-dapm.h
++++ b/include/sound/soc-dapm.h
+@@ -287,7 +287,7 @@ struct device;
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+ .tlv.p = (tlv_array), \
+ .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
+- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
++ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
+ #define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
+ SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
+ #define SOC_DAPM_ENUM(xname, xenum) \
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index a64e7a207d2b..0c5796eadae1 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -357,8 +357,8 @@ select_insn:
+ ALU64_MOD_X:
+ if (unlikely(SRC == 0))
+ return 0;
+- tmp = DST;
+- DST = do_div(tmp, SRC);
++ div64_u64_rem(DST, SRC, &tmp);
++ DST = tmp;
+ CONT;
+ ALU_MOD_X:
+ if (unlikely(SRC == 0))
+@@ -367,8 +367,8 @@ select_insn:
+ DST = do_div(tmp, (u32) SRC);
+ CONT;
+ ALU64_MOD_K:
+- tmp = DST;
+- DST = do_div(tmp, IMM);
++ div64_u64_rem(DST, IMM, &tmp);
++ DST = tmp;
+ CONT;
+ ALU_MOD_K:
+ tmp = (u32) DST;
+@@ -377,7 +377,7 @@ select_insn:
+ ALU64_DIV_X:
+ if (unlikely(SRC == 0))
+ return 0;
+- do_div(DST, SRC);
++ DST = div64_u64(DST, SRC);
+ CONT;
+ ALU_DIV_X:
+ if (unlikely(SRC == 0))
+@@ -387,7 +387,7 @@ select_insn:
+ DST = (u32) tmp;
+ CONT;
+ ALU64_DIV_K:
+- do_div(DST, IMM);
++ DST = div64_u64(DST, IMM);
+ CONT;
+ ALU_DIV_K:
+ tmp = (u32) DST;
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 208d5439e59b..787b0d699969 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
+ if (sk_hashed(sk)) {
+ write_lock_bh(&ping_table.lock);
+ hlist_nulls_del(&sk->sk_nulls_node);
++ sk_nulls_node_init(&sk->sk_nulls_node);
+ sock_put(sk);
+ isk->inet_num = 0;
+ isk->inet_sport = 0;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index ad5064362c5c..20fc0202cbbe 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -963,10 +963,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ if (dst_metric_locked(dst, RTAX_MTU))
+ return;
+
+- if (dst->dev->mtu < mtu)
+- return;
+-
+- if (rt->rt_pmtu && rt->rt_pmtu < mtu)
++ if (ipv4_mtu(dst) < mtu)
+ return;
+
+ if (mtu < ip_rt_min_pmtu)
+diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
+index 37d0220a094c..db7a2e5e4a14 100644
+--- a/sound/pci/emu10k1/emu10k1.c
++++ b/sound/pci/emu10k1/emu10k1.c
+@@ -183,8 +183,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
+ }
+ #endif
+
+- strcpy(card->driver, emu->card_capabilities->driver);
+- strcpy(card->shortname, emu->card_capabilities->name);
++ strlcpy(card->driver, emu->card_capabilities->driver,
++ sizeof(card->driver));
++ strlcpy(card->shortname, emu->card_capabilities->name,
++ sizeof(card->shortname));
+ snprintf(card->longname, sizeof(card->longname),
+ "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
+ card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
+diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
+index 874cd76c7b7f..d2c7ea3a7610 100644
+--- a/sound/pci/emu10k1/emu10k1_callback.c
++++ b/sound/pci/emu10k1/emu10k1_callback.c
+@@ -415,7 +415,7 @@ start_voice(struct snd_emux_voice *vp)
+ snd_emu10k1_ptr_write(hw, Z2, ch, 0);
+
+ /* invalidate maps */
+- temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK;
++ temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
+ snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
+ snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
+ #if 0
+@@ -436,7 +436,7 @@ start_voice(struct snd_emux_voice *vp)
+ snd_emu10k1_ptr_write(hw, CDF, ch, sample);
+
+ /* invalidate maps */
+- temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK;
++ temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
+ snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
+ snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
+
+diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
+index b4458a630a7c..df9f5c7c9c77 100644
+--- a/sound/pci/emu10k1/emu10k1_main.c
++++ b/sound/pci/emu10k1/emu10k1_main.c
+@@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
+ snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */
+ snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */
+
+- silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK;
++ silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
+ for (ch = 0; ch < NUM_G; ch++) {
+ snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
+ snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
+@@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
+ outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
+ }
+
++ if (emu->address_mode == 0) {
++ /* use 16M in 4G */
++ outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG);
++ }
++
+ return 0;
+ }
+
+@@ -1421,7 +1426,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
+ *
+ */
+ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102,
+- .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]",
++ .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]",
+ .id = "Audigy2",
+ .emu10k2_chip = 1,
+ .ca0108_chip = 1,
+@@ -1571,7 +1576,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
+ .adc_1361t = 1, /* 24 bit capture instead of 16bit */
+ .ac97_chip = 1} ,
+ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102,
+- .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]",
++ .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]",
+ .id = "Audigy2",
+ .emu10k2_chip = 1,
+ .ca0102_chip = 1,
+@@ -1877,8 +1882,10 @@ int snd_emu10k1_create(struct snd_card *card,
+
+ is_audigy = emu->audigy = c->emu10k2_chip;
+
++ /* set addressing mode */
++ emu->address_mode = is_audigy ? 0 : 1;
+ /* set the DMA transfer mask */
+- emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK;
++ emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK;
+ if (pci_set_dma_mask(pci, emu->dma_mask) < 0 ||
+ pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) {
+ dev_err(card->dev,
+@@ -1903,7 +1910,7 @@ int snd_emu10k1_create(struct snd_card *card,
+
+ emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT;
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+- 32 * 1024, &emu->ptb_pages) < 0) {
++ (emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) {
+ err = -ENOMEM;
+ goto error;
+ }
+@@ -2002,8 +2009,8 @@ int snd_emu10k1_create(struct snd_card *card,
+
+ /* Clear silent pages and set up pointers */
+ memset(emu->silent_page.area, 0, PAGE_SIZE);
+- silent_page = emu->silent_page.addr << 1;
+- for (idx = 0; idx < MAXPAGES; idx++)
++ silent_page = emu->silent_page.addr << emu->address_mode;
++ for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
+ ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
+
+ /* set up voice indices */
+diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
+index 0dc07385af0e..14a305bd8a98 100644
+--- a/sound/pci/emu10k1/emupcm.c
++++ b/sound/pci/emu10k1/emupcm.c
+@@ -380,7 +380,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
+ snd_emu10k1_ptr_write(emu, Z1, voice, 0);
+ snd_emu10k1_ptr_write(emu, Z2, voice, 0);
+ /* invalidate maps */
+- silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK;
++ silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
+ snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page);
+ snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page);
+ /* modulation envelope */
+diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
+index c68e6dd2fa67..4f1f69be1865 100644
+--- a/sound/pci/emu10k1/memory.c
++++ b/sound/pci/emu10k1/memory.c
+@@ -34,10 +34,11 @@
+ * aligned pages in others
+ */
+ #define __set_ptb_entry(emu,page,addr) \
+- (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
++ (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
+
+ #define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE)
+-#define MAX_ALIGN_PAGES (MAXPAGES / UNIT_PAGES)
++#define MAX_ALIGN_PAGES0 (MAXPAGES0 / UNIT_PAGES)
++#define MAX_ALIGN_PAGES1 (MAXPAGES1 / UNIT_PAGES)
+ /* get aligned page from offset address */
+ #define get_aligned_page(offset) ((offset) >> PAGE_SHIFT)
+ /* get offset address from aligned page */
+@@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis
+ }
+ page = blk->mapped_page + blk->pages;
+ }
+- size = MAX_ALIGN_PAGES - page;
++ size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
+ if (size >= max_size) {
+ *nextp = pos;
+ return page;
+@@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
+ q = get_emu10k1_memblk(p, mapped_link);
+ end_page = q->mapped_page;
+ } else
+- end_page = MAX_ALIGN_PAGES;
++ end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
+
+ /* remove links */
+ list_del(&blk->mapped_link);
+@@ -307,7 +308,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
+ if (snd_BUG_ON(!emu))
+ return NULL;
+ if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
+- runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
++ runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
+ return NULL;
+ hdr = emu->memhdr;
+ if (snd_BUG_ON(!hdr))
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 2fe86d2e1b09..a63a86332deb 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -3027,6 +3027,16 @@ static struct snd_kcontrol_new vmaster_mute_mode = {
+ .put = vmaster_mute_mode_put,
+ };
+
++/* meta hook to call each driver's vmaster hook */
++static void vmaster_hook(void *private_data, int enabled)
++{
++ struct hda_vmaster_mute_hook *hook = private_data;
++
++ if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER)
++ enabled = hook->mute_mode;
++ hook->hook(hook->codec, enabled);
++}
++
+ /**
+ * snd_hda_add_vmaster_hook - Add a vmaster hook for mute-LED
+ * @codec: the HDA codec
+@@ -3045,9 +3055,9 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec,
+
+ if (!hook->hook || !hook->sw_kctl)
+ return 0;
+- snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec);
+ hook->codec = codec;
+ hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER;
++ snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook);
+ if (!expose_enum_ctl)
+ return 0;
+ kctl = snd_ctl_new1(&vmaster_mute_mode, hook);
+@@ -3073,14 +3083,7 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook)
+ */
+ if (hook->codec->bus->shutdown)
+ return;
+- switch (hook->mute_mode) {
+- case HDA_VMUTE_FOLLOW_MASTER:
+- snd_ctl_sync_vmaster_hook(hook->sw_kctl);
+- break;
+- default:
+- hook->hook(hook->codec, hook->mute_mode);
+- break;
+- }
++ snd_ctl_sync_vmaster_hook(hook->sw_kctl);
+ }
+ EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook);
+
+diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
+index 6ba0b5517c40..2341fc334163 100644
+--- a/sound/pci/hda/thinkpad_helper.c
++++ b/sound/pci/hda/thinkpad_helper.c
+@@ -72,6 +72,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
+ if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
+ old_vmaster_hook = spec->vmaster_mute.hook;
+ spec->vmaster_mute.hook = update_tpacpi_mute_led;
++ spec->vmaster_mute_enum = 1;
+ removefunc = false;
+ }
+ if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
+diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
+index fb9c20eace3f..97b33e96439a 100644
+--- a/sound/soc/codecs/rt5677.c
++++ b/sound/soc/codecs/rt5677.c
+@@ -62,6 +62,9 @@ static const struct reg_default init_list[] = {
+ {RT5677_PR_BASE + 0x1e, 0x0000},
+ {RT5677_PR_BASE + 0x12, 0x0eaa},
+ {RT5677_PR_BASE + 0x14, 0x018a},
++ {RT5677_PR_BASE + 0x15, 0x0490},
++ {RT5677_PR_BASE + 0x38, 0x0f71},
++ {RT5677_PR_BASE + 0x39, 0x0f71},
+ };
+ #define RT5677_INIT_REG_LEN ARRAY_SIZE(init_list)
+
+@@ -901,7 +904,7 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
+ {
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+- int idx = rl6231_calc_dmic_clk(rt5677->sysclk);
++ int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8);
+
+ if (idx < 0)
+ dev_err(codec->dev, "Failed to set DMIC clock\n");
+diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
+index 16f1b71edb55..aab0af681e8c 100644
+--- a/sound/soc/codecs/tfa9879.c
++++ b/sound/soc/codecs/tfa9879.c
+@@ -280,8 +280,8 @@ static int tfa9879_i2c_probe(struct i2c_client *i2c,
+ int i;
+
+ tfa9879 = devm_kzalloc(&i2c->dev, sizeof(*tfa9879), GFP_KERNEL);
+- if (IS_ERR(tfa9879))
+- return PTR_ERR(tfa9879);
++ if (!tfa9879)
++ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, tfa9879);
+
+diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
+index 326d3c3804e3..5bf723689692 100644
+--- a/sound/soc/samsung/s3c24xx-i2s.c
++++ b/sound/soc/samsung/s3c24xx-i2s.c
+@@ -461,8 +461,8 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
+ return -ENOENT;
+ }
+ s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res);
+- if (s3c24xx_i2s.regs == NULL)
+- return -ENXIO;
++ if (IS_ERR(s3c24xx_i2s.regs))
++ return PTR_ERR(s3c24xx_i2s.regs);
+
+ s3c24xx_i2s_pcm_stereo_out.dma_addr = res->start + S3C2410_IISFIFO;
+ s3c24xx_i2s_pcm_stereo_in.dma_addr = res->start + S3C2410_IISFIFO;
+diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
+index ab37add269ae..82e350e9501c 100644
+--- a/sound/synth/emux/emux_oss.c
++++ b/sound/synth/emux/emux_oss.c
+@@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
+ if (snd_BUG_ON(!arg || !emu))
+ return -ENXIO;
+
+- mutex_lock(&emu->register_mutex);
+-
+- if (!snd_emux_inc_count(emu)) {
+- mutex_unlock(&emu->register_mutex);
++ if (!snd_emux_inc_count(emu))
+ return -EFAULT;
+- }
+
+ memset(&callback, 0, sizeof(callback));
+ callback.owner = THIS_MODULE;
+@@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
+ if (p == NULL) {
+ snd_printk(KERN_ERR "can't create port\n");
+ snd_emux_dec_count(emu);
+- mutex_unlock(&emu->register_mutex);
+ return -ENOMEM;
+ }
+
+@@ -148,8 +143,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
+ reset_port_mode(p, arg->seq_mode);
+
+ snd_emux_reset_port(p);
+-
+- mutex_unlock(&emu->register_mutex);
+ return 0;
+ }
+
+@@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg)
+ if (snd_BUG_ON(!emu))
+ return -ENXIO;
+
+- mutex_lock(&emu->register_mutex);
+ snd_emux_sounds_off_all(p);
+ snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
+ snd_seq_event_port_detach(p->chset.client, p->chset.port);
+ snd_emux_dec_count(emu);
+
+- mutex_unlock(&emu->register_mutex);
+ return 0;
+ }
+
+diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c
+index 7778b8e19782..a0209204ae48 100644
+--- a/sound/synth/emux/emux_seq.c
++++ b/sound/synth/emux/emux_seq.c
+@@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu)
+ if (emu->voices)
+ snd_emux_terminate_all(emu);
+
+- mutex_lock(&emu->register_mutex);
+ if (emu->client >= 0) {
+ snd_seq_delete_kernel_client(emu->client);
+ emu->client = -1;
+ }
+- mutex_unlock(&emu->register_mutex);
+ }
+
+
+@@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data,
+ /*
+ * increment usage count
+ */
+-int
+-snd_emux_inc_count(struct snd_emux *emu)
++static int
++__snd_emux_inc_count(struct snd_emux *emu)
+ {
+ emu->used++;
+ if (!try_module_get(emu->ops.owner))
+@@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu)
+ return 1;
+ }
+
++int snd_emux_inc_count(struct snd_emux *emu)
++{
++ int ret;
++
++ mutex_lock(&emu->register_mutex);
++ ret = __snd_emux_inc_count(emu);
++ mutex_unlock(&emu->register_mutex);
++ return ret;
++}
+
+ /*
+ * decrease usage count
+ */
+-void
+-snd_emux_dec_count(struct snd_emux *emu)
++static void
++__snd_emux_dec_count(struct snd_emux *emu)
+ {
+ module_put(emu->card->module);
+ emu->used--;
+@@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu)
+ module_put(emu->ops.owner);
+ }
+
++void snd_emux_dec_count(struct snd_emux *emu)
++{
++ mutex_lock(&emu->register_mutex);
++ __snd_emux_dec_count(emu);
++ mutex_unlock(&emu->register_mutex);
++}
+
+ /*
+ * Routine that is called upon a first use of a particular port
+@@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info)
+
+ mutex_lock(&emu->register_mutex);
+ snd_emux_init_port(p);
+- snd_emux_inc_count(emu);
++ __snd_emux_inc_count(emu);
+ mutex_unlock(&emu->register_mutex);
+ return 0;
+ }
+@@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info)
+
+ mutex_lock(&emu->register_mutex);
+ snd_emux_sounds_off_all(p);
+- snd_emux_dec_count(emu);
++ __snd_emux_dec_count(emu);
+ mutex_unlock(&emu->register_mutex);
+ return 0;
+ }
diff --git a/1003_linux-4.0.4.patch b/1003_linux-4.0.4.patch
new file mode 100644
index 0000000..e5c793a
--- /dev/null
+++ b/1003_linux-4.0.4.patch
@@ -0,0 +1,2713 @@
+diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
+index a4873e5e3e36..e30e184f50c7 100644
+--- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
++++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
+@@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 {
+ 80 81 68 69
+ 70 71 72 73
+ 74 75 76 77>;
+- interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
++ interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
+ "saif0", "saif1", "i2c0", "i2c1",
+ "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
+ "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
+diff --git a/Makefile b/Makefile
+index dc9f43a019d6..3d16bcc87585 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 0
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma sheep
+
+diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+index 0c76d9f05fd0..f4838ebd918b 100644
+--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
++++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+@@ -105,6 +105,10 @@
+ };
+
+ internal-regs {
++ rtc@10300 {
++ /* No crystal connected to the internal RTC */
++ status = "disabled";
++ };
+ serial@12000 {
+ status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
+index 7e6eef2488e8..82045398bf1f 100644
+--- a/arch/arm/boot/dts/imx23-olinuxino.dts
++++ b/arch/arm/boot/dts/imx23-olinuxino.dts
+@@ -12,6 +12,7 @@
+ */
+
+ /dts-v1/;
++#include <dt-bindings/gpio/gpio.h>
+ #include "imx23.dtsi"
+
+ / {
+@@ -93,6 +94,7 @@
+
+ ahb@80080000 {
+ usb0: usb@80080000 {
++ dr_mode = "host";
+ vbus-supply = <&reg_usb0_vbus>;
+ status = "okay";
+ };
+@@ -122,7 +124,7 @@
+
+ user {
+ label = "green";
+- gpios = <&gpio2 1 1>;
++ gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
+index e4d3aecc4ed2..677f81d9dcd5 100644
+--- a/arch/arm/boot/dts/imx25.dtsi
++++ b/arch/arm/boot/dts/imx25.dtsi
+@@ -428,6 +428,7 @@
+
+ pwm4: pwm@53fc8000 {
+ compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
++ #pwm-cells = <2>;
+ reg = <0x53fc8000 0x4000>;
+ clocks = <&clks 108>, <&clks 52>;
+ clock-names = "ipg", "per";
+diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
+index 47f68ac868d4..5ed245a3f9ac 100644
+--- a/arch/arm/boot/dts/imx28.dtsi
++++ b/arch/arm/boot/dts/imx28.dtsi
+@@ -900,7 +900,7 @@
+ 80 81 68 69
+ 70 71 72 73
+ 74 75 76 77>;
+- interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
++ interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
+ "saif0", "saif1", "i2c0", "i2c1",
+ "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
+ "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+index 19cc269a08d4..1ce6133b67f5 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+@@ -31,6 +31,7 @@
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio4 15 0>;
++ enable-active-high;
+ };
+
+ reg_usb_h1_vbus: regulator@1 {
+@@ -40,6 +41,7 @@
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 0 0>;
++ enable-active-high;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
+index db80f9d376fa..9c8bdf2c93a1 100644
+--- a/arch/arm/boot/dts/omap3-n900.dts
++++ b/arch/arm/boot/dts/omap3-n900.dts
+@@ -484,6 +484,8 @@
+ DRVDD-supply = <&vmmc2>;
+ IOVDD-supply = <&vio>;
+ DVDD-supply = <&vio>;
++
++ ai3x-micbias-vg = <1>;
+ };
+
+ tlv320aic3x_aux: tlv320aic3x@19 {
+@@ -495,6 +497,8 @@
+ DRVDD-supply = <&vmmc2>;
+ IOVDD-supply = <&vio>;
+ DVDD-supply = <&vio>;
++
++ ai3x-micbias-vg = <2>;
+ };
+
+ tsl2563: tsl2563@29 {
+diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
+index bfd3f1c734b8..2201cd5da3bb 100644
+--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
++++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
+@@ -1017,23 +1017,6 @@
+ status = "disabled";
+ };
+
+- vmmci: regulator-gpio {
+- compatible = "regulator-gpio";
+-
+- regulator-min-microvolt = <1800000>;
+- regulator-max-microvolt = <2900000>;
+- regulator-name = "mmci-reg";
+- regulator-type = "voltage";
+-
+- startup-delay-us = <100>;
+- enable-active-high;
+-
+- states = <1800000 0x1
+- 2900000 0x0>;
+-
+- status = "disabled";
+- };
+-
+ mcde@a0350000 {
+ compatible = "stericsson,mcde";
+ reg = <0xa0350000 0x1000>, /* MCDE */
+diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
+index bf8f0eddc2c0..744c1e3a744d 100644
+--- a/arch/arm/boot/dts/ste-href.dtsi
++++ b/arch/arm/boot/dts/ste-href.dtsi
+@@ -111,6 +111,21 @@
+ pinctrl-1 = <&i2c3_sleep_mode>;
+ };
+
++ vmmci: regulator-gpio {
++ compatible = "regulator-gpio";
++
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <2900000>;
++ regulator-name = "mmci-reg";
++ regulator-type = "voltage";
++
++ startup-delay-us = <100>;
++ enable-active-high;
++
++ states = <1800000 0x1
++ 2900000 0x0>;
++ };
++
+ // External Micro SD slot
+ sdi0_per1@80126000 {
+ arm,primecell-periphid = <0x10480180>;
+diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
+index 206826a855c0..1bc84ebdccaa 100644
+--- a/arch/arm/boot/dts/ste-snowball.dts
++++ b/arch/arm/boot/dts/ste-snowball.dts
+@@ -146,8 +146,21 @@
+ };
+
+ vmmci: regulator-gpio {
++ compatible = "regulator-gpio";
++
+ gpios = <&gpio7 4 0x4>;
+ enable-gpio = <&gpio6 25 0x4>;
++
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <2900000>;
++ regulator-name = "mmci-reg";
++ regulator-type = "voltage";
++
++ startup-delay-us = <100>;
++ enable-active-high;
++
++ states = <1800000 0x1
++ 2900000 0x0>;
+ };
+
+ // External Micro SD slot
+diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
+index 902397dd1000..1c1cdfa566ac 100644
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -86,7 +86,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+ obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
+ ifeq ($(CONFIG_ARM_PSCI),y)
+-obj-y += psci.o
++obj-y += psci.o psci-call.o
+ obj-$(CONFIG_SMP) += psci_smp.o
+ endif
+
+diff --git a/arch/arm/kernel/psci-call.S b/arch/arm/kernel/psci-call.S
+new file mode 100644
+index 000000000000..a78e9e1e206d
+--- /dev/null
++++ b/arch/arm/kernel/psci-call.S
+@@ -0,0 +1,31 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Copyright (C) 2015 ARM Limited
++ *
++ * Author: Mark Rutland <mark.rutland@arm.com>
++ */
++
++#include <linux/linkage.h>
++
++#include <asm/opcodes-sec.h>
++#include <asm/opcodes-virt.h>
++
++/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
++ENTRY(__invoke_psci_fn_hvc)
++ __HVC(0)
++ bx lr
++ENDPROC(__invoke_psci_fn_hvc)
++
++/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
++ENTRY(__invoke_psci_fn_smc)
++ __SMC(0)
++ bx lr
++ENDPROC(__invoke_psci_fn_smc)
+diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
+index f73891b6b730..f90fdf4ce7c7 100644
+--- a/arch/arm/kernel/psci.c
++++ b/arch/arm/kernel/psci.c
+@@ -23,8 +23,6 @@
+
+ #include <asm/compiler.h>
+ #include <asm/errno.h>
+-#include <asm/opcodes-sec.h>
+-#include <asm/opcodes-virt.h>
+ #include <asm/psci.h>
+ #include <asm/system_misc.h>
+
+@@ -33,6 +31,9 @@ struct psci_operations psci_ops;
+ static int (*invoke_psci_fn)(u32, u32, u32, u32);
+ typedef int (*psci_initcall_t)(const struct device_node *);
+
++asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
++asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);
++
+ enum psci_function {
+ PSCI_FN_CPU_SUSPEND,
+ PSCI_FN_CPU_ON,
+@@ -71,40 +72,6 @@ static u32 psci_power_state_pack(struct psci_power_state state)
+ & PSCI_0_2_POWER_STATE_AFFL_MASK);
+ }
+
+-/*
+- * The following two functions are invoked via the invoke_psci_fn pointer
+- * and will not be inlined, allowing us to piggyback on the AAPCS.
+- */
+-static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
+- u32 arg2)
+-{
+- asm volatile(
+- __asmeq("%0", "r0")
+- __asmeq("%1", "r1")
+- __asmeq("%2", "r2")
+- __asmeq("%3", "r3")
+- __HVC(0)
+- : "+r" (function_id)
+- : "r" (arg0), "r" (arg1), "r" (arg2));
+-
+- return function_id;
+-}
+-
+-static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
+- u32 arg2)
+-{
+- asm volatile(
+- __asmeq("%0", "r0")
+- __asmeq("%1", "r1")
+- __asmeq("%2", "r2")
+- __asmeq("%3", "r3")
+- __SMC(0)
+- : "+r" (function_id)
+- : "r" (arg0), "r" (arg1), "r" (arg2));
+-
+- return function_id;
+-}
+-
+ static int psci_get_version(void)
+ {
+ int err;
+diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
+index cbefbd7cfdb5..661d753df584 100644
+--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
++++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
+@@ -112,6 +112,7 @@
+ #define OMAP3430_VC_CMD_ONLP_SHIFT 16
+ #define OMAP3430_VC_CMD_RET_SHIFT 8
+ #define OMAP3430_VC_CMD_OFF_SHIFT 0
++#define OMAP3430_SREN_MASK (1 << 4)
+ #define OMAP3430_HSEN_MASK (1 << 3)
+ #define OMAP3430_MCODE_MASK (0x7 << 0)
+ #define OMAP3430_VALID_MASK (1 << 24)
+diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h
+index b1c7a33e00e7..e794828dee55 100644
+--- a/arch/arm/mach-omap2/prm-regbits-44xx.h
++++ b/arch/arm/mach-omap2/prm-regbits-44xx.h
+@@ -35,6 +35,7 @@
+ #define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT 1
+ #define OMAP4430_GLOBAL_WUEN_MASK (1 << 16)
+ #define OMAP4430_HSMCODE_MASK (0x7 << 0)
++#define OMAP4430_SRMODEEN_MASK (1 << 4)
+ #define OMAP4430_HSMODEEN_MASK (1 << 3)
+ #define OMAP4430_HSSCLL_SHIFT 24
+ #define OMAP4430_ICEPICK_RST_SHIFT 9
+diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
+index be9ef834fa81..076fd20d7e5a 100644
+--- a/arch/arm/mach-omap2/vc.c
++++ b/arch/arm/mach-omap2/vc.c
+@@ -316,7 +316,8 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm)
+ * idle. And we can also scale voltages to zero for off-idle.
+ * Note that no actual voltage scaling during off-idle will
+ * happen unless the board specific twl4030 PMIC scripts are
+- * loaded.
++ * loaded. See also omap_vc_i2c_init for comments regarding
++ * erratum i531.
+ */
+ val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET);
+ if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) {
+@@ -704,9 +705,16 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
+ return;
+ }
+
++ /*
++ * Note that for omap3 OMAP3430_SREN_MASK clears SREN to work around
++ * erratum i531 "Extra Power Consumed When Repeated Start Operation
++ * Mode Is Enabled on I2C Interface Dedicated for Smart Reflex (I2C4)".
++ * Otherwise I2C4 eventually leads into about 23mW extra power being
++ * consumed even during off idle using VMODE.
++ */
+ i2c_high_speed = voltdm->pmic->i2c_high_speed;
+ if (i2c_high_speed)
+- voltdm->rmw(vc->common->i2c_cfg_hsen_mask,
++ voltdm->rmw(vc->common->i2c_cfg_clear_mask,
+ vc->common->i2c_cfg_hsen_mask,
+ vc->common->i2c_cfg_reg);
+
+diff --git a/arch/arm/mach-omap2/vc.h b/arch/arm/mach-omap2/vc.h
+index cdbdd78e755e..89b83b7ff3ec 100644
+--- a/arch/arm/mach-omap2/vc.h
++++ b/arch/arm/mach-omap2/vc.h
+@@ -34,6 +34,7 @@ struct voltagedomain;
+ * @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register
+ * @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register
+ * @i2c_cfg_reg: I2C configuration register offset
++ * @i2c_cfg_clear_mask: high-speed mode bit clear mask in I2C config register
+ * @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register
+ * @i2c_mcode_mask: MCODE field mask for I2C config register
+ *
+@@ -52,6 +53,7 @@ struct omap_vc_common {
+ u8 cmd_ret_shift;
+ u8 cmd_off_shift;
+ u8 i2c_cfg_reg;
++ u8 i2c_cfg_clear_mask;
+ u8 i2c_cfg_hsen_mask;
+ u8 i2c_mcode_mask;
+ };
+diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c
+index 75bc4aa22b3a..71d74c9172c1 100644
+--- a/arch/arm/mach-omap2/vc3xxx_data.c
++++ b/arch/arm/mach-omap2/vc3xxx_data.c
+@@ -40,6 +40,7 @@ static struct omap_vc_common omap3_vc_common = {
+ .cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT,
+ .cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT,
+ .cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT,
++ .i2c_cfg_clear_mask = OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK,
+ .i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK,
+ .i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET,
+ .i2c_mcode_mask = OMAP3430_MCODE_MASK,
+diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c
+index 085e5d6a04fd..2abd5fa8a697 100644
+--- a/arch/arm/mach-omap2/vc44xx_data.c
++++ b/arch/arm/mach-omap2/vc44xx_data.c
+@@ -42,6 +42,7 @@ static const struct omap_vc_common omap4_vc_common = {
+ .cmd_ret_shift = OMAP4430_RET_SHIFT,
+ .cmd_off_shift = OMAP4430_OFF_SHIFT,
+ .i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET,
++ .i2c_cfg_clear_mask = OMAP4430_SRMODEEN_MASK | OMAP4430_HSMODEEN_MASK,
+ .i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK,
+ .i2c_mcode_mask = OMAP4430_HSMCODE_MASK,
+ };
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index e1268f905026..f412b53ed268 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -449,10 +449,21 @@ static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
+ return;
+ }
+ #endif
+- if (rm != ARM_R0)
+- emit(ARM_MOV_R(ARM_R0, rm), ctx);
++
++ /*
++ * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
++ * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
++ * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
++ * before using it as a source for ARM_R1.
++ *
++ * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
++ * ARM_R5 (r_X) so there is no particular register overlap
++ * issues.
++ */
+ if (rn != ARM_R1)
+ emit(ARM_MOV_R(ARM_R1, rn), ctx);
++ if (rm != ARM_R0)
++ emit(ARM_MOV_R(ARM_R0, rm), ctx);
+
+ ctx->seen |= SEEN_CALL;
+ emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
+diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
+index cf87de3fc390..64b611782ef0 100644
+--- a/arch/x86/include/asm/spinlock.h
++++ b/arch/x86/include/asm/spinlock.h
+@@ -169,7 +169,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+ struct __raw_tickets tmp = READ_ONCE(lock->tickets);
+
+ tmp.head &= ~TICKET_SLOWPATH_FLAG;
+- return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
++ return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
+ }
+ #define arch_spin_is_contended arch_spin_is_contended
+
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index e4695985f9de..d93963340c3c 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -325,6 +325,26 @@ static void release_pci_root_info(struct pci_host_bridge *bridge)
+ kfree(info);
+ }
+
++/*
++ * An IO port or MMIO resource assigned to a PCI host bridge may be
++ * consumed by the host bridge itself or available to its child
++ * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
++ * to tell whether the resource is consumed by the host bridge itself,
++ * but firmware hasn't used that bit consistently, so we can't rely on it.
++ *
++ * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
++ * to be available to child bus/devices except one special case:
++ * IO port [0xCF8-0xCFF] is consumed by the host bridge itself
++ * to access PCI configuration space.
++ *
++ * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF].
++ */
++static bool resource_is_pcicfg_ioport(struct resource *res)
++{
++ return (res->flags & IORESOURCE_IO) &&
++ res->start == 0xCF8 && res->end == 0xCFF;
++}
++
+ static void probe_pci_root_info(struct pci_root_info *info,
+ struct acpi_device *device,
+ int busnum, int domain,
+@@ -346,8 +366,8 @@ static void probe_pci_root_info(struct pci_root_info *info,
+ "no IO and memory resources present in _CRS\n");
+ else
+ resource_list_for_each_entry_safe(entry, tmp, list) {
+- if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
+- (entry->res->flags & IORESOURCE_DISABLED))
++ if ((entry->res->flags & IORESOURCE_DISABLED) ||
++ resource_is_pcicfg_ioport(entry->res))
+ resource_list_destroy_entry(entry);
+ else
+ entry->res->name = info->name;
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 794c3e7f01cf..66406474f0c4 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -552,6 +552,8 @@ void blk_cleanup_queue(struct request_queue *q)
+ q->queue_lock = &q->__queue_lock;
+ spin_unlock_irq(lock);
+
++ bdi_destroy(&q->backing_dev_info);
++
+ /* @q is and will stay empty, shutdown and put */
+ blk_put_queue(q);
+ }
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 33c428530193..5c39703e644f 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -675,8 +675,11 @@ static void blk_mq_rq_timer(unsigned long priv)
+ data.next = blk_rq_timeout(round_jiffies_up(data.next));
+ mod_timer(&q->timeout, data.next);
+ } else {
+- queue_for_each_hw_ctx(q, hctx, i)
+- blk_mq_tag_idle(hctx);
++ queue_for_each_hw_ctx(q, hctx, i) {
++ /* the hctx may be unmapped, so check it here */
++ if (blk_mq_hw_queue_mapped(hctx))
++ blk_mq_tag_idle(hctx);
++ }
+ }
+ }
+
+@@ -1570,22 +1573,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
+ return NOTIFY_OK;
+ }
+
+-static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
+-{
+- struct request_queue *q = hctx->queue;
+- struct blk_mq_tag_set *set = q->tag_set;
+-
+- if (set->tags[hctx->queue_num])
+- return NOTIFY_OK;
+-
+- set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
+- if (!set->tags[hctx->queue_num])
+- return NOTIFY_STOP;
+-
+- hctx->tags = set->tags[hctx->queue_num];
+- return NOTIFY_OK;
+-}
+-
+ static int blk_mq_hctx_notify(void *data, unsigned long action,
+ unsigned int cpu)
+ {
+@@ -1593,8 +1580,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
+
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
+ return blk_mq_hctx_cpu_offline(hctx, cpu);
+- else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
+- return blk_mq_hctx_cpu_online(hctx, cpu);
++
++ /*
++ * In case of CPU online, tags may be reallocated
++ * in blk_mq_map_swqueue() after mapping is updated.
++ */
+
+ return NOTIFY_OK;
+ }
+@@ -1776,6 +1766,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
+ unsigned int i;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
++ struct blk_mq_tag_set *set = q->tag_set;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ cpumask_clear(hctx->cpumask);
+@@ -1802,16 +1793,20 @@ static void blk_mq_map_swqueue(struct request_queue *q)
+ * disable it and free the request entries.
+ */
+ if (!hctx->nr_ctx) {
+- struct blk_mq_tag_set *set = q->tag_set;
+-
+ if (set->tags[i]) {
+ blk_mq_free_rq_map(set, set->tags[i], i);
+ set->tags[i] = NULL;
+- hctx->tags = NULL;
+ }
++ hctx->tags = NULL;
+ continue;
+ }
+
++ /* unmapped hw queue can be remapped after CPU topo changed */
++ if (!set->tags[i])
++ set->tags[i] = blk_mq_init_rq_map(set, i);
++ hctx->tags = set->tags[i];
++ WARN_ON(!hctx->tags);
++
+ /*
+ * Initialize batch roundrobin counts
+ */
+@@ -2075,9 +2070,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
+ */
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_freeze_queue_start(q);
+- list_for_each_entry(q, &all_q_list, all_q_node)
++ list_for_each_entry(q, &all_q_list, all_q_node) {
+ blk_mq_freeze_queue_wait(q);
+
++ /*
++ * timeout handler can't touch hw queue during the
++ * reinitialization
++ */
++ del_timer_sync(&q->timeout);
++ }
++
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_queue_reinit(q);
+
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index faaf36ade7eb..2b8fd302f677 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj)
+
+ blk_trace_shutdown(q);
+
+- bdi_destroy(&q->backing_dev_info);
+-
+ ida_simple_remove(&blk_queue_ida, q->id);
+ call_rcu(&q->rcu_head, blk_free_queue_rcu);
+ }
+diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
+index b193f8425999..ff6d8adc9cda 100644
+--- a/drivers/acpi/acpi_pnp.c
++++ b/drivers/acpi/acpi_pnp.c
+@@ -304,6 +304,8 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
+ {"PNPb006"},
+ /* cs423x-pnpbios */
+ {"CSC0100"},
++ {"CSC0103"},
++ {"CSC0110"},
+ {"CSC0000"},
+ {"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */
+ /* es18xx-pnpbios */
+diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
+index cf607fe69dbd..c240bdf824f2 100644
+--- a/drivers/acpi/acpica/acmacros.h
++++ b/drivers/acpi/acpica/acmacros.h
+@@ -63,23 +63,12 @@
+ #define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
+
+ /*
+- * printf() format helpers. These macros are workarounds for the difficulties
++ * printf() format helper. This macros is a workaround for the difficulties
+ * with emitting 64-bit integers and 64-bit pointers with the same code
+ * for both 32-bit and 64-bit hosts.
+ */
+ #define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)
+
+-#if ACPI_MACHINE_WIDTH == 64
+-#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i)
+-#define ACPI_FORMAT_TO_UINT(i) ACPI_FORMAT_UINT64(i)
+-#define ACPI_PRINTF_UINT "0x%8.8X%8.8X"
+-
+-#else
+-#define ACPI_FORMAT_NATIVE_UINT(i) 0, (u32) (i)
+-#define ACPI_FORMAT_TO_UINT(i) (u32) (i)
+-#define ACPI_PRINTF_UINT "0x%8.8X"
+-#endif
+-
+ /*
+ * Macros for moving data around to/from buffers that are possibly unaligned.
+ * If the hardware supports the transfer of unaligned data, just do the store.
+diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
+index 77244182ff02..ea0cc4e08f80 100644
+--- a/drivers/acpi/acpica/dsopcode.c
++++ b/drivers/acpi/acpica/dsopcode.c
+@@ -446,7 +446,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
+ obj_desc,
+- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
++ ACPI_FORMAT_UINT64(obj_desc->region.address),
+ obj_desc->region.length));
+
+ /* Now the address and length are valid for this opregion */
+@@ -539,13 +539,12 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+- obj_desc->region.address =
+- (acpi_physical_address) ACPI_TO_INTEGER(table);
++ obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
+ obj_desc->region.length = table->length;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
+ obj_desc,
+- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
++ ACPI_FORMAT_UINT64(obj_desc->region.address),
+ obj_desc->region.length));
+
+ /* Now the address and length are valid for this opregion */
+diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
+index 9abace3401f9..2ba28a63fb68 100644
+--- a/drivers/acpi/acpica/evregion.c
++++ b/drivers/acpi/acpica/evregion.c
+@@ -272,7 +272,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+ &region_obj->region.handler->address_space, handler,
+- ACPI_FORMAT_NATIVE_UINT(address),
++ ACPI_FORMAT_UINT64(address),
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+
+diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
+index 7c213b6b6472..1da52bef632e 100644
+--- a/drivers/acpi/acpica/exdump.c
++++ b/drivers/acpi/acpica/exdump.c
+@@ -767,8 +767,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
+ acpi_os_printf("\n");
+ } else {
+ acpi_os_printf(" base %8.8X%8.8X Length %X\n",
+- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
+- address),
++ ACPI_FORMAT_UINT64(obj_desc->region.
++ address),
+ obj_desc->region.length);
+ }
+ break;
+diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
+index 49479927e7f7..725a3746a2df 100644
+--- a/drivers/acpi/acpica/exfldio.c
++++ b/drivers/acpi/acpica/exfldio.c
+@@ -263,17 +263,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
+- " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
++ " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
+ acpi_ut_get_region_name(rgn_desc->region.
+ space_id),
+ rgn_desc->region.space_id,
+ obj_desc->common_field.access_byte_width,
+ obj_desc->common_field.base_byte_offset,
+- field_datum_byte_offset, ACPI_CAST_PTR(void,
+- (rgn_desc->
+- region.
+- address +
+- region_offset))));
++ field_datum_byte_offset,
++ ACPI_FORMAT_UINT64(rgn_desc->region.address +
++ region_offset)));
+
+ /* Invoke the appropriate address_space/op_region handler */
+
+diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
+index 0fe188e238ef..b4bbf3150bc1 100644
+--- a/drivers/acpi/acpica/exregion.c
++++ b/drivers/acpi/acpica/exregion.c
+@@ -181,7 +181,7 @@ acpi_ex_system_memory_space_handler(u32 function,
+ if (!mem_info->mapped_logical_address) {
+ ACPI_ERROR((AE_INFO,
+ "Could not map memory at 0x%8.8X%8.8X, size %u",
+- ACPI_FORMAT_NATIVE_UINT(address),
++ ACPI_FORMAT_UINT64(address),
+ (u32) map_length));
+ mem_info->mapped_length = 0;
+ return_ACPI_STATUS(AE_NO_MEMORY);
+@@ -202,8 +202,7 @@ acpi_ex_system_memory_space_handler(u32 function,
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
+- bit_width, function,
+- ACPI_FORMAT_NATIVE_UINT(address)));
++ bit_width, function, ACPI_FORMAT_UINT64(address)));
+
+ /*
+ * Perform the memory read or write
+@@ -318,8 +317,7 @@ acpi_ex_system_io_space_handler(u32 function,
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
+- bit_width, function,
+- ACPI_FORMAT_NATIVE_UINT(address)));
++ bit_width, function, ACPI_FORMAT_UINT64(address)));
+
+ /* Decode the function parameter */
+
+diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
+index 2bd33fe56cb3..29033d71417b 100644
+--- a/drivers/acpi/acpica/hwvalid.c
++++ b/drivers/acpi/acpica/hwvalid.c
+@@ -142,17 +142,17 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
+ byte_width = ACPI_DIV_8(bit_width);
+ last_address = address + byte_width - 1;
+
+- ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X",
+- ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void,
+- last_address),
+- byte_width));
++ ACPI_DEBUG_PRINT((ACPI_DB_IO,
++ "Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X",
++ ACPI_FORMAT_UINT64(address),
++ ACPI_FORMAT_UINT64(last_address), byte_width));
+
+ /* Maximum 16-bit address in I/O space */
+
+ if (last_address > ACPI_UINT16_MAX) {
+ ACPI_ERROR((AE_INFO,
+- "Illegal I/O port address/length above 64K: %p/0x%X",
+- ACPI_CAST_PTR(void, address), byte_width));
++ "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
++ ACPI_FORMAT_UINT64(address), byte_width));
+ return_ACPI_STATUS(AE_LIMIT);
+ }
+
+@@ -181,8 +181,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
+
+ if (acpi_gbl_osi_data >= port_info->osi_dependency) {
+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
+- "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)",
+- ACPI_CAST_PTR(void, address),
++ "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
++ ACPI_FORMAT_UINT64(address),
+ byte_width, port_info->name,
+ port_info->start,
+ port_info->end));
+diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
+index 80f097eb7381..d259393505fa 100644
+--- a/drivers/acpi/acpica/nsdump.c
++++ b/drivers/acpi/acpica/nsdump.c
+@@ -271,12 +271,11 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
+ switch (type) {
+ case ACPI_TYPE_PROCESSOR:
+
+- acpi_os_printf("ID %02X Len %02X Addr %p\n",
++ acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n",
+ obj_desc->processor.proc_id,
+ obj_desc->processor.length,
+- ACPI_CAST_PTR(void,
+- obj_desc->processor.
+- address));
++ ACPI_FORMAT_UINT64(obj_desc->processor.
++ address));
+ break;
+
+ case ACPI_TYPE_DEVICE:
+@@ -347,8 +346,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
+ space_id));
+ if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
+ acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
+- ACPI_FORMAT_NATIVE_UINT
+- (obj_desc->region.address),
++ ACPI_FORMAT_UINT64(obj_desc->
++ region.
++ address),
+ obj_desc->region.length);
+ } else {
+ acpi_os_printf
+diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
+index 6a144957aadd..fd5998b2b46b 100644
+--- a/drivers/acpi/acpica/tbdata.c
++++ b/drivers/acpi/acpica/tbdata.c
+@@ -113,9 +113,9 @@ acpi_tb_acquire_table(struct acpi_table_desc *table_desc,
+ case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
+ case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
+
+- table =
+- ACPI_CAST_PTR(struct acpi_table_header,
+- table_desc->address);
++ table = ACPI_CAST_PTR(struct acpi_table_header,
++ ACPI_PHYSADDR_TO_PTR(table_desc->
++ address));
+ break;
+
+ default:
+@@ -214,7 +214,8 @@ acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc,
+ case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
+ case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
+
+- table_header = ACPI_CAST_PTR(struct acpi_table_header, address);
++ table_header = ACPI_CAST_PTR(struct acpi_table_header,
++ ACPI_PHYSADDR_TO_PTR(address));
+ if (!table_header) {
+ return (AE_NO_MEMORY);
+ }
+@@ -398,14 +399,14 @@ acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature)
+ table_desc->length);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
+- "%4.4s " ACPI_PRINTF_UINT
++ "%4.4s 0x%8.8X%8.8X"
+ " Attempted table install failed",
+ acpi_ut_valid_acpi_name(table_desc->
+ signature.
+ ascii) ?
+ table_desc->signature.ascii : "????",
+- ACPI_FORMAT_TO_UINT(table_desc->
+- address)));
++ ACPI_FORMAT_UINT64(table_desc->
++ address)));
+ goto invalidate_and_exit;
+ }
+ }
+diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
+index 7fbc2b9dcbbb..7e69bc73bd16 100644
+--- a/drivers/acpi/acpica/tbinstal.c
++++ b/drivers/acpi/acpica/tbinstal.c
+@@ -187,8 +187,9 @@ acpi_tb_install_fixed_table(acpi_physical_address address,
+ status = acpi_tb_acquire_temp_table(&new_table_desc, address,
+ ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL);
+ if (ACPI_FAILURE(status)) {
+- ACPI_ERROR((AE_INFO, "Could not acquire table length at %p",
+- ACPI_CAST_PTR(void, address)));
++ ACPI_ERROR((AE_INFO,
++ "Could not acquire table length at %8.8X%8.8X",
++ ACPI_FORMAT_UINT64(address)));
+ return_ACPI_STATUS(status);
+ }
+
+@@ -246,8 +247,9 @@ acpi_tb_install_standard_table(acpi_physical_address address,
+
+ status = acpi_tb_acquire_temp_table(&new_table_desc, address, flags);
+ if (ACPI_FAILURE(status)) {
+- ACPI_ERROR((AE_INFO, "Could not acquire table length at %p",
+- ACPI_CAST_PTR(void, address)));
++ ACPI_ERROR((AE_INFO,
++ "Could not acquire table length at %8.8X%8.8X",
++ ACPI_FORMAT_UINT64(address)));
+ return_ACPI_STATUS(status);
+ }
+
+@@ -258,9 +260,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
+ if (!reload &&
+ acpi_gbl_disable_ssdt_table_install &&
+ ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) {
+- ACPI_INFO((AE_INFO, "Ignoring installation of %4.4s at %p",
+- new_table_desc.signature.ascii, ACPI_CAST_PTR(void,
+- address)));
++ ACPI_INFO((AE_INFO,
++ "Ignoring installation of %4.4s at %8.8X%8.8X",
++ new_table_desc.signature.ascii,
++ ACPI_FORMAT_UINT64(address)));
+ goto release_and_exit;
+ }
+
+@@ -428,11 +431,11 @@ finish_override:
+ return;
+ }
+
+- ACPI_INFO((AE_INFO, "%4.4s " ACPI_PRINTF_UINT
+- " %s table override, new table: " ACPI_PRINTF_UINT,
++ ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X"
++ " %s table override, new table: 0x%8.8X%8.8X",
+ old_table_desc->signature.ascii,
+- ACPI_FORMAT_TO_UINT(old_table_desc->address),
+- override_type, ACPI_FORMAT_TO_UINT(new_table_desc.address)));
++ ACPI_FORMAT_UINT64(old_table_desc->address),
++ override_type, ACPI_FORMAT_UINT64(new_table_desc.address)));
+
+ /* We can now uninstall the original table */
+
+@@ -516,7 +519,7 @@ void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc)
+
+ if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
+ ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL) {
+- ACPI_FREE(ACPI_CAST_PTR(void, table_desc->address));
++ ACPI_FREE(ACPI_PHYSADDR_TO_PTR(table_desc->address));
+ }
+
+ table_desc->address = ACPI_PTR_TO_PHYSADDR(NULL);
+diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
+index ef16c06e5091..77ba5c71c6e7 100644
+--- a/drivers/acpi/acpica/tbprint.c
++++ b/drivers/acpi/acpica/tbprint.c
+@@ -127,18 +127,12 @@ acpi_tb_print_table_header(acpi_physical_address address,
+ {
+ struct acpi_table_header local_header;
+
+- /*
+- * The reason that we use ACPI_PRINTF_UINT and ACPI_FORMAT_TO_UINT is to
+- * support both 32-bit and 64-bit hosts/addresses in a consistent manner.
+- * The %p specifier does not emit uniform output on all hosts. On some,
+- * leading zeros are not supported.
+- */
+ if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
+
+ /* FACS only has signature and length fields */
+
+- ACPI_INFO((AE_INFO, "%-4.4s " ACPI_PRINTF_UINT " %06X",
+- header->signature, ACPI_FORMAT_TO_UINT(address),
++ ACPI_INFO((AE_INFO, "%-4.4s 0x%8.8X%8.8X %06X",
++ header->signature, ACPI_FORMAT_UINT64(address),
+ header->length));
+ } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
+
+@@ -149,9 +143,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
+ header)->oem_id, ACPI_OEM_ID_SIZE);
+ acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
+
+- ACPI_INFO((AE_INFO,
+- "RSDP " ACPI_PRINTF_UINT " %06X (v%.2d %-6.6s)",
+- ACPI_FORMAT_TO_UINT(address),
++ ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
++ ACPI_FORMAT_UINT64(address),
+ (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
+ revision >
+ 0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
+@@ -165,9 +158,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
+ acpi_tb_cleanup_table_header(&local_header, header);
+
+ ACPI_INFO((AE_INFO,
+- "%-4.4s " ACPI_PRINTF_UINT
++ "%-4.4s 0x%8.8X%8.8X"
+ " %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
+- local_header.signature, ACPI_FORMAT_TO_UINT(address),
++ local_header.signature, ACPI_FORMAT_UINT64(address),
+ local_header.length, local_header.revision,
+ local_header.oem_id, local_header.oem_table_id,
+ local_header.oem_revision,
+diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
+index eac52cf14f1a..fa76a3603aa1 100644
+--- a/drivers/acpi/acpica/tbxfroot.c
++++ b/drivers/acpi/acpica/tbxfroot.c
+@@ -142,7 +142,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp)
+ *
+ ******************************************************************************/
+
+-acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
++acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
+ {
+ u8 *table_ptr;
+ u8 *mem_rover;
+@@ -200,7 +200,8 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
+ physical_address +=
+ (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
+
+- *table_address = physical_address;
++ *table_address =
++ (acpi_physical_address) physical_address;
+ return_ACPI_STATUS(AE_OK);
+ }
+ }
+@@ -233,7 +234,7 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
+ (ACPI_HI_RSDP_WINDOW_BASE +
+ ACPI_PTR_DIFF(mem_rover, table_ptr));
+
+- *table_address = physical_address;
++ *table_address = (acpi_physical_address) physical_address;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
+index 1279f50da757..911ea8e7fe87 100644
+--- a/drivers/acpi/acpica/utaddress.c
++++ b/drivers/acpi/acpica/utaddress.c
+@@ -107,10 +107,10 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id,
+ acpi_gbl_address_range_list[space_id] = range_info;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+- "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
++ "\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
+ acpi_ut_get_node_name(range_info->region_node),
+- ACPI_CAST_PTR(void, address),
+- ACPI_CAST_PTR(void, range_info->end_address)));
++ ACPI_FORMAT_UINT64(address),
++ ACPI_FORMAT_UINT64(range_info->end_address)));
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(AE_OK);
+@@ -160,15 +160,13 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id,
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+- "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
++ "\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
+ acpi_ut_get_node_name(range_info->
+ region_node),
+- ACPI_CAST_PTR(void,
+- range_info->
+- start_address),
+- ACPI_CAST_PTR(void,
+- range_info->
+- end_address)));
++ ACPI_FORMAT_UINT64(range_info->
++ start_address),
++ ACPI_FORMAT_UINT64(range_info->
++ end_address)));
+
+ ACPI_FREE(range_info);
+ return_VOID;
+@@ -245,16 +243,14 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
+ region_node);
+
+ ACPI_WARNING((AE_INFO,
+- "%s range 0x%p-0x%p conflicts with OpRegion 0x%p-0x%p (%s)",
++ "%s range 0x%8.8X%8.8X-0x%8.8X%8.8X conflicts with OpRegion 0x%8.8X%8.8X-0x%8.8X%8.8X (%s)",
+ acpi_ut_get_region_name(space_id),
+- ACPI_CAST_PTR(void, address),
+- ACPI_CAST_PTR(void, end_address),
+- ACPI_CAST_PTR(void,
+- range_info->
+- start_address),
+- ACPI_CAST_PTR(void,
+- range_info->
+- end_address),
++ ACPI_FORMAT_UINT64(address),
++ ACPI_FORMAT_UINT64(end_address),
++ ACPI_FORMAT_UINT64(range_info->
++ start_address),
++ ACPI_FORMAT_UINT64(range_info->
++ end_address),
+ pathname));
+ ACPI_FREE(pathname);
+ }
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 5589a6e2a023..8244f013f210 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
+ * @ares: Input ACPI resource object.
+ * @types: Valid resource types of IORESOURCE_XXX
+ *
+- * This is a hepler function to support acpi_dev_get_resources(), which filters
++ * This is a helper function to support acpi_dev_get_resources(), which filters
+ * ACPI resource objects according to resource types.
+ */
+ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
+diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
+index 26e5b5060523..bf034f8b7c1a 100644
+--- a/drivers/acpi/sbshc.c
++++ b/drivers/acpi/sbshc.c
+@@ -14,6 +14,7 @@
+ #include <linux/delay.h>
+ #include <linux/module.h>
+ #include <linux/interrupt.h>
++#include <linux/dmi.h>
+ #include "sbshc.h"
+
+ #define PREFIX "ACPI: "
+@@ -87,6 +88,8 @@ enum acpi_smb_offset {
+ ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
+ };
+
++static bool macbook;
++
+ static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
+ {
+ return ec_read(hc->offset + address, data);
+@@ -132,6 +135,8 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
+ }
+
+ mutex_lock(&hc->lock);
++ if (macbook)
++ udelay(5);
+ if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
+ goto end;
+ if (temp) {
+@@ -257,12 +262,29 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+ acpi_handle handle, acpi_ec_query_func func,
+ void *data);
+
++static int macbook_dmi_match(const struct dmi_system_id *d)
++{
++ pr_debug("Detected MacBook, enabling workaround\n");
++ macbook = true;
++ return 0;
++}
++
++static struct dmi_system_id acpi_smbus_dmi_table[] = {
++ { macbook_dmi_match, "Apple MacBook", {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
++ },
++ { },
++};
++
+ static int acpi_smbus_hc_add(struct acpi_device *device)
+ {
+ int status;
+ unsigned long long val;
+ struct acpi_smb_hc *hc;
+
++ dmi_check_system(acpi_smbus_dmi_table);
++
+ if (!device)
+ return -EINVAL;
+
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index d1f168b73634..773e964f14d9 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1672,8 +1672,8 @@ out:
+
+ static void loop_remove(struct loop_device *lo)
+ {
+- del_gendisk(lo->lo_disk);
+ blk_cleanup_queue(lo->lo_queue);
++ del_gendisk(lo->lo_disk);
+ blk_mq_free_tag_set(&lo->tag_set);
+ put_disk(lo->lo_disk);
+ kfree(lo);
+diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
+index 7722ed53bd65..af3bc7a8033b 100644
+--- a/drivers/gpio/gpiolib-sysfs.c
++++ b/drivers/gpio/gpiolib-sysfs.c
+@@ -551,6 +551,7 @@ static struct class gpio_class = {
+ */
+ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
+ {
++ struct gpio_chip *chip;
+ unsigned long flags;
+ int status;
+ const char *ioname = NULL;
+@@ -568,8 +569,16 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
+ return -EINVAL;
+ }
+
++ chip = desc->chip;
++
+ mutex_lock(&sysfs_lock);
+
++ /* check if chip is being removed */
++ if (!chip || !chip->exported) {
++ status = -ENODEV;
++ goto fail_unlock;
++ }
++
+ spin_lock_irqsave(&gpio_lock, flags);
+ if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
+ test_bit(FLAG_EXPORT, &desc->flags)) {
+@@ -783,12 +792,15 @@ void gpiochip_unexport(struct gpio_chip *chip)
+ {
+ int status;
+ struct device *dev;
++ struct gpio_desc *desc;
++ unsigned int i;
+
+ mutex_lock(&sysfs_lock);
+ dev = class_find_device(&gpio_class, NULL, chip, match_export);
+ if (dev) {
+ put_device(dev);
+ device_unregister(dev);
++ /* prevent further gpiod exports */
+ chip->exported = false;
+ status = 0;
+ } else
+@@ -797,6 +809,13 @@ void gpiochip_unexport(struct gpio_chip *chip)
+
+ if (status)
+ chip_dbg(chip, "%s: status %d\n", __func__, status);
++
++ /* unregister gpiod class devices owned by sysfs */
++ for (i = 0; i < chip->ngpio; i++) {
++ desc = &chip->desc[i];
++ if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
++ gpiod_free(desc);
++ }
+ }
+
+ static int __init gpiolib_sysfs_init(void)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index d8135adb2238..39762a7d2ec7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -429,9 +429,10 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
+
+ BUG_ON(!dqm || !qpd);
+
+- BUG_ON(!list_empty(&qpd->queues_list));
++ pr_debug("In func %s\n", __func__);
+
+- pr_debug("kfd: In func %s\n", __func__);
++ pr_debug("qpd->queues_list is %s\n",
++ list_empty(&qpd->queues_list) ? "empty" : "not empty");
+
+ retval = 0;
+ mutex_lock(&dqm->lock);
+@@ -878,6 +879,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ return -ENOMEM;
+ }
+
++ init_sdma_vm(dqm, q, qpd);
++
+ retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval != 0)
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index 10574a0c3a55..5769db4f51f3 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+
+ /* Reinitialize corresponding vblank timestamp if high-precision query
+ * available. Skip this step if query unsupported or failed. Will
+- * reinitialize delayed at next vblank interrupt in that case.
++ * reinitialize delayed at next vblank interrupt in that case and
++ * assign 0 for now, to mark the vblanktimestamp as invalid.
+ */
+- if (rc) {
+- tslot = atomic_read(&vblank->count) + diff;
+- vblanktimestamp(dev, crtc, tslot) = t_vblank;
+- }
++ tslot = atomic_read(&vblank->count) + diff;
++ vblanktimestamp(dev, crtc, tslot) = rc ? t_vblank : (struct timeval) {0, 0};
+
+ smp_mb__before_atomic();
+ atomic_add(diff, &vblank->count);
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index a74aaf9242b9..88b36a9173c9 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1176,7 +1176,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
+
+ pipe_config->has_dp_encoder = true;
+ pipe_config->has_drrs = false;
+- pipe_config->has_audio = intel_dp->has_audio;
++ pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
+
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+@@ -2026,8 +2026,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
+ int dotclock;
+
+ tmp = I915_READ(intel_dp->output_reg);
+- if (tmp & DP_AUDIO_OUTPUT_ENABLE)
+- pipe_config->has_audio = true;
++
++ pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
+
+ if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
+ if (tmp & DP_SYNC_HS_HIGH)
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 071b96d6e146..fbc2a83795fa 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -812,12 +812,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+ static const struct dmi_system_id intel_dual_link_lvds[] = {
+ {
+ .callback = intel_dual_link_lvds_callback,
+- .ident = "Apple MacBook Pro (Core i5/i7 Series)",
++ .ident = "Apple MacBook Pro 15\" (2010)",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
++ },
++ },
++ {
++ .callback = intel_dual_link_lvds_callback,
++ .ident = "Apple MacBook Pro 15\" (2011)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+ },
+ },
++ {
++ .callback = intel_dual_link_lvds_callback,
++ .ident = "Apple MacBook Pro 15\" (2012)",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
++ },
++ },
+ { } /* terminating entry */
+ };
+
+@@ -847,6 +863,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
+ if (i915.lvds_channel_mode > 0)
+ return i915.lvds_channel_mode == 2;
+
++ /* single channel LVDS is limited to 112 MHz */
++ if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
++ > 112999)
++ return true;
++
+ if (dmi_check_system(intel_dual_link_lvds))
+ return true;
+
+@@ -1104,6 +1125,8 @@ void intel_lvds_init(struct drm_device *dev)
+ out:
+ mutex_unlock(&dev->mode_config.mutex);
+
++ intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
++
+ lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
+ DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
+ lvds_encoder->is_dual_link ? "dual" : "single");
+@@ -1118,7 +1141,6 @@ out:
+ }
+ drm_connector_register(connector);
+
+- intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
+
+ return;
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
+index c0ecd128b14b..7348f222684d 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.c
++++ b/drivers/gpu/drm/radeon/radeon_asic.c
+@@ -1180,7 +1180,7 @@ static struct radeon_asic rs780_asic = {
+ static struct radeon_asic_ring rv770_uvd_ring = {
+ .ib_execute = &uvd_v1_0_ib_execute,
+ .emit_fence = &uvd_v2_2_fence_emit,
+- .emit_semaphore = &uvd_v1_0_semaphore_emit,
++ .emit_semaphore = &uvd_v2_2_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &uvd_v1_0_ring_test,
+ .ib_test = &uvd_v1_0_ib_test,
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index 72bdd3bf0d8e..c2fd3a5e6c55 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -919,6 +919,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+ int uvd_v2_2_resume(struct radeon_device *rdev);
+ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
++bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
++ struct radeon_ring *ring,
++ struct radeon_semaphore *semaphore,
++ bool emit_wait);
+
+ /* uvd v3.1 */
+ bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index b7d33a13db9f..b7c6bb69f3c7 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -464,6 +464,10 @@ void radeon_audio_detect(struct drm_connector *connector,
+ return;
+
+ rdev = connector->encoder->dev->dev_private;
++
++ if (!radeon_audio_chipset_supported(rdev))
++ return;
++
+ radeon_encoder = to_radeon_encoder(connector->encoder);
+ dig = radeon_encoder->enc_priv;
+
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index b292aca0f342..edafd3c2b170 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+ {
+ struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+- struct scatterlist *sg;
+- int i;
++ struct sg_page_iter sg_iter;
+
+ int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+ enum dma_data_direction direction = write ?
+@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+ /* free the sg table and pages again */
+ dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+
+- for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
+- struct page *page = sg_page(sg);
+-
++ for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
++ struct page *page = sg_page_iter_page(&sg_iter);
+ if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
+ set_page_dirty(page);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index c10b2aec6450..cd630287cf0a 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -396,6 +396,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
+ return 0;
+ }
+
++static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
++ unsigned stream_type)
++{
++ switch (stream_type) {
++ case 0: /* H264 */
++ case 1: /* VC1 */
++ /* always supported */
++ return 0;
++
++ case 3: /* MPEG2 */
++ case 4: /* MPEG4 */
++ /* only since UVD 3 */
++ if (p->rdev->family >= CHIP_PALM)
++ return 0;
++
++ /* fall through */
++ default:
++ DRM_ERROR("UVD codec not supported by hardware %d!\n",
++ stream_type);
++ return -EINVAL;
++ }
++}
++
+ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
+ unsigned offset, unsigned buf_sizes[])
+ {
+@@ -436,50 +459,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
+ return -EINVAL;
+ }
+
+- if (msg_type == 1) {
+- /* it's a decode msg, calc buffer sizes */
+- r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+- /* calc image size (width * height) */
+- img_size = msg[6] * msg[7];
++ switch (msg_type) {
++ case 0:
++ /* it's a create msg, calc image size (width * height) */
++ img_size = msg[7] * msg[8];
++
++ r = radeon_uvd_validate_codec(p, msg[4]);
++ radeon_bo_kunmap(bo);
++ if (r)
++ return r;
++
++ /* try to alloc a new handle */
++ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
++ if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
++ DRM_ERROR("Handle 0x%x already in use!\n", handle);
++ return -EINVAL;
++ }
++
++ if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
++ p->rdev->uvd.filp[i] = p->filp;
++ p->rdev->uvd.img_size[i] = img_size;
++ return 0;
++ }
++ }
++
++ DRM_ERROR("No more free UVD handles!\n");
++ return -EINVAL;
++
++ case 1:
++ /* it's a decode msg, validate codec and calc buffer sizes */
++ r = radeon_uvd_validate_codec(p, msg[4]);
++ if (!r)
++ r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+ radeon_bo_kunmap(bo);
+ if (r)
+ return r;
+
+- } else if (msg_type == 2) {
++ /* validate the handle */
++ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
++ if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
++ if (p->rdev->uvd.filp[i] != p->filp) {
++ DRM_ERROR("UVD handle collision detected!\n");
++ return -EINVAL;
++ }
++ return 0;
++ }
++ }
++
++ DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
++ return -ENOENT;
++
++ case 2:
+ /* it's a destroy msg, free the handle */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+ atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
+ radeon_bo_kunmap(bo);
+ return 0;
+- } else {
+- /* it's a create msg, calc image size (width * height) */
+- img_size = msg[7] * msg[8];
+- radeon_bo_kunmap(bo);
+
+- if (msg_type != 0) {
+- DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+- return -EINVAL;
+- }
+-
+- /* it's a create msg, no special handling needed */
+- }
+-
+- /* create or decode, validate the handle */
+- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+- if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
+- return 0;
+- }
++ default:
+
+- /* handle not found try to alloc a new one */
+- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+- if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
+- p->rdev->uvd.filp[i] = p->filp;
+- p->rdev->uvd.img_size[i] = img_size;
+- return 0;
+- }
++ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
++ return -EINVAL;
+ }
+
+- DRM_ERROR("No more free UVD handles!\n");
++ BUG();
+ return -EINVAL;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
+index 976fe432f4e2..7ed561225007 100644
+--- a/drivers/gpu/drm/radeon/radeon_vce.c
++++ b/drivers/gpu/drm/radeon/radeon_vce.c
+@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+ *
+ * @p: parser context
+ * @handle: handle to validate
++ * @allocated: allocated a new handle?
+ *
+ * Validates the handle and return the found session index or -EINVAL
+ * we we don't have another free session index.
+ */
+-int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
++static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
++ uint32_t handle, bool *allocated)
+ {
+ unsigned i;
+
++ *allocated = false;
++
+ /* validate the handle */
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+- if (atomic_read(&p->rdev->vce.handles[i]) == handle)
++ if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
++ if (p->rdev->vce.filp[i] != p->filp) {
++ DRM_ERROR("VCE handle collision detected!\n");
++ return -EINVAL;
++ }
+ return i;
++ }
+ }
+
+ /* handle not found try to alloc a new one */
+@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+ if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+ p->rdev->vce.filp[i] = p->filp;
+ p->rdev->vce.img_size[i] = 0;
++ *allocated = true;
+ return i;
+ }
+ }
+@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
+ {
+ int session_idx = -1;
+- bool destroyed = false;
++ bool destroyed = false, created = false, allocated = false;
+ uint32_t tmp, handle = 0;
+ uint32_t *size = &tmp;
+- int i, r;
++ int i, r = 0;
+
+ while (p->idx < p->chunk_ib->length_dw) {
+ uint32_t len = radeon_get_ib_value(p, p->idx);
+@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
+
+ if ((len < 8) || (len & 3)) {
+ DRM_ERROR("invalid VCE command length (%d)!\n", len);
+- return -EINVAL;
++ r = -EINVAL;
++ goto out;
+ }
+
+ if (destroyed) {
+ DRM_ERROR("No other command allowed after destroy!\n");
+- return -EINVAL;
++ r = -EINVAL;
++ goto out;
+ }
+
+ switch (cmd) {
+ case 0x00000001: // session
+ handle = radeon_get_ib_value(p, p->idx + 2);
+- session_idx = radeon_vce_validate_handle(p, handle);
++ session_idx = radeon_vce_validate_handle(p, handle,
++ &allocated);
+ if (session_idx < 0)
+ return session_idx;
+ size = &p->rdev->vce.img_size[session_idx];
+@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
+ break;
+
+ case 0x01000001: // create
++ created = true;
++ if (!allocated) {
++ DRM_ERROR("Handle already in use!\n");
++ r = -EINVAL;
++ goto out;
++ }
++
+ *size = radeon_get_ib_value(p, p->idx + 8) *
+ radeon_get_ib_value(p, p->idx + 10) *
+ 8 * 3 / 2;
+@@ -577,12 +597,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
+ r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+ *size);
+ if (r)
+- return r;
++ goto out;
+
+ r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+ *size / 3);
+ if (r)
+- return r;
++ goto out;
+ break;
+
+ case 0x02000001: // destroy
+@@ -593,7 +613,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+ *size * 2);
+ if (r)
+- return r;
++ goto out;
+ break;
+
+ case 0x05000004: // video bitstream buffer
+@@ -601,36 +621,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+ tmp);
+ if (r)
+- return r;
++ goto out;
+ break;
+
+ case 0x05000005: // feedback buffer
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+ 4096);
+ if (r)
+- return r;
++ goto out;
+ break;
+
+ default:
+ DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
+- return -EINVAL;
++ r = -EINVAL;
++ goto out;
+ }
+
+ if (session_idx == -1) {
+ DRM_ERROR("no session command at start of IB\n");
+- return -EINVAL;
++ r = -EINVAL;
++ goto out;
+ }
+
+ p->idx += len / 4;
+ }
+
+- if (destroyed) {
+- /* IB contains a destroy msg, free the handle */
++ if (allocated && !created) {
++ DRM_ERROR("New session without create command!\n");
++ r = -ENOENT;
++ }
++
++out:
++ if ((!r && destroyed) || (r && allocated)) {
++ /*
++	 * IB contains a destroy msg or we have allocated a
++ * handle and got an error, anyway free the handle
++ */
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
+ atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
+ }
+
+- return 0;
++ return r;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
+index 3cf1e2921545..9ef2064b1c9c 100644
+--- a/drivers/gpu/drm/radeon/rv770d.h
++++ b/drivers/gpu/drm/radeon/rv770d.h
+@@ -989,6 +989,9 @@
+ ((n) & 0x3FFF) << 16)
+
+ /* UVD */
++#define UVD_SEMA_ADDR_LOW 0xef00
++#define UVD_SEMA_ADDR_HIGH 0xef04
++#define UVD_SEMA_CMD 0xef08
+ #define UVD_GPCOM_VCPU_CMD 0xef0c
+ #define UVD_GPCOM_VCPU_DATA0 0xef10
+ #define UVD_GPCOM_VCPU_DATA1 0xef14
+diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
+index e72b3cb59358..c6b1cbca47fc 100644
+--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
++++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
+@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+ {
+- uint64_t addr = semaphore->gpu_addr;
+-
+- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+-
+- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+-
+- radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+- radeon_ring_write(ring, emit_wait ? 1 : 0);
+-
+- return true;
++ /* disable semaphores for UVD V1 hardware */
++ return false;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
+index 89193519f8a1..7ed778cec7c6 100644
+--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
++++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
+@@ -60,6 +60,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+ }
+
+ /**
++ * uvd_v2_2_semaphore_emit - emit semaphore command
++ *
++ * @rdev: radeon_device pointer
++ * @ring: radeon_ring pointer
++ * @semaphore: semaphore to emit commands for
++ * @emit_wait: true if we should emit a wait command
++ *
++ * Emit a semaphore command (either wait or signal) to the UVD ring.
++ */
++bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
++ struct radeon_ring *ring,
++ struct radeon_semaphore *semaphore,
++ bool emit_wait)
++{
++ uint64_t addr = semaphore->gpu_addr;
++
++ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
++ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
++
++ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
++ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
++
++ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
++ radeon_ring_write(ring, emit_wait ? 1 : 0);
++
++ return true;
++}
++
++/**
+ * uvd_v2_2_resume - memory controller programming
+ *
+ * @rdev: radeon_device pointer
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index d570030d899c..06441a43c3aa 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
+ memcpy(&ib->sib_addr, &path->dgid, 16);
+ }
+
++static __be16 ss_get_port(const struct sockaddr_storage *ss)
++{
++ if (ss->ss_family == AF_INET)
++ return ((struct sockaddr_in *)ss)->sin_port;
++ else if (ss->ss_family == AF_INET6)
++ return ((struct sockaddr_in6 *)ss)->sin6_port;
++ BUG();
++}
++
+ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
+ struct cma_hdr *hdr)
+ {
+- struct sockaddr_in *listen4, *ip4;
++ struct sockaddr_in *ip4;
+
+- listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
+ ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
+- ip4->sin_family = listen4->sin_family;
++ ip4->sin_family = AF_INET;
+ ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
+- ip4->sin_port = listen4->sin_port;
++ ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
+
+ ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
+- ip4->sin_family = listen4->sin_family;
++ ip4->sin_family = AF_INET;
+ ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
+ ip4->sin_port = hdr->port;
+ }
+@@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i
+ static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
+ struct cma_hdr *hdr)
+ {
+- struct sockaddr_in6 *listen6, *ip6;
++ struct sockaddr_in6 *ip6;
+
+- listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
+ ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
+- ip6->sin6_family = listen6->sin6_family;
++ ip6->sin6_family = AF_INET6;
+ ip6->sin6_addr = hdr->dst_addr.ip6;
+- ip6->sin6_port = listen6->sin6_port;
++ ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
+
+ ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
+- ip6->sin6_family = listen6->sin6_family;
++ ip6->sin6_family = AF_INET6;
+ ip6->sin6_addr = hdr->src_addr.ip6;
+ ip6->sin6_port = hdr->port;
+ }
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 414739295d04..713a96237a80 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -925,10 +925,11 @@ static int crypt_convert(struct crypt_config *cc,
+
+ switch (r) {
+ /* async */
+- case -EINPROGRESS:
+ case -EBUSY:
+ wait_for_completion(&ctx->restart);
+ reinit_completion(&ctx->restart);
++ /* fall through*/
++ case -EINPROGRESS:
+ ctx->req = NULL;
+ ctx->cc_sector++;
+ continue;
+@@ -1345,8 +1346,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
+ struct crypt_config *cc = io->cc;
+
+- if (error == -EINPROGRESS)
++ if (error == -EINPROGRESS) {
++ complete(&ctx->restart);
+ return;
++ }
+
+ if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+ error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+@@ -1357,15 +1360,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
+
+ if (!atomic_dec_and_test(&ctx->cc_pending))
+- goto done;
++ return;
+
+ if (bio_data_dir(io->base_bio) == READ)
+ kcryptd_crypt_read_done(io);
+ else
+ kcryptd_crypt_write_io_submit(io, 1);
+-done:
+- if (!completion_done(&ctx->restart))
+- complete(&ctx->restart);
+ }
+
+ static void kcryptd_crypt(struct work_struct *work)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index e6178787ce3d..e47d1dd046da 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -4754,12 +4754,12 @@ static void md_free(struct kobject *ko)
+ if (mddev->sysfs_state)
+ sysfs_put(mddev->sysfs_state);
+
++ if (mddev->queue)
++ blk_cleanup_queue(mddev->queue);
+ if (mddev->gendisk) {
+ del_gendisk(mddev->gendisk);
+ put_disk(mddev->gendisk);
+ }
+- if (mddev->queue)
+- blk_cleanup_queue(mddev->queue);
+
+ kfree(mddev);
+ }
+diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
+index dd5b1415f974..f902eb4ee569 100644
+--- a/drivers/media/platform/marvell-ccic/mcam-core.c
++++ b/drivers/media/platform/marvell-ccic/mcam-core.c
+@@ -116,8 +116,8 @@ static struct mcam_format_struct {
+ .planar = false,
+ },
+ {
+- .desc = "UYVY 4:2:2",
+- .pixelformat = V4L2_PIX_FMT_UYVY,
++ .desc = "YVYU 4:2:2",
++ .pixelformat = V4L2_PIX_FMT_YVYU,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .planar = false,
+@@ -748,7 +748,7 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
+
+ switch (fmt->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+- case V4L2_PIX_FMT_UYVY:
++ case V4L2_PIX_FMT_YVYU:
+ widthy = fmt->width * 2;
+ widthuv = 0;
+ break;
+@@ -784,15 +784,15 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+- C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK);
++ C0_DF_YUV | C0_YUV_420PL | C0_YUVE_VYUY, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+- C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK);
++ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_NOSWAP, C0_DF_MASK);
+ break;
+- case V4L2_PIX_FMT_UYVY:
++ case V4L2_PIX_FMT_YVYU:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+- C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
++ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_SWAP24, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_JPEG:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
+index aa0c6eac254a..7ffdf4dbaf8c 100644
+--- a/drivers/media/platform/marvell-ccic/mcam-core.h
++++ b/drivers/media/platform/marvell-ccic/mcam-core.h
+@@ -330,10 +330,10 @@ int mccic_resume(struct mcam_camera *cam);
+ #define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
+ #define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
+ #define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
+-#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
+-#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
+-#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
+-#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
++#define C0_YUVE_NOSWAP 0x00000000 /* no bytes swapping */
++#define C0_YUVE_SWAP13 0x00010000 /* swap byte 1 and 3 */
++#define C0_YUVE_SWAP24 0x00020000 /* swap byte 2 and 4 */
++#define C0_YUVE_SWAP1324 0x00030000 /* swap bytes 1&3 and 2&4 */
+ /* Bayer bits 18,19 if needed */
+ #define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */
+ #define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index c69afb5e264e..ed2e71a74a58 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -1029,6 +1029,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+ md->reset_done &= ~type;
+ }
+
++int mmc_access_rpmb(struct mmc_queue *mq)
++{
++ struct mmc_blk_data *md = mq->data;
++ /*
++	 * If this is an RPMB partition access, return true
++ */
++ if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
++ return true;
++
++ return false;
++}
++
+ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+ {
+ struct mmc_blk_data *md = mq->data;
+diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
+index 236d194c2883..8efa3684aef8 100644
+--- a/drivers/mmc/card/queue.c
++++ b/drivers/mmc/card/queue.c
+@@ -38,7 +38,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
+ return BLKPREP_KILL;
+ }
+
+- if (mq && mmc_card_removed(mq->card))
++ if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
+ return BLKPREP_KILL;
+
+ req->cmd_flags |= REQ_DONTPREP;
+diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
+index 5752d50049a3..99e6521e6169 100644
+--- a/drivers/mmc/card/queue.h
++++ b/drivers/mmc/card/queue.h
+@@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
+ extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
+ extern void mmc_packed_clean(struct mmc_queue *);
+
++extern int mmc_access_rpmb(struct mmc_queue *);
++
+ #endif
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 23f10f72e5f3..57a8d00672d3 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -2648,6 +2648,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
++ case PM_RESTORE_PREPARE:
+ spin_lock_irqsave(&host->lock, flags);
+ host->rescan_disable = 1;
+ spin_unlock_irqrestore(&host->lock, flags);
+diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
+index 7d9d6a321521..5165ae75d540 100644
+--- a/drivers/mmc/host/sh_mmcif.c
++++ b/drivers/mmc/host/sh_mmcif.c
+@@ -1402,7 +1402,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->addr = reg;
+- host->timeout = msecs_to_jiffies(1000);
++ host->timeout = msecs_to_jiffies(10000);
+ host->ccs_enable = !pd || !pd->ccs_unsupported;
+ host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
+
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 89dca77ca038..18ee2089df4a 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1110,7 +1110,7 @@ void devm_pinctrl_put(struct pinctrl *p)
+ EXPORT_SYMBOL_GPL(devm_pinctrl_put);
+
+ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
+- bool dup, bool locked)
++ bool dup)
+ {
+ int i, ret;
+ struct pinctrl_maps *maps_node;
+@@ -1178,11 +1178,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
+ maps_node->maps = maps;
+ }
+
+- if (!locked)
+- mutex_lock(&pinctrl_maps_mutex);
++ mutex_lock(&pinctrl_maps_mutex);
+ list_add_tail(&maps_node->node, &pinctrl_maps);
+- if (!locked)
+- mutex_unlock(&pinctrl_maps_mutex);
++ mutex_unlock(&pinctrl_maps_mutex);
+
+ return 0;
+ }
+@@ -1197,7 +1195,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
+ int pinctrl_register_mappings(struct pinctrl_map const *maps,
+ unsigned num_maps)
+ {
+- return pinctrl_register_map(maps, num_maps, true, false);
++ return pinctrl_register_map(maps, num_maps, true);
+ }
+
+ void pinctrl_unregister_map(struct pinctrl_map const *map)
+diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
+index 75476b3d87da..b24ea846c867 100644
+--- a/drivers/pinctrl/core.h
++++ b/drivers/pinctrl/core.h
+@@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev,
+ }
+
+ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
+- bool dup, bool locked);
++ bool dup);
+ void pinctrl_unregister_map(struct pinctrl_map const *map);
+
+ extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
+diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
+index eda13de2e7c0..0bbf7d71b281 100644
+--- a/drivers/pinctrl/devicetree.c
++++ b/drivers/pinctrl/devicetree.c
+@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
+ dt_map->num_maps = num_maps;
+ list_add_tail(&dt_map->node, &p->dt_maps);
+
+- return pinctrl_register_map(map, num_maps, false, true);
++ return pinctrl_register_map(map, num_maps, false);
+ }
+
+ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
+diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
+index 43e04af39e09..cb70ced7e0db 100644
+--- a/drivers/rtc/rtc-armada38x.c
++++ b/drivers/rtc/rtc-armada38x.c
+@@ -40,6 +40,13 @@ struct armada38x_rtc {
+ void __iomem *regs;
+ void __iomem *regs_soc;
+ spinlock_t lock;
++ /*
++ * While setting the time, the RTC TIME register should not be
++ * accessed. Setting the RTC time involves sleeping during
++ * 100ms, so a mutex instead of a spinlock is used to protect
++ * it
++ */
++ struct mutex mutex_time;
+ int irq;
+ };
+
+@@ -59,8 +66,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long time, time_check, flags;
+
+- spin_lock_irqsave(&rtc->lock, flags);
+-
++ mutex_lock(&rtc->mutex_time);
+ time = readl(rtc->regs + RTC_TIME);
+ /*
+ * WA for failing time set attempts. As stated in HW ERRATA if
+@@ -71,7 +77,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ if ((time_check - time) > 1)
+ time_check = readl(rtc->regs + RTC_TIME);
+
+- spin_unlock_irqrestore(&rtc->lock, flags);
++ mutex_unlock(&rtc->mutex_time);
+
+ rtc_time_to_tm(time_check, tm);
+
+@@ -94,19 +100,12 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ * then wait for 100ms before writing to the time register to be
+ * sure that the data will be taken into account.
+ */
+- spin_lock_irqsave(&rtc->lock, flags);
+-
++ mutex_lock(&rtc->mutex_time);
+ rtc_delayed_write(0, rtc, RTC_STATUS);
+-
+- spin_unlock_irqrestore(&rtc->lock, flags);
+-
+ msleep(100);
+-
+- spin_lock_irqsave(&rtc->lock, flags);
+-
+ rtc_delayed_write(time, rtc, RTC_TIME);
++ mutex_unlock(&rtc->mutex_time);
+
+- spin_unlock_irqrestore(&rtc->lock, flags);
+ out:
+ return ret;
+ }
+@@ -230,6 +229,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ spin_lock_init(&rtc->lock);
++ mutex_init(&rtc->mutex_time);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc");
+ rtc->regs = devm_ioremap_resource(&pdev->dev, res);
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index f1e57425e39f..5bab1c684bb1 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void)
+ return 0;
+ }
+
++static void xen_console_update_evtchn(struct xencons_info *info)
++{
++ if (xen_hvm_domain()) {
++ uint64_t v;
++ int err;
++
++ err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
++ if (!err && v)
++ info->evtchn = v;
++ } else
++ info->evtchn = xen_start_info->console.domU.evtchn;
++}
++
+ void xen_console_resume(void)
+ {
+ struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
+- if (info != NULL && info->irq)
++ if (info != NULL && info->irq) {
++ if (!xen_initial_domain())
++ xen_console_update_evtchn(info);
+ rebind_evtchn_irq(info->evtchn, info->irq);
++ }
+ }
+
+ static void xencons_disconnect_backend(struct xencons_info *info)
+diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
+index 4cde85501444..837d1778970b 100644
+--- a/drivers/vfio/vfio.c
++++ b/drivers/vfio/vfio.c
+@@ -711,6 +711,8 @@ void *vfio_del_group_dev(struct device *dev)
+ void *device_data = device->device_data;
+ struct vfio_unbound_dev *unbound;
+ unsigned int i = 0;
++ long ret;
++ bool interrupted = false;
+
+ /*
+ * The group exists so long as we have a device reference. Get
+@@ -756,9 +758,22 @@ void *vfio_del_group_dev(struct device *dev)
+
+ vfio_device_put(device);
+
+- } while (wait_event_interruptible_timeout(vfio.release_q,
+- !vfio_dev_present(group, dev),
+- HZ * 10) <= 0);
++ if (interrupted) {
++ ret = wait_event_timeout(vfio.release_q,
++ !vfio_dev_present(group, dev), HZ * 10);
++ } else {
++ ret = wait_event_interruptible_timeout(vfio.release_q,
++ !vfio_dev_present(group, dev), HZ * 10);
++ if (ret == -ERESTARTSYS) {
++ interrupted = true;
++ dev_warn(dev,
++ "Device is currently in use, task"
++ " \"%s\" (%d) "
++ "blocked until device is released",
++ current->comm, task_pid_nr(current));
++ }
++ }
++ } while (ret <= 0);
+
+ vfio_group_put(group);
+
+diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
+index 5db43fc100a4..7dd46312c180 100644
+--- a/drivers/xen/events/events_2l.c
++++ b/drivers/xen/events/events_2l.c
+@@ -345,6 +345,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
+ return IRQ_HANDLED;
+ }
+
++static void evtchn_2l_resume(void)
++{
++ int i;
++
++ for_each_online_cpu(i)
++ memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
++ EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
++}
++
+ static const struct evtchn_ops evtchn_ops_2l = {
+ .max_channels = evtchn_2l_max_channels,
+ .nr_channels = evtchn_2l_max_channels,
+@@ -356,6 +365,7 @@ static const struct evtchn_ops evtchn_ops_2l = {
+ .mask = evtchn_2l_mask,
+ .unmask = evtchn_2l_unmask,
+ .handle_events = evtchn_2l_handle_events,
++ .resume = evtchn_2l_resume,
+ };
+
+ void __init xen_evtchn_2l_init(void)
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 70fba973a107..2b8553bd8715 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -529,8 +529,8 @@ static unsigned int __startup_pirq(unsigned int irq)
+ if (rc)
+ goto err;
+
+- bind_evtchn_to_cpu(evtchn, 0);
+ info->evtchn = evtchn;
++ bind_evtchn_to_cpu(evtchn, 0);
+
+ rc = xen_evtchn_port_setup(info);
+ if (rc)
+@@ -1279,8 +1279,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
+
+ mutex_unlock(&irq_mapping_update_lock);
+
+- /* new event channels are always bound to cpu 0 */
+- irq_set_affinity(irq, cpumask_of(0));
++ bind_evtchn_to_cpu(evtchn, info->cpu);
++ /* This will be deferred until interrupt is processed */
++ irq_set_affinity(irq, cpumask_of(info->cpu));
+
+ /* Unmask the event channel. */
+ enable_irq(irq);
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index 75fe3d466515..9c234209d8b5 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -16,8 +16,8 @@
+ #include "conf_space.h"
+ #include "conf_space_quirks.h"
+
+-bool permissive;
+-module_param(permissive, bool, 0644);
++bool xen_pcibk_permissive;
++module_param_named(permissive, xen_pcibk_permissive, bool, 0644);
+
+ /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
+ * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
+@@ -262,7 +262,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
+ * This means that some fields may still be read-only because
+ * they have entries in the config_field list that intercept
+ * the write and do nothing. */
+- if (dev_data->permissive || permissive) {
++ if (dev_data->permissive || xen_pcibk_permissive) {
+ switch (size) {
+ case 1:
+ err = pci_write_config_byte(dev, offset,
+diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
+index 2e1d73d1d5d0..62461a8ba1d6 100644
+--- a/drivers/xen/xen-pciback/conf_space.h
++++ b/drivers/xen/xen-pciback/conf_space.h
+@@ -64,7 +64,7 @@ struct config_field_entry {
+ void *data;
+ };
+
+-extern bool permissive;
++extern bool xen_pcibk_permissive;
+
+ #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
+
+diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
+index 2d7369391472..f8baf463dd35 100644
+--- a/drivers/xen/xen-pciback/conf_space_header.c
++++ b/drivers/xen/xen-pciback/conf_space_header.c
+@@ -105,7 +105,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+
+ cmd->val = value;
+
+- if (!permissive && (!dev_data || !dev_data->permissive))
++ if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
+ return 0;
+
+ /* Only allow the guest to control certain bits. */
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 564b31584860..5390a674b5e3 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -57,6 +57,7 @@
+ #include <xen/xen.h>
+ #include <xen/xenbus.h>
+ #include <xen/events.h>
++#include <xen/xen-ops.h>
+ #include <xen/page.h>
+
+ #include <xen/hvm.h>
+@@ -735,6 +736,30 @@ static int __init xenstored_local_init(void)
+ return err;
+ }
+
++static int xenbus_resume_cb(struct notifier_block *nb,
++ unsigned long action, void *data)
++{
++ int err = 0;
++
++ if (xen_hvm_domain()) {
++ uint64_t v;
++
++ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
++ if (!err && v)
++ xen_store_evtchn = v;
++ else
++ pr_warn("Cannot update xenstore event channel: %d\n",
++ err);
++ } else
++ xen_store_evtchn = xen_start_info->store_evtchn;
++
++ return err;
++}
++
++static struct notifier_block xenbus_resume_nb = {
++ .notifier_call = xenbus_resume_cb,
++};
++
+ static int __init xenbus_init(void)
+ {
+ int err = 0;
+@@ -793,6 +818,10 @@ static int __init xenbus_init(void)
+ goto out_error;
+ }
+
++ if ((xen_store_domain_type != XS_LOCAL) &&
++ (xen_store_domain_type != XS_UNKNOWN))
++ xen_resume_notifier_register(&xenbus_resume_nb);
++
+ #ifdef CONFIG_XEN_COMPAT_XENFS
+ /*
+ * Create xenfs mountpoint in /proc for compatibility with
+diff --git a/fs/coredump.c b/fs/coredump.c
+index f319926ddf8c..bbbe139ab280 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -657,7 +657,7 @@ void do_coredump(const siginfo_t *siginfo)
+ */
+ if (!uid_eq(inode->i_uid, current_fsuid()))
+ goto close_fail;
+- if (!cprm.file->f_op->write)
++ if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
+ goto close_fail;
+ if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
+ goto close_fail;
+diff --git a/fs/namei.c b/fs/namei.c
+index caa38a24e1f7..50a8583e8156 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3228,7 +3228,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+
+ if (unlikely(file->f_flags & __O_TMPFILE)) {
+ error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
+- goto out;
++ goto out2;
+ }
+
+ error = path_init(dfd, pathname->name, flags, nd);
+@@ -3258,6 +3258,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+ }
+ out:
+ path_cleanup(nd);
++out2:
+ if (!(opened & FILE_OPENED)) {
+ BUG_ON(!error);
+ put_filp(file);
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 4622ee32a5e2..38ed1e1bed41 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -3178,6 +3178,12 @@ bool fs_fully_visible(struct file_system_type *type)
+ if (mnt->mnt.mnt_sb->s_type != type)
+ continue;
+
++ /* This mount is not fully visible if it's root directory
++ * is not the root directory of the filesystem.
++ */
++ if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
++ continue;
++
+ /* This mount is not fully visible if there are any child mounts
+ * that cover anything except for empty directories.
+ */
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index ecdbae19a766..090d8ce25bd1 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
+ nchildren = nilfs_btree_node_get_nchildren(node);
+
+ if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
+- level > NILFS_BTREE_LEVEL_MAX ||
++ level >= NILFS_BTREE_LEVEL_MAX ||
+ nchildren < 0 ||
+ nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
+ pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
+diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
+index a6944b25fd5b..fdf4b41d0609 100644
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -757,6 +757,19 @@ lookup:
+ if (tmpres) {
+ spin_unlock(&dlm->spinlock);
+ spin_lock(&tmpres->spinlock);
++
++ /*
++ * Right after dlm spinlock was released, dlm_thread could have
++ * purged the lockres. Check if lockres got unhashed. If so
++ * start over.
++ */
++ if (hlist_unhashed(&tmpres->hash_node)) {
++ spin_unlock(&tmpres->spinlock);
++ dlm_lockres_put(tmpres);
++ tmpres = NULL;
++ goto lookup;
++ }
++
+ /* Wait on the thread that is mastering the resource */
+ if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ __dlm_wait_on_lockres(tmpres);
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index d56f5d722138..65aa4fa0ae4e 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -431,13 +431,13 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void))
+ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void))
+
+ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
+- acpi_find_root_pointer(acpi_size * rsdp_address))
+-
++ acpi_find_root_pointer(acpi_physical_address *
++ rsdp_address))
+ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+- acpi_get_table_header(acpi_string signature,
+- u32 instance,
+- struct acpi_table_header
+- *out_table_header))
++ acpi_get_table_header(acpi_string signature,
++ u32 instance,
++ struct acpi_table_header
++ *out_table_header))
+ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_get_table(acpi_string signature, u32 instance,
+ struct acpi_table_header
+diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
+index ff3fea3194c6..9abb763e4b86 100644
+--- a/include/linux/nilfs2_fs.h
++++ b/include/linux/nilfs2_fs.h
+@@ -460,7 +460,7 @@ struct nilfs_btree_node {
+ /* level */
+ #define NILFS_BTREE_LEVEL_DATA 0
+ #define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
+-#define NILFS_BTREE_LEVEL_MAX 14
++#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */
+
+ /**
+ * struct nilfs_palloc_group_desc - block group descriptor
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index d487f8dc6d39..72a5224c8084 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1141,10 +1141,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+ * The check (unnecessarily) ignores LRU pages being isolated and
+ * walked by the page reclaim code, however that's not a big loss.
+ */
+- if (!PageHuge(p) && !PageTransTail(p)) {
+- if (!PageLRU(p))
+- shake_page(p, 0);
+- if (!PageLRU(p)) {
++ if (!PageHuge(p)) {
++ if (!PageLRU(hpage))
++ shake_page(hpage, 0);
++ if (!PageLRU(hpage)) {
+ /*
+ * shake_page could have turned it free.
+ */
+@@ -1721,12 +1721,12 @@ int soft_offline_page(struct page *page, int flags)
+ } else if (ret == 0) { /* for free pages */
+ if (PageHuge(page)) {
+ set_page_hwpoison_huge_page(hpage);
+- dequeue_hwpoisoned_huge_page(hpage);
+- atomic_long_add(1 << compound_order(hpage),
++ if (!dequeue_hwpoisoned_huge_page(hpage))
++ atomic_long_add(1 << compound_order(hpage),
+ &num_poisoned_pages);
+ } else {
+- SetPageHWPoison(page);
+- atomic_long_inc(&num_poisoned_pages);
++ if (!TestSetPageHWPoison(page))
++ atomic_long_inc(&num_poisoned_pages);
+ }
+ }
+ unset_migratetype_isolate(page, MIGRATE_MOVABLE);
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 644bcb665773..ad05f2f7bb65 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -580,7 +580,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
+ long x;
+
+ x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+- limit - setpoint + 1);
++ (limit - setpoint) | 1);
+ pos_ratio = x;
+ pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+ pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+@@ -807,7 +807,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+ * scale global setpoint to bdi's:
+ * bdi_setpoint = setpoint * bdi_thresh / thresh
+ */
+- x = div_u64((u64)bdi_thresh << 16, thresh + 1);
++ x = div_u64((u64)bdi_thresh << 16, thresh | 1);
+ bdi_setpoint = setpoint * (u64)x >> 16;
+ /*
+ * Use span=(8*write_bw) in single bdi case as indicated by
+@@ -822,7 +822,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+
+ if (bdi_dirty < x_intercept - span / 4) {
+ pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
+- x_intercept - bdi_setpoint + 1);
++ (x_intercept - bdi_setpoint) | 1);
+ } else
+ pos_ratio /= 4;
+
+diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c
+index c0eea1dfe90f..f19da4b47c1d 100644
+--- a/sound/oss/sequencer.c
++++ b/sound/oss/sequencer.c
+@@ -681,13 +681,8 @@ static int seq_timing_event(unsigned char *event_rec)
+ break;
+
+ case TMR_ECHO:
+- if (seq_mode == SEQ_2)
+- seq_copy_to_input(event_rec, 8);
+- else
+- {
+- parm = (parm << 8 | SEQ_ECHO);
+- seq_copy_to_input((unsigned char *) &parm, 4);
+- }
++ parm = (parm << 8 | SEQ_ECHO);
++ seq_copy_to_input((unsigned char *) &parm, 4);
+ break;
+
+ default:;
+@@ -1324,7 +1319,6 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a
+ int mode = translate_mode(file);
+ struct synth_info inf;
+ struct seq_event_rec event_rec;
+- unsigned long flags;
+ int __user *p = arg;
+
+ orig_dev = dev = dev >> 4;
+@@ -1479,9 +1473,7 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a
+ case SNDCTL_SEQ_OUTOFBAND:
+ if (copy_from_user(&event_rec, arg, sizeof(event_rec)))
+ return -EFAULT;
+- spin_lock_irqsave(&lock,flags);
+ play_event(event_rec.arr);
+- spin_unlock_irqrestore(&lock,flags);
+ return 0;
+
+ case SNDCTL_MIDI_INFO:
diff --git a/1004_linux-4.0.5.patch b/1004_linux-4.0.5.patch
new file mode 100644
index 0000000..84509c0
--- /dev/null
+++ b/1004_linux-4.0.5.patch
@@ -0,0 +1,4937 @@
+diff --git a/Documentation/hwmon/tmp401 b/Documentation/hwmon/tmp401
+index 8eb88e974055..711f75e189eb 100644
+--- a/Documentation/hwmon/tmp401
++++ b/Documentation/hwmon/tmp401
+@@ -20,7 +20,7 @@ Supported chips:
+ Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html
+ * Texas Instruments TMP435
+ Prefix: 'tmp435'
+- Addresses scanned: I2C 0x37, 0x48 - 0x4f
++ Addresses scanned: I2C 0x48 - 0x4f
+ Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html
+
+ Authors:
+diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
+index 1e52d67d0abf..dbe6623fed1c 100644
+--- a/Documentation/serial/tty.txt
++++ b/Documentation/serial/tty.txt
+@@ -198,6 +198,9 @@ TTY_IO_ERROR If set, causes all subsequent userspace read/write
+
+ TTY_OTHER_CLOSED Device is a pty and the other side has closed.
+
++TTY_OTHER_DONE Device is a pty and the other side has closed and
++ all pending input processing has been completed.
++
+ TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into
+ smaller chunks.
+
+diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
+index 53838d9c6295..c59bd9bc41ef 100644
+--- a/Documentation/virtual/kvm/mmu.txt
++++ b/Documentation/virtual/kvm/mmu.txt
+@@ -169,6 +169,10 @@ Shadow pages contain the following information:
+ Contains the value of cr4.smep && !cr0.wp for which the page is valid
+ (pages for which this is true are different from other pages; see the
+ treatment of cr0.wp=0 below).
++ role.smap_andnot_wp:
++ Contains the value of cr4.smap && !cr0.wp for which the page is valid
++ (pages for which this is true are different from other pages; see the
++ treatment of cr0.wp=0 below).
+ gfn:
+ Either the guest page table containing the translations shadowed by this
+ page, or the base page frame for linear translations. See role.direct.
+@@ -344,10 +348,16 @@ on fault type:
+
+ (user write faults generate a #PF)
+
+-In the first case there is an additional complication if CR4.SMEP is
+-enabled: since we've turned the page into a kernel page, the kernel may now
+-execute it. We handle this by also setting spte.nx. If we get a user
+-fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back.
++In the first case there are two additional complications:
++- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
++ the kernel may now execute it. We handle this by also setting spte.nx.
++ If we get a user fetch or read fault, we'll change spte.u=1 and
++ spte.nx=gpte.nx back.
++- if CR4.SMAP is disabled: since the page has been changed to a kernel
++ page, it can not be reused when CR4.SMAP is enabled. We set
++ CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
++  here we do not care about the case that CR4.SMAP is enabled since KVM will
++ directly inject #PF to guest due to failed permission check.
+
+ To prevent an spte that was converted into a kernel page with cr0.wp=0
+ from being written by the kernel after cr0.wp has changed to 1, we make
+diff --git a/Makefile b/Makefile
+index 3d16bcc87585..1880cf77059b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 0
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma sheep
+
+diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
+index 067551b6920a..9917a45fc430 100644
+--- a/arch/arc/include/asm/atomic.h
++++ b/arch/arc/include/asm/atomic.h
+@@ -99,7 +99,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
+ atomic_ops_unlock(flags); \
+ }
+
+-#define ATOMIC_OP_RETURN(op, c_op) \
++#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ static inline int atomic_##op##_return(int i, atomic_t *v) \
+ { \
+ unsigned long flags; \
+diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
+index a1c776b8dcec..992ea0b063d5 100644
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -215,7 +215,7 @@ dtb-$(CONFIG_SOC_IMX25) += \
+ imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \
+ imx25-karo-tx25.dtb \
+ imx25-pdk.dtb
+-dtb-$(CONFIG_SOC_IMX31) += \
++dtb-$(CONFIG_SOC_IMX27) += \
+ imx27-apf27.dtb \
+ imx27-apf27dev.dtb \
+ imx27-eukrea-mbimxsd27-baseboard.dtb \
+diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
+index 173ffa479ad3..792394dd0f2a 100644
+--- a/arch/arm/boot/dts/exynos4412-trats2.dts
++++ b/arch/arm/boot/dts/exynos4412-trats2.dts
+@@ -736,7 +736,7 @@
+
+ display-timings {
+ timing-0 {
+- clock-frequency = <0>;
++ clock-frequency = <57153600>;
+ hactive = <720>;
+ vactive = <1280>;
+ hfront-porch = <5>;
+diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
+index 4b063b68db44..9ce1d2128749 100644
+--- a/arch/arm/boot/dts/imx27.dtsi
++++ b/arch/arm/boot/dts/imx27.dtsi
+@@ -531,7 +531,7 @@
+
+ fec: ethernet@1002b000 {
+ compatible = "fsl,imx27-fec";
+- reg = <0x1002b000 0x4000>;
++ reg = <0x1002b000 0x1000>;
+ interrupts = <50>;
+ clocks = <&clks IMX27_CLK_FEC_IPG_GATE>,
+ <&clks IMX27_CLK_FEC_AHB_GATE>;
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index f8ccc21fa032..4e7f40c577e6 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -33,7 +33,9 @@ ret_fast_syscall:
+ UNWIND(.fnstart )
+ UNWIND(.cantunwind )
+ disable_irq @ disable interrupts
+- ldr r1, [tsk, #TI_FLAGS]
++ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
++ tst r1, #_TIF_SYSCALL_WORK
++ bne __sys_trace_return
+ tst r1, #_TIF_WORK_MASK
+ bne fast_work_pending
+ asm_trace_hardirqs_on
+diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
+index 37266a826437..1f02bcb350e5 100644
+--- a/arch/arm/mach-exynos/pm_domains.c
++++ b/arch/arm/mach-exynos/pm_domains.c
+@@ -169,7 +169,7 @@ no_clk:
+ args.np = np;
+ args.args_count = 0;
+ child_domain = of_genpd_get_from_provider(&args);
+- if (!child_domain)
++ if (IS_ERR(child_domain))
+ continue;
+
+ if (of_parse_phandle_with_args(np, "power-domains",
+@@ -177,7 +177,7 @@ no_clk:
+ continue;
+
+ parent_domain = of_genpd_get_from_provider(&args);
+- if (!parent_domain)
++ if (IS_ERR(parent_domain))
+ continue;
+
+ if (pm_genpd_add_subdomain(parent_domain, child_domain))
+diff --git a/arch/arm/mach-exynos/sleep.S b/arch/arm/mach-exynos/sleep.S
+index 31d25834b9c4..cf950790fbdc 100644
+--- a/arch/arm/mach-exynos/sleep.S
++++ b/arch/arm/mach-exynos/sleep.S
+@@ -23,14 +23,7 @@
+ #define CPU_MASK 0xff0ffff0
+ #define CPU_CORTEX_A9 0x410fc090
+
+- /*
+- * The following code is located into the .data section. This is to
+- * allow l2x0_regs_phys to be accessed with a relative load while we
+- * can't rely on any MMU translation. We could have put l2x0_regs_phys
+- * in the .text section as well, but some setups might insist on it to
+- * be truly read-only. (Reference from: arch/arm/kernel/sleep.S)
+- */
+- .data
++ .text
+ .align
+
+ /*
+@@ -69,10 +62,12 @@ ENTRY(exynos_cpu_resume_ns)
+ cmp r0, r1
+ bne skip_cp15
+
+- adr r0, cp15_save_power
++ adr r0, _cp15_save_power
+ ldr r1, [r0]
+- adr r0, cp15_save_diag
++ ldr r1, [r0, r1]
++ adr r0, _cp15_save_diag
+ ldr r2, [r0]
++ ldr r2, [r0, r2]
+ mov r0, #SMC_CMD_C15RESUME
+ dsb
+ smc #0
+@@ -118,14 +113,20 @@ skip_l2x0:
+ skip_cp15:
+ b cpu_resume
+ ENDPROC(exynos_cpu_resume_ns)
++
++ .align
++_cp15_save_power:
++ .long cp15_save_power - .
++_cp15_save_diag:
++ .long cp15_save_diag - .
++#ifdef CONFIG_CACHE_L2X0
++1: .long l2x0_saved_regs - .
++#endif /* CONFIG_CACHE_L2X0 */
++
++ .data
+ .globl cp15_save_diag
+ cp15_save_diag:
+ .long 0 @ cp15 diagnostic
+ .globl cp15_save_power
+ cp15_save_power:
+ .long 0 @ cp15 power control
+-
+-#ifdef CONFIG_CACHE_L2X0
+- .align
+-1: .long l2x0_saved_regs - .
+-#endif /* CONFIG_CACHE_L2X0 */
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index 4e6ef896c619..7186382672b5 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
+ }
+
+ /*
+- * Find the first non-section-aligned page, and point
++ * Find the first non-pmd-aligned page, and point
+ * memblock_limit at it. This relies on rounding the
+- * limit down to be section-aligned, which happens at
+- * the end of this function.
++ * limit down to be pmd-aligned, which happens at the
++ * end of this function.
+ *
+ * With this algorithm, the start or end of almost any
+- * bank can be non-section-aligned. The only exception
+- * is that the start of the bank 0 must be section-
++ * bank can be non-pmd-aligned. The only exception is
++ * that the start of the bank 0 must be section-
+ * aligned, since otherwise memory would need to be
+ * allocated when mapping the start of bank 0, which
+ * occurs before any free memory is mapped.
+ */
+ if (!memblock_limit) {
+- if (!IS_ALIGNED(block_start, SECTION_SIZE))
++ if (!IS_ALIGNED(block_start, PMD_SIZE))
+ memblock_limit = block_start;
+- else if (!IS_ALIGNED(block_end, SECTION_SIZE))
++ else if (!IS_ALIGNED(block_end, PMD_SIZE))
+ memblock_limit = arm_lowmem_limit;
+ }
+
+@@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
+ high_memory = __va(arm_lowmem_limit - 1) + 1;
+
+ /*
+- * Round the memblock limit down to a section size. This
++ * Round the memblock limit down to a pmd size. This
+ * helps to ensure that we will allocate memory from the
+- * last full section, which should be mapped.
++ * last full pmd, which should be mapped.
+ */
+ if (memblock_limit)
+- memblock_limit = round_down(memblock_limit, SECTION_SIZE);
++ memblock_limit = round_down(memblock_limit, PMD_SIZE);
+ if (!memblock_limit)
+ memblock_limit = arm_lowmem_limit;
+
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index edba042b2325..dc6a4842683a 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -487,7 +487,7 @@ emit_cond_jmp:
+ return -EINVAL;
+ }
+
+- imm64 = (u64)insn1.imm << 32 | imm;
++ imm64 = (u64)insn1.imm << 32 | (u32)imm;
+ emit_a64_mov_i64(dst, imm64, ctx);
+
+ return 1;
+diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
+index d2c09f6475c5..f20cedcb50f1 100644
+--- a/arch/mips/kernel/elf.c
++++ b/arch/mips/kernel/elf.c
+@@ -76,14 +76,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
+
+ /* Lets see if this is an O32 ELF */
+ if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
+- /* FR = 1 for N32 */
+- if (ehdr32->e_flags & EF_MIPS_ABI2)
+- state->overall_fp_mode = FP_FR1;
+- else
+- /* Set a good default FPU mode for O32 */
+- state->overall_fp_mode = cpu_has_mips_r6 ?
+- FP_FRE : FP_FR0;
+-
+ if (ehdr32->e_flags & EF_MIPS_FP64) {
+ /*
+ * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
+@@ -104,9 +96,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
+ (char *)&abiflags,
+ sizeof(abiflags));
+ } else {
+- /* FR=1 is really the only option for 64-bit */
+- state->overall_fp_mode = FP_FR1;
+-
+ if (phdr64->p_type != PT_MIPS_ABIFLAGS)
+ return 0;
+ if (phdr64->p_filesz < sizeof(abiflags))
+@@ -147,6 +136,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
+ struct elf32_hdr *ehdr = _ehdr;
+ struct mode_req prog_req, interp_req;
+ int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
++ bool is_mips64;
+
+ if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ return 0;
+@@ -162,10 +152,22 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
+ abi0 = abi1 = fp_abi;
+ }
+
+- /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
+- max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
+- (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
+- MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
++ is_mips64 = (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ||
++ (ehdr->e_flags & EF_MIPS_ABI2);
++
++ if (is_mips64) {
++ /* MIPS64 code always uses FR=1, thus the default is easy */
++ state->overall_fp_mode = FP_FR1;
++
++ /* Disallow access to the various FPXX & FP64 ABIs */
++ max_abi = MIPS_ABI_FP_SOFT;
++ } else {
++ /* Default to a mode capable of running code expecting FR=0 */
++ state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
++
++ /* Allow all ABIs we know about */
++ max_abi = MIPS_ABI_FP_64A;
++ }
+
+ if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
+ (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
+diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
+index 3391d061eccc..78c9fd32c554 100644
+--- a/arch/parisc/include/asm/elf.h
++++ b/arch/parisc/include/asm/elf.h
+@@ -348,6 +348,10 @@ struct pt_regs; /* forward declaration... */
+
+ #define ELF_HWCAP 0
+
++#define STACK_RND_MASK (is_32bit_task() ? \
++ 0x7ff >> (PAGE_SHIFT - 12) : \
++ 0x3ffff >> (PAGE_SHIFT - 12))
++
+ struct mm_struct;
+ extern unsigned long arch_randomize_brk(struct mm_struct *);
+ #define arch_randomize_brk arch_randomize_brk
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index e1ffea2f9a0b..5aba01ac457f 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -77,6 +77,9 @@ static unsigned long mmap_upper_limit(void)
+ if (stack_base > STACK_SIZE_MAX)
+ stack_base = STACK_SIZE_MAX;
+
++ /* Add space for stack randomization. */
++ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
++
+ return PAGE_ALIGN(STACK_TOP - stack_base);
+ }
+
+diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
+index 15c99b649b04..b2eb4686bd8f 100644
+--- a/arch/powerpc/kernel/mce.c
++++ b/arch/powerpc/kernel/mce.c
+@@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
+ uint64_t nip, uint64_t addr)
+ {
+ uint64_t srr1;
+- int index = __this_cpu_inc_return(mce_nest_count);
++ int index = __this_cpu_inc_return(mce_nest_count) - 1;
+ struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
+
+ /*
+@@ -184,7 +184,7 @@ void machine_check_queue_event(void)
+ if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
+ return;
+
+- index = __this_cpu_inc_return(mce_queue_count);
++ index = __this_cpu_inc_return(mce_queue_count) - 1;
+ /* If queue is full, just return for now. */
+ if (index >= MAX_MC_EVT) {
+ __this_cpu_dec(mce_queue_count);
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index f096e72262f4..1db685104ffc 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -213,6 +213,7 @@ SECTIONS
+ *(.opd)
+ }
+
++ . = ALIGN(256);
+ .got : AT(ADDR(.got) - LOAD_OFFSET) {
+ __toc_start = .;
+ #ifndef CONFIG_RELOCATABLE
+diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
+index 7940dc90e80b..b258110da952 100644
+--- a/arch/s390/crypto/ghash_s390.c
++++ b/arch/s390/crypto/ghash_s390.c
+@@ -16,11 +16,12 @@
+ #define GHASH_DIGEST_SIZE 16
+
+ struct ghash_ctx {
+- u8 icv[16];
+- u8 key[16];
++ u8 key[GHASH_BLOCK_SIZE];
+ };
+
+ struct ghash_desc_ctx {
++ u8 icv[GHASH_BLOCK_SIZE];
++ u8 key[GHASH_BLOCK_SIZE];
+ u8 buffer[GHASH_BLOCK_SIZE];
+ u32 bytes;
+ };
+@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
+ static int ghash_init(struct shash_desc *desc)
+ {
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
++ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+
+ memset(dctx, 0, sizeof(*dctx));
++ memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
+
+ return 0;
+ }
+@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
+ }
+
+ memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
+- memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
+
+ return 0;
+ }
+@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+ {
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ unsigned int n;
+ u8 *buf = dctx->buffer;
+ int ret;
+@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
+ src += n;
+
+ if (!dctx->bytes) {
+- ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
++ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
+ GHASH_BLOCK_SIZE);
+ if (ret != GHASH_BLOCK_SIZE)
+ return -EIO;
+@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
+
+ n = srclen & ~(GHASH_BLOCK_SIZE - 1);
+ if (n) {
+- ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
++ ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
+ if (ret != n)
+ return -EIO;
+ src += n;
+@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
+ return 0;
+ }
+
+-static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
++static int ghash_flush(struct ghash_desc_ctx *dctx)
+ {
+ u8 *buf = dctx->buffer;
+ int ret;
+@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+
+ memset(pos, 0, dctx->bytes);
+
+- ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
++ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
+ if (ret != GHASH_BLOCK_SIZE)
+ return -EIO;
++
++ dctx->bytes = 0;
+ }
+
+- dctx->bytes = 0;
+ return 0;
+ }
+
+ static int ghash_final(struct shash_desc *desc, u8 *dst)
+ {
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ int ret;
+
+- ret = ghash_flush(ctx, dctx);
++ ret = ghash_flush(dctx);
+ if (!ret)
+- memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
++ memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
+ return ret;
+ }
+
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index e08ec38f8c6e..e10112da008d 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -600,7 +600,7 @@ static inline int pmd_large(pmd_t pmd)
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
+ }
+
+-static inline int pmd_pfn(pmd_t pmd)
++static inline unsigned long pmd_pfn(pmd_t pmd)
+ {
+ unsigned long origin_mask;
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index a236e39cc385..1c0fb570b5c2 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -212,6 +212,7 @@ union kvm_mmu_page_role {
+ unsigned nxe:1;
+ unsigned cr0_wp:1;
+ unsigned smep_andnot_wp:1;
++ unsigned smap_andnot_wp:1;
+ };
+ };
+
+@@ -404,6 +405,7 @@ struct kvm_vcpu_arch {
+ struct kvm_mmu_memory_cache mmu_page_header_cache;
+
+ struct fpu guest_fpu;
++ bool eager_fpu;
+ u64 xcr0;
+ u64 guest_supported_xcr0;
+ u32 guest_xstate_size;
+@@ -735,6 +737,7 @@ struct kvm_x86_ops {
+ void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+ unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
+ void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
++ void (*fpu_activate)(struct kvm_vcpu *vcpu);
+ void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
+
+ void (*tlb_flush)(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 3c036cb4a370..11dd8f23fcea 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -705,6 +705,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
+ struct pt_regs *regs)
+ {
+ int i, ret = 0;
++ char *tmp;
+
+ for (i = 0; i < mca_cfg.banks; i++) {
+ m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+@@ -713,9 +714,11 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
+ if (quirk_no_way_out)
+ quirk_no_way_out(i, m, regs);
+ }
+- if (mce_severity(m, mca_cfg.tolerant, msg, true) >=
+- MCE_PANIC_SEVERITY)
++
++ if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
++ *msg = tmp;
+ ret = 1;
++ }
+ }
+ return ret;
+ }
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+index c4bb8b8e5017..76d8cbe5a10f 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+@@ -680,6 +680,7 @@ static int __init rapl_pmu_init(void)
+ break;
+ case 60: /* Haswell */
+ case 69: /* Haswell-Celeron */
++ case 61: /* Broadwell */
+ rapl_cntr_mask = RAPL_IDX_HSW;
+ rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
+ break;
+diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
+index d5651fce0b71..f341d56b7883 100644
+--- a/arch/x86/kernel/i387.c
++++ b/arch/x86/kernel/i387.c
+@@ -169,6 +169,21 @@ static void init_thread_xstate(void)
+ xstate_size = sizeof(struct i387_fxsave_struct);
+ else
+ xstate_size = sizeof(struct i387_fsave_struct);
++
++ /*
++ * Quirk: we don't yet handle the XSAVES* instructions
++ * correctly, as we don't correctly convert between
++ * standard and compacted format when interfacing
++ * with user-space - so disable it for now.
++ *
++ * The difference is small: with recent CPUs the
++ * compacted format is only marginally smaller than
++ * the standard FPU state format.
++ *
++ * ( This is easy to backport while we are fixing
++ * XSAVES* support. )
++ */
++ setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+ }
+
+ /*
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 8a80737ee6e6..307f9ec28e08 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -16,6 +16,8 @@
+ #include <linux/module.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
++#include <asm/i387.h> /* For use_eager_fpu. Ugh! */
++#include <asm/fpu-internal.h> /* For use_eager_fpu. Ugh! */
+ #include <asm/user.h>
+ #include <asm/xsave.h>
+ #include "cpuid.h"
+@@ -95,6 +97,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
+ if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
+ best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
+
++ vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);
++
+ /*
+ * The existing code assumes virtual address is 48-bit in the canonical
+ * address checks; exit if it is ever changed.
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index 4452eedfaedd..9bec2b8cdced 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -111,4 +111,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_RTM));
+ }
++
++static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
++{
++ struct kvm_cpuid_entry2 *best;
++
++ best = kvm_find_cpuid_entry(vcpu, 7, 0);
++ return best && (best->ebx & bit(X86_FEATURE_MPX));
++}
+ #endif
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index cee759299a35..88ee9282a57e 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3736,8 +3736,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
+ }
+ }
+
+-void update_permission_bitmask(struct kvm_vcpu *vcpu,
+- struct kvm_mmu *mmu, bool ept)
++static void update_permission_bitmask(struct kvm_vcpu *vcpu,
++ struct kvm_mmu *mmu, bool ept)
+ {
+ unsigned bit, byte, pfec;
+ u8 map;
+@@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
+ {
+ bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
++ bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+ struct kvm_mmu *context = &vcpu->arch.mmu;
+
+ MMU_WARN_ON(VALID_PAGE(context->root_hpa));
+@@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
+ context->base_role.cr0_wp = is_write_protection(vcpu);
+ context->base_role.smep_andnot_wp
+ = smep && !is_write_protection(vcpu);
++ context->base_role.smap_andnot_wp
++ = smap && !is_write_protection(vcpu);
+ }
+ EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
+
+@@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *new, int bytes)
+ {
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+- union kvm_mmu_page_role mask = { .word = 0 };
+ struct kvm_mmu_page *sp;
+ LIST_HEAD(invalid_list);
+ u64 entry, gentry, *spte;
+ int npte;
+ bool remote_flush, local_flush, zap_page;
++ union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
++ .cr0_wp = 1,
++ .cr4_pae = 1,
++ .nxe = 1,
++ .smep_andnot_wp = 1,
++ .smap_andnot_wp = 1,
++ };
+
+ /*
+ * If we don't have indirect shadow pages, it means no page is
+@@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ ++vcpu->kvm->stat.mmu_pte_write;
+ kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
+
+- mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
+ if (detect_write_misaligned(sp, gpa, bytes) ||
+ detect_write_flooding(sp)) {
+diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
+index c7d65637c851..0ada65ecddcf 100644
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -71,8 +71,6 @@ enum {
+ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
+ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
+ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
+-void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+- bool ept);
+
+ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+ {
+@@ -166,6 +164,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ int index = (pfec >> 1) +
+ (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
+
++ WARN_ON(pfec & PFERR_RSVD_MASK);
++
+ return (mmu->permissions[index] >> pte_access) & 1;
+ }
+
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index fd49c867b25a..6e6d115fe9b5 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
+ mmu_is_nested(vcpu));
+ if (likely(r != RET_MMIO_PF_INVALID))
+ return r;
++
++ /*
++ * page fault with PFEC.RSVD = 1 is caused by shadow
++ * page fault, should not be used to walk guest page
++ * table.
++ */
++ error_code &= ~PFERR_RSVD_MASK;
+ };
+
+ r = mmu_topup_memory_caches(vcpu);
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index cc618c882f90..a4e62fcfabcb 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4374,6 +4374,7 @@ static struct kvm_x86_ops svm_x86_ops = {
+ .cache_reg = svm_cache_reg,
+ .get_rflags = svm_get_rflags,
+ .set_rflags = svm_set_rflags,
++ .fpu_activate = svm_fpu_activate,
+ .fpu_deactivate = svm_fpu_deactivate,
+
+ .tlb_flush = svm_flush_tlb,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index a60bd3aa0965..5318d64674b0 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -10179,6 +10179,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
+ .cache_reg = vmx_cache_reg,
+ .get_rflags = vmx_get_rflags,
+ .set_rflags = vmx_set_rflags,
++ .fpu_activate = vmx_fpu_activate,
+ .fpu_deactivate = vmx_fpu_deactivate,
+
+ .tlb_flush = vmx_flush_tlb,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e222ba5d2beb..8838057da9c3 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
+ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ {
+ unsigned long old_cr4 = kvm_read_cr4(vcpu);
+- unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
+- X86_CR4_PAE | X86_CR4_SMEP;
++ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
++ X86_CR4_SMEP | X86_CR4_SMAP;
++
+ if (cr4 & CR4_RESERVED_BITS)
+ return 1;
+
+@@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
+ kvm_mmu_reset_context(vcpu);
+
+- if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
+- update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
+-
+ if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
+ kvm_update_cpuid(vcpu);
+
+@@ -6141,6 +6139,8 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
+ return;
+
+ page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
++ if (is_error_page(page))
++ return;
+ kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
+
+ /*
+@@ -6996,7 +6996,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+ fpu_save_init(&vcpu->arch.guest_fpu);
+ __kernel_fpu_end();
+ ++vcpu->stat.fpu_reload;
+- kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
++ if (!vcpu->arch.eager_fpu)
++ kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
++
+ trace_kvm_fpu(0);
+ }
+
+@@ -7012,11 +7014,21 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ unsigned int id)
+ {
++ struct kvm_vcpu *vcpu;
++
+ if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
+ printk_once(KERN_WARNING
+ "kvm: SMP vm created on host with unstable TSC; "
+ "guest TSC will not be reliable\n");
+- return kvm_x86_ops->vcpu_create(kvm, id);
++
++ vcpu = kvm_x86_ops->vcpu_create(kvm, id);
++
++ /*
++ * Activate fpu unconditionally in case the guest needs eager FPU. It will be
++ * deactivated soon if it doesn't.
++ */
++ kvm_x86_ops->fpu_activate(vcpu);
++ return vcpu;
+ }
+
+ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index f9eeae871593..5aa1f6e281d2 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -182,7 +182,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
+ request_mem_region(addr, length, desc);
+ }
+
+-static int __init acpi_reserve_resources(void)
++static void __init acpi_reserve_resources(void)
+ {
+ acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
+ "ACPI PM1a_EVT_BLK");
+@@ -211,10 +211,7 @@ static int __init acpi_reserve_resources(void)
+ if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
+ acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
+ acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
+-
+- return 0;
+ }
+-device_initcall(acpi_reserve_resources);
+
+ void acpi_os_printf(const char *fmt, ...)
+ {
+@@ -1845,6 +1842,7 @@ acpi_status __init acpi_os_initialize(void)
+
+ acpi_status __init acpi_os_initialize1(void)
+ {
++ acpi_reserve_resources();
+ kacpid_wq = alloc_workqueue("kacpid", 0, 1);
+ kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+ kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 33bb06e006c9..adce56fa9cef 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -66,6 +66,7 @@ enum board_ids {
+ board_ahci_yes_fbs,
+
+ /* board IDs for specific chipsets in alphabetical order */
++ board_ahci_avn,
+ board_ahci_mcp65,
+ board_ahci_mcp77,
+ board_ahci_mcp89,
+@@ -84,6 +85,8 @@ enum board_ids {
+ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
++static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline);
+ static void ahci_mcp89_apple_enable(struct pci_dev *pdev);
+ static bool is_mcp89_apple(struct pci_dev *pdev);
+ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+@@ -107,6 +110,11 @@ static struct ata_port_operations ahci_p5wdh_ops = {
+ .hardreset = ahci_p5wdh_hardreset,
+ };
+
++static struct ata_port_operations ahci_avn_ops = {
++ .inherits = &ahci_ops,
++ .hardreset = ahci_avn_hardreset,
++};
++
+ static const struct ata_port_info ahci_port_info[] = {
+ /* by features */
+ [board_ahci] = {
+@@ -151,6 +159,12 @@ static const struct ata_port_info ahci_port_info[] = {
+ .port_ops = &ahci_ops,
+ },
+ /* by chipsets */
++ [board_ahci_avn] = {
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_avn_ops,
++ },
+ [board_ahci_mcp65] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
+ AHCI_HFLAG_YES_NCQ),
+@@ -290,14 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
+- { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
+- { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
+- { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
+- { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
+- { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
+- { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
+- { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
+- { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
+@@ -670,6 +684,79 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+ return rc;
+ }
+
++/*
++ * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports.
++ *
++ * It has been observed with some SSDs that the timing of events in the
++ * link synchronization phase can leave the port in a state that can not
++ * be recovered by a SATA-hard-reset alone. The failing signature is
++ * SStatus.DET stuck at 1 ("Device presence detected but Phy
++ * communication not established"). It was found that unloading and
++ * reloading the driver when this problem occurs allows the drive
++ * connection to be recovered (DET advanced to 0x3). The critical
++ * component of reloading the driver is that the port state machines are
++ * reset by bouncing "port enable" in the AHCI PCS configuration
++ * register. So, reproduce that effect by bouncing a port whenever we
++ * see DET==1 after a reset.
++ */
++static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline)
++{
++ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
++ struct ata_port *ap = link->ap;
++ struct ahci_port_priv *pp = ap->private_data;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
++ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
++ unsigned long tmo = deadline - jiffies;
++ struct ata_taskfile tf;
++ bool online;
++ int rc, i;
++
++ DPRINTK("ENTER\n");
++
++ ahci_stop_engine(ap);
++
++ for (i = 0; i < 2; i++) {
++ u16 val;
++ u32 sstatus;
++ int port = ap->port_no;
++ struct ata_host *host = ap->host;
++ struct pci_dev *pdev = to_pci_dev(host->dev);
++
++ /* clear D2H reception area to properly wait for D2H FIS */
++ ata_tf_init(link->device, &tf);
++ tf.command = ATA_BUSY;
++ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
++
++ rc = sata_link_hardreset(link, timing, deadline, &online,
++ ahci_check_ready);
++
++ if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 ||
++ (sstatus & 0xf) != 1)
++ break;
++
++ ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
++ port);
++
++ pci_read_config_word(pdev, 0x92, &val);
++ val &= ~(1 << port);
++ pci_write_config_word(pdev, 0x92, val);
++ ata_msleep(ap, 1000);
++ val |= 1 << port;
++ pci_write_config_word(pdev, 0x92, val);
++ deadline += tmo;
++ }
++
++ hpriv->start_engine(ap);
++
++ if (online)
++ *class = ahci_dev_classify(ap);
++
++ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
++ return rc;
++}
++
++
+ #ifdef CONFIG_PM
+ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+ {
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 61a9c07e0dff..287c4ba0219f 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1707,8 +1707,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
+ if (unlikely(resetting))
+ status &= ~PORT_IRQ_BAD_PMP;
+
+- /* if LPM is enabled, PHYRDY doesn't mean anything */
+- if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
++ if (sata_lpm_ignore_phy_events(&ap->link)) {
+ status &= ~PORT_IRQ_PHYRDY;
+ ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
+ }
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 23dac3babfe3..87b4b7f9fdc6 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4214,7 +4214,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+- { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
++ { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+
+ /*
+@@ -6728,6 +6728,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
+ return tmp;
+ }
+
++/**
++ * sata_lpm_ignore_phy_events - test if PHY event should be ignored
++ * @link: Link receiving the event
++ *
++ * Test whether the received PHY event has to be ignored or not.
++ *
++ * LOCKING:
++ * None.
++ *
++ * RETURNS:
++ * True if the event has to be ignored.
++ */
++bool sata_lpm_ignore_phy_events(struct ata_link *link)
++{
++ unsigned long lpm_timeout = link->last_lpm_change +
++ msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
++
++ /* if LPM is enabled, PHYRDY doesn't mean anything */
++ if (link->lpm_policy > ATA_LPM_MAX_POWER)
++ return true;
++
++ /* ignore the first PHY event after the LPM policy changed
++	 * as it might be spurious
++ */
++ if ((link->flags & ATA_LFLAG_CHANGED) &&
++ time_before(jiffies, lpm_timeout))
++ return true;
++
++ return false;
++}
++EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
++
+ /*
+ * Dummy port_ops
+ */
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index d2029a462e2c..89c3d83e1ca7 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -3489,6 +3489,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+ }
+ }
+
++ link->last_lpm_change = jiffies;
++ link->flags |= ATA_LFLAG_CHANGED;
++
+ return 0;
+
+ fail:
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 237f23f68bfc..1daa0ea2f1ac 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -1443,8 +1443,10 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
+ */
+ if (clk->prepare_count) {
+ clk_core_prepare(parent);
++ flags = clk_enable_lock();
+ clk_core_enable(parent);
+ clk_core_enable(clk);
++ clk_enable_unlock(flags);
+ }
+
+ /* update the clk tree topology */
+@@ -1459,13 +1461,17 @@ static void __clk_set_parent_after(struct clk_core *core,
+ struct clk_core *parent,
+ struct clk_core *old_parent)
+ {
++ unsigned long flags;
++
+ /*
+ * Finish the migration of prepare state and undo the changes done
+ * for preventing a race with clk_enable().
+ */
+ if (core->prepare_count) {
++ flags = clk_enable_lock();
+ clk_core_disable(core);
+ clk_core_disable(old_parent);
++ clk_enable_unlock(flags);
+ clk_core_unprepare(old_parent);
+ }
+ }
+@@ -1489,8 +1495,10 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
+ clk_enable_unlock(flags);
+
+ if (clk->prepare_count) {
++ flags = clk_enable_lock();
+ clk_core_disable(clk);
+ clk_core_disable(parent);
++ clk_enable_unlock(flags);
+ clk_core_unprepare(parent);
+ }
+ return ret;
+diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
+index 07d666cc6a29..bea4a173eef5 100644
+--- a/drivers/clk/samsung/clk-exynos5420.c
++++ b/drivers/clk/samsung/clk-exynos5420.c
+@@ -271,6 +271,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
+ { .offset = SRC_MASK_PERIC0, .value = 0x11111110, },
+ { .offset = SRC_MASK_PERIC1, .value = 0x11111100, },
+ { .offset = SRC_MASK_ISP, .value = 0x11111000, },
++ { .offset = GATE_BUS_TOP, .value = 0xffffffff, },
+ { .offset = GATE_BUS_DISP1, .value = 0xffffffff, },
+ { .offset = GATE_IP_PERIC, .value = 0xffffffff, },
+ };
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 2eebd28b4c40..ccc20188f00c 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -499,18 +499,19 @@ static int __init dmi_present(const u8 *buf)
+ buf += 16;
+
+ if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
++ if (smbios_ver)
++ dmi_ver = smbios_ver;
++ else
++ dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
+ dmi_num = get_unaligned_le16(buf + 12);
+ dmi_len = get_unaligned_le16(buf + 6);
+ dmi_base = get_unaligned_le32(buf + 8);
+
+ if (dmi_walk_early(dmi_decode) == 0) {
+ if (smbios_ver) {
+- dmi_ver = smbios_ver;
+ pr_info("SMBIOS %d.%d present.\n",
+ dmi_ver >> 8, dmi_ver & 0xFF);
+ } else {
+- dmi_ver = (buf[14] & 0xF0) << 4 |
+- (buf[14] & 0x0F);
+ pr_info("Legacy DMI %d.%d present.\n",
+ dmi_ver >> 8, dmi_ver & 0xFF);
+ }
+diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
+index 443518f63f15..a6b0def4bd7b 100644
+--- a/drivers/gpio/gpio-kempld.c
++++ b/drivers/gpio/gpio-kempld.c
+@@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+ = container_of(chip, struct kempld_gpio_data, chip);
+ struct kempld_device_data *pld = gpio->pld;
+
+- return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
++ return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
+ }
+
+ static int kempld_gpio_pincount(struct kempld_device_data *pld)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 498399323a8c..406624a0b201 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -729,7 +729,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
+ kfd2kgd->get_max_engine_clock_in_mhz(
+ dev->gpu->kgd));
+ sysfs_show_64bit_prop(buffer, "local_mem_size",
+- kfd2kgd->get_vmem_size(dev->gpu->kgd));
++ (unsigned long long int) 0);
+
+ sysfs_show_32bit_prop(buffer, "fw_version",
+ kfd2kgd->get_fw_version(
+diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
+index 5ba5792bfdba..98b125763ecd 100644
+--- a/drivers/gpu/drm/drm_plane_helper.c
++++ b/drivers/gpu/drm/drm_plane_helper.c
+@@ -476,6 +476,9 @@ int drm_plane_helper_commit(struct drm_plane *plane,
+ if (!crtc[i])
+ continue;
+
++ if (crtc[i]->cursor == plane)
++ continue;
++
+ /* There's no other way to figure out whether the crtc is running. */
+ ret = drm_crtc_vblank_get(crtc[i]);
+ if (ret == 0) {
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 1afc0b419da2..965a45619f6b 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1789,7 +1789,9 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+ if ((crtc->mode.clock == test_crtc->mode.clock) &&
+ (adjusted_clock == test_adjusted_clock) &&
+ (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
+- (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
++ (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
++ (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
++ drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
+ return test_radeon_crtc->pll_id;
+ }
+ }
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 8d74de82456e..8b2c4c890507 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -412,19 +412,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
+ {
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 msg[DP_DPCD_SIZE];
+- int ret;
++ int ret, i;
+
+- ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
+- DP_DPCD_SIZE);
+- if (ret > 0) {
+- memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
++ for (i = 0; i < 7; i++) {
++ ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
++ DP_DPCD_SIZE);
++ if (ret == DP_DPCD_SIZE) {
++ memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+
+- DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+- dig_connector->dpcd);
++ DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
++ dig_connector->dpcd);
+
+- radeon_dp_probe_oui(radeon_connector);
++ radeon_dp_probe_oui(radeon_connector);
+
+- return true;
++ return true;
++ }
+ }
+ dig_connector->dpcd[0] = 0;
+ return false;
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 3e670d344a20..19aafb71fd8e 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -5804,7 +5804,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
+ /* restore context1-15 */
+ /* set vm size, must be a multiple of 4 */
+ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
++ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
+ for (i = 1; i < 16; i++) {
+ if (i < 8)
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+index 0926739c9fa7..9953356fe263 100644
+--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
++++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+@@ -400,7 +400,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
+ if (enable) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+- if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
++ if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
+@@ -438,7 +438,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
+ if (!dig || !dig->afmt)
+ return;
+
+- if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
++ if (enable && connector &&
++ drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index dab00812abaa..02d585455f49 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1272,7 +1272,8 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
+ */
+ for (i = 1; i < 8; i++) {
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
+- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
++ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
++ rdev->vm_manager.max_pfn - 1);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+ rdev->vm_manager.saved_table_addr[i]);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index b7c6bb69f3c7..88c04bc0a7f6 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -460,9 +460,6 @@ void radeon_audio_detect(struct drm_connector *connector,
+ if (!connector || !connector->encoder)
+ return;
+
+- if (!radeon_encoder_is_digital(connector->encoder))
+- return;
+-
+ rdev = connector->encoder->dev->dev_private;
+
+ if (!radeon_audio_chipset_supported(rdev))
+@@ -471,26 +468,26 @@ void radeon_audio_detect(struct drm_connector *connector,
+ radeon_encoder = to_radeon_encoder(connector->encoder);
+ dig = radeon_encoder->enc_priv;
+
+- if (!dig->afmt)
+- return;
+-
+ if (status == connector_status_connected) {
+- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++ struct radeon_connector *radeon_connector;
++ int sink_type;
++
++ if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
++ radeon_encoder->audio = NULL;
++ return;
++ }
++
++ radeon_connector = to_radeon_connector(connector);
++ sink_type = radeon_dp_getsinktype(radeon_connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
+- radeon_dp_getsinktype(radeon_connector) ==
+- CONNECTOR_OBJECT_ID_DISPLAYPORT)
++ sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ radeon_encoder->audio = rdev->audio.dp_funcs;
+ else
+ radeon_encoder->audio = rdev->audio.hdmi_funcs;
+
+ dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
+- if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+- } else {
+- radeon_audio_enable(rdev, dig->afmt->pin, 0);
+- dig->afmt->pin = NULL;
+- }
++ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+ } else {
+ radeon_audio_enable(rdev, dig->afmt->pin, 0);
+ dig->afmt->pin = NULL;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 27973e3faf0e..27def67cb6be 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1333,10 +1333,8 @@ out:
+ /* updated in get modes as well since we need to know if it's analog or digital */
+ radeon_connector_update_scratch_regs(connector, ret);
+
+- if (radeon_audio != 0) {
+- radeon_connector_get_edid(connector);
++ if (radeon_audio != 0)
+ radeon_audio_detect(connector, ret);
+- }
+
+ exit:
+ pm_runtime_mark_last_busy(connector->dev->dev);
+@@ -1661,10 +1659,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+
+ radeon_connector_update_scratch_regs(connector, ret);
+
+- if (radeon_audio != 0) {
+- radeon_connector_get_edid(connector);
++ if (radeon_audio != 0)
+ radeon_audio_detect(connector, ret);
+- }
+
+ out:
+ pm_runtime_mark_last_busy(connector->dev->dev);
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index a7fb2735d4a9..f433491fab6f 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -4288,7 +4288,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
+ /* empty context1-15 */
+ /* set vm size, must be a multiple of 4 */
+ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
++ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
+ /* Assign the pt base to something valid for now; the pts used for
+ * the VMs are determined by the application and setup and assigned
+ * on the fly in the vm part of radeon_gart.c
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index e77658cd037c..2caf5b2f3446 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -39,7 +39,6 @@ MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>");
+ /* bits 1..20 are reserved for classes */
+ #define HIDPP_QUIRK_DELAYED_INIT BIT(21)
+ #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22)
+-#define HIDPP_QUIRK_MULTI_INPUT BIT(23)
+
+ /*
+ * There are two hidpp protocols in use, the first version hidpp10 is known
+@@ -701,12 +700,6 @@ static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+ {
+- struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+-
+- if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) &&
+- (field->application == HID_GD_KEYBOARD))
+- return 0;
+-
+ return -1;
+ }
+
+@@ -715,10 +708,6 @@ static void wtp_populate_input(struct hidpp_device *hidpp,
+ {
+ struct wtp_data *wd = hidpp->private_data;
+
+- if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core)
+- /* this is the generic hid-input call */
+- return;
+-
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __clear_bit(EV_REL, input_dev->evbit);
+@@ -1234,10 +1223,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
+ connect_mask &= ~HID_CONNECT_HIDINPUT;
+
+- /* Re-enable hidinput for multi-input devices */
+- if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT)
+- connect_mask |= HID_CONNECT_HIDINPUT;
+-
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret) {
+ hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+@@ -1285,11 +1270,6 @@ static const struct hid_device_id hidpp_devices[] = {
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_T651),
+ .driver_data = HIDPP_QUIRK_CLASS_WTP },
+- { /* Keyboard TK820 */
+- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
+- USB_VENDOR_ID_LOGITECH, 0x4102),
+- .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT |
+- HIDPP_QUIRK_CLASS_WTP },
+
+ { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
+ USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
+diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
+index f3830db02d46..37f01702d081 100644
+--- a/drivers/hwmon/nct6683.c
++++ b/drivers/hwmon/nct6683.c
+@@ -439,6 +439,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
+ (*t)->dev_attr.attr.name, tg->base + i);
+ if ((*t)->s2) {
+ a2 = &su->u.a2;
++ sysfs_attr_init(&a2->dev_attr.attr);
+ a2->dev_attr.attr.name = su->name;
+ a2->nr = (*t)->u.s.nr + i;
+ a2->index = (*t)->u.s.index;
+@@ -449,6 +450,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
+ *attrs = &a2->dev_attr.attr;
+ } else {
+ a = &su->u.a1;
++ sysfs_attr_init(&a->dev_attr.attr);
+ a->dev_attr.attr.name = su->name;
+ a->index = (*t)->u.index + i;
+ a->dev_attr.attr.mode =
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index 1be41177b620..0773930c110e 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -994,6 +994,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
+ (*t)->dev_attr.attr.name, tg->base + i);
+ if ((*t)->s2) {
+ a2 = &su->u.a2;
++ sysfs_attr_init(&a2->dev_attr.attr);
+ a2->dev_attr.attr.name = su->name;
+ a2->nr = (*t)->u.s.nr + i;
+ a2->index = (*t)->u.s.index;
+@@ -1004,6 +1005,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
+ *attrs = &a2->dev_attr.attr;
+ } else {
+ a = &su->u.a1;
++ sysfs_attr_init(&a->dev_attr.attr);
+ a->dev_attr.attr.name = su->name;
+ a->index = (*t)->u.index + i;
+ a->dev_attr.attr.mode =
+diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
+index 112e4d45e4a0..68800115876b 100644
+--- a/drivers/hwmon/ntc_thermistor.c
++++ b/drivers/hwmon/ntc_thermistor.c
+@@ -239,8 +239,10 @@ static struct ntc_thermistor_platform_data *
+ ntc_thermistor_parse_dt(struct platform_device *pdev)
+ {
+ struct iio_channel *chan;
++ enum iio_chan_type type;
+ struct device_node *np = pdev->dev.of_node;
+ struct ntc_thermistor_platform_data *pdata;
++ int ret;
+
+ if (!np)
+ return NULL;
+@@ -253,6 +255,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
+ if (IS_ERR(chan))
+ return ERR_CAST(chan);
+
++ ret = iio_get_channel_type(chan, &type);
++ if (ret < 0)
++ return ERR_PTR(ret);
++
++ if (type != IIO_VOLTAGE)
++ return ERR_PTR(-EINVAL);
++
+ if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv))
+ return ERR_PTR(-ENODEV);
+ if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm))
+diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
+index 99664ebc738d..ccf4cffe0ee1 100644
+--- a/drivers/hwmon/tmp401.c
++++ b/drivers/hwmon/tmp401.c
+@@ -44,7 +44,7 @@
+ #include <linux/sysfs.h>
+
+ /* Addresses to scan */
+-static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d,
++static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
+ 0x4e, 0x4f, I2C_CLIENT_END };
+
+ enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
+diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
+index 53f32629283a..6805db0e4f07 100644
+--- a/drivers/iio/accel/st_accel_core.c
++++ b/drivers/iio/accel/st_accel_core.c
+@@ -465,6 +465,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &accel_info;
++ mutex_init(&adata->tb.buf_lock);
+
+ st_sensors_power_enable(indio_dev);
+
+diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
+index 08bcfb061ca5..56008a86b78f 100644
+--- a/drivers/iio/adc/axp288_adc.c
++++ b/drivers/iio/adc/axp288_adc.c
+@@ -53,39 +53,42 @@ static const struct iio_chan_spec const axp288_adc_channels[] = {
+ .channel = 0,
+ .address = AXP288_TS_ADC_H,
+ .datasheet_name = "TS_PIN",
++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .indexed = 1,
+ .type = IIO_TEMP,
+ .channel = 1,
+ .address = AXP288_PMIC_ADC_H,
+ .datasheet_name = "PMIC_TEMP",
++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .indexed = 1,
+ .type = IIO_TEMP,
+ .channel = 2,
+ .address = AXP288_GP_ADC_H,
+ .datasheet_name = "GPADC",
++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .indexed = 1,
+ .type = IIO_CURRENT,
+ .channel = 3,
+ .address = AXP20X_BATT_CHRG_I_H,
+ .datasheet_name = "BATT_CHG_I",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .indexed = 1,
+ .type = IIO_CURRENT,
+ .channel = 4,
+ .address = AXP20X_BATT_DISCHRG_I_H,
+ .datasheet_name = "BATT_DISCHRG_I",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .indexed = 1,
+ .type = IIO_VOLTAGE,
+ .channel = 5,
+ .address = AXP20X_BATT_V_H,
+ .datasheet_name = "BATT_V",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+ };
+
+@@ -151,9 +154,6 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
+ chan->address))
+ dev_err(&indio_dev->dev, "TS pin restore\n");
+ break;
+- case IIO_CHAN_INFO_PROCESSED:
+- ret = axp288_adc_read_channel(val, chan->address, info->regmap);
+- break;
+ default:
+ ret = -EINVAL;
+ }
+diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
+index 51e2a83c9404..115f6e99a7fa 100644
+--- a/drivers/iio/adc/cc10001_adc.c
++++ b/drivers/iio/adc/cc10001_adc.c
+@@ -35,8 +35,9 @@
+ #define CC10001_ADC_EOC_SET BIT(0)
+
+ #define CC10001_ADC_CHSEL_SAMPLED 0x0c
+-#define CC10001_ADC_POWER_UP 0x10
+-#define CC10001_ADC_POWER_UP_SET BIT(0)
++#define CC10001_ADC_POWER_DOWN 0x10
++#define CC10001_ADC_POWER_DOWN_SET BIT(0)
++
+ #define CC10001_ADC_DEBUG 0x14
+ #define CC10001_ADC_DATA_COUNT 0x20
+
+@@ -62,7 +63,6 @@ struct cc10001_adc_device {
+ u16 *buf;
+
+ struct mutex lock;
+- unsigned long channel_map;
+ unsigned int start_delay_ns;
+ unsigned int eoc_delay_ns;
+ };
+@@ -79,6 +79,18 @@ static inline u32 cc10001_adc_read_reg(struct cc10001_adc_device *adc_dev,
+ return readl(adc_dev->reg_base + reg);
+ }
+
++static void cc10001_adc_power_up(struct cc10001_adc_device *adc_dev)
++{
++ cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN, 0);
++ ndelay(adc_dev->start_delay_ns);
++}
++
++static void cc10001_adc_power_down(struct cc10001_adc_device *adc_dev)
++{
++ cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN,
++ CC10001_ADC_POWER_DOWN_SET);
++}
++
+ static void cc10001_adc_start(struct cc10001_adc_device *adc_dev,
+ unsigned int channel)
+ {
+@@ -88,6 +100,7 @@ static void cc10001_adc_start(struct cc10001_adc_device *adc_dev,
+ val = (channel & CC10001_ADC_CH_MASK) | CC10001_ADC_MODE_SINGLE_CONV;
+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val);
+
++ udelay(1);
+ val = cc10001_adc_read_reg(adc_dev, CC10001_ADC_CONFIG);
+ val = val | CC10001_ADC_START_CONV;
+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val);
+@@ -129,6 +142,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
+ struct iio_dev *indio_dev;
+ unsigned int delay_ns;
+ unsigned int channel;
++ unsigned int scan_idx;
+ bool sample_invalid;
+ u16 *data;
+ int i;
+@@ -139,20 +153,17 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
+
+ mutex_lock(&adc_dev->lock);
+
+- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP,
+- CC10001_ADC_POWER_UP_SET);
+-
+- /* Wait for 8 (6+2) clock cycles before activating START */
+- ndelay(adc_dev->start_delay_ns);
++ cc10001_adc_power_up(adc_dev);
+
+ /* Calculate delay step for eoc and sampled data */
+ delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
+
+ i = 0;
+ sample_invalid = false;
+- for_each_set_bit(channel, indio_dev->active_scan_mask,
++ for_each_set_bit(scan_idx, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+
++ channel = indio_dev->channels[scan_idx].channel;
+ cc10001_adc_start(adc_dev, channel);
+
+ data[i] = cc10001_adc_poll_done(indio_dev, channel, delay_ns);
+@@ -166,7 +177,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
+ }
+
+ done:
+- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0);
++ cc10001_adc_power_down(adc_dev);
+
+ mutex_unlock(&adc_dev->lock);
+
+@@ -185,11 +196,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
+ unsigned int delay_ns;
+ u16 val;
+
+- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP,
+- CC10001_ADC_POWER_UP_SET);
+-
+- /* Wait for 8 (6+2) clock cycles before activating START */
+- ndelay(adc_dev->start_delay_ns);
++ cc10001_adc_power_up(adc_dev);
+
+ /* Calculate delay step for eoc and sampled data */
+ delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
+@@ -198,7 +205,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
+
+ val = cc10001_adc_poll_done(indio_dev, chan->channel, delay_ns);
+
+- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0);
++ cc10001_adc_power_down(adc_dev);
+
+ return val;
+ }
+@@ -224,7 +231,7 @@ static int cc10001_adc_read_raw(struct iio_dev *indio_dev,
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(adc_dev->reg);
+- if (ret)
++ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
+@@ -255,22 +262,22 @@ static const struct iio_info cc10001_adc_info = {
+ .update_scan_mode = &cc10001_update_scan_mode,
+ };
+
+-static int cc10001_adc_channel_init(struct iio_dev *indio_dev)
++static int cc10001_adc_channel_init(struct iio_dev *indio_dev,
++ unsigned long channel_map)
+ {
+- struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
+ struct iio_chan_spec *chan_array, *timestamp;
+ unsigned int bit, idx = 0;
+
+- indio_dev->num_channels = bitmap_weight(&adc_dev->channel_map,
+- CC10001_ADC_NUM_CHANNELS);
++ indio_dev->num_channels = bitmap_weight(&channel_map,
++ CC10001_ADC_NUM_CHANNELS) + 1;
+
+- chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels + 1,
++ chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels,
+ sizeof(struct iio_chan_spec),
+ GFP_KERNEL);
+ if (!chan_array)
+ return -ENOMEM;
+
+- for_each_set_bit(bit, &adc_dev->channel_map, CC10001_ADC_NUM_CHANNELS) {
++ for_each_set_bit(bit, &channel_map, CC10001_ADC_NUM_CHANNELS) {
+ struct iio_chan_spec *chan = &chan_array[idx];
+
+ chan->type = IIO_VOLTAGE;
+@@ -305,6 +312,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
+ unsigned long adc_clk_rate;
+ struct resource *res;
+ struct iio_dev *indio_dev;
++ unsigned long channel_map;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
+@@ -313,9 +321,9 @@ static int cc10001_adc_probe(struct platform_device *pdev)
+
+ adc_dev = iio_priv(indio_dev);
+
+- adc_dev->channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
++ channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
+ if (!of_property_read_u32(node, "adc-reserved-channels", &ret))
+- adc_dev->channel_map &= ~ret;
++ channel_map &= ~ret;
+
+ adc_dev->reg = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(adc_dev->reg))
+@@ -361,7 +369,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
+ adc_dev->start_delay_ns = adc_dev->eoc_delay_ns * CC10001_WAIT_CYCLES;
+
+ /* Setup the ADC channels available on the device */
+- ret = cc10001_adc_channel_init(indio_dev);
++ ret = cc10001_adc_channel_init(indio_dev, channel_map);
+ if (ret < 0)
+ goto err_disable_clk;
+
+diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
+index 3211729bcb0b..0c4618b4d515 100644
+--- a/drivers/iio/adc/qcom-spmi-vadc.c
++++ b/drivers/iio/adc/qcom-spmi-vadc.c
+@@ -18,6 +18,7 @@
+ #include <linux/iio/iio.h>
+ #include <linux/interrupt.h>
+ #include <linux/kernel.h>
++#include <linux/math64.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+@@ -471,11 +472,11 @@ static s32 vadc_calibrate(struct vadc_priv *vadc,
+ const struct vadc_channel_prop *prop, u16 adc_code)
+ {
+ const struct vadc_prescale_ratio *prescale;
+- s32 voltage;
++ s64 voltage;
+
+ voltage = adc_code - vadc->graph[prop->calibration].gnd;
+ voltage *= vadc->graph[prop->calibration].dx;
+- voltage = voltage / vadc->graph[prop->calibration].dy;
++ voltage = div64_s64(voltage, vadc->graph[prop->calibration].dy);
+
+ if (prop->calibration == VADC_CALIB_ABSOLUTE)
+ voltage += vadc->graph[prop->calibration].dx;
+@@ -487,7 +488,7 @@ static s32 vadc_calibrate(struct vadc_priv *vadc,
+
+ voltage = voltage * prescale->den;
+
+- return voltage / prescale->num;
++ return div64_s64(voltage, prescale->num);
+ }
+
+ static int vadc_decimation_from_dt(u32 value)
+diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
+index a221f7329b79..ce93bd8e3f68 100644
+--- a/drivers/iio/adc/xilinx-xadc-core.c
++++ b/drivers/iio/adc/xilinx-xadc-core.c
+@@ -856,6 +856,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
+ switch (chan->address) {
+ case XADC_REG_VCCINT:
+ case XADC_REG_VCCAUX:
++ case XADC_REG_VREFP:
+ case XADC_REG_VCCBRAM:
+ case XADC_REG_VCCPINT:
+ case XADC_REG_VCCPAUX:
+@@ -996,7 +997,7 @@ static const struct iio_event_spec xadc_voltage_events[] = {
+ .num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+- .sign = 'u', \
++ .sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+@@ -1008,7 +1009,7 @@ static const struct iio_event_spec xadc_voltage_events[] = {
+ static const struct iio_chan_spec xadc_channels[] = {
+ XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
+ XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
+- XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCINT, "vccaux", true),
++ XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
+ XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
+ XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
+ XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
+diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
+index c7487e8d7f80..54adc5087210 100644
+--- a/drivers/iio/adc/xilinx-xadc.h
++++ b/drivers/iio/adc/xilinx-xadc.h
+@@ -145,9 +145,9 @@ static inline int xadc_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ #define XADC_REG_MAX_VCCPINT 0x28
+ #define XADC_REG_MAX_VCCPAUX 0x29
+ #define XADC_REG_MAX_VCCO_DDR 0x2a
+-#define XADC_REG_MIN_VCCPINT 0x2b
+-#define XADC_REG_MIN_VCCPAUX 0x2c
+-#define XADC_REG_MIN_VCCO_DDR 0x2d
++#define XADC_REG_MIN_VCCPINT 0x2c
++#define XADC_REG_MIN_VCCPAUX 0x2d
++#define XADC_REG_MIN_VCCO_DDR 0x2e
+
+ #define XADC_REG_CONF0 0x40
+ #define XADC_REG_CONF1 0x41
+diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
+index edd13d2b4121..8dd0477e201c 100644
+--- a/drivers/iio/common/st_sensors/st_sensors_core.c
++++ b/drivers/iio/common/st_sensors/st_sensors_core.c
+@@ -304,8 +304,6 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *of_pdata;
+ int err = 0;
+
+- mutex_init(&sdata->tb.buf_lock);
+-
+ /* If OF/DT pdata exists, it will take precedence of anything else */
+ of_pdata = st_sensors_of_probe(indio_dev->dev.parent, pdata);
+ if (of_pdata)
+diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
+index f07a2336f7dc..566f7d2df031 100644
+--- a/drivers/iio/gyro/st_gyro_core.c
++++ b/drivers/iio/gyro/st_gyro_core.c
+@@ -317,6 +317,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &gyro_info;
++ mutex_init(&gdata->tb.buf_lock);
+
+ st_sensors_power_enable(indio_dev);
+
+diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
+index 3ecf79ed08ac..88f21bbe947c 100644
+--- a/drivers/iio/light/hid-sensor-prox.c
++++ b/drivers/iio/light/hid-sensor-prox.c
+@@ -43,8 +43,6 @@ struct prox_state {
+ static const struct iio_chan_spec prox_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+- .modified = 1,
+- .channel2 = IIO_NO_MOD,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
+index 8ade473f99fe..2e56f812a644 100644
+--- a/drivers/iio/magnetometer/st_magn_core.c
++++ b/drivers/iio/magnetometer/st_magn_core.c
+@@ -369,6 +369,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &magn_info;
++ mutex_init(&mdata->tb.buf_lock);
+
+ st_sensors_power_enable(indio_dev);
+
+diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
+index 1af314926ebd..476a7d03d2ce 100644
+--- a/drivers/iio/pressure/hid-sensor-press.c
++++ b/drivers/iio/pressure/hid-sensor-press.c
+@@ -47,8 +47,6 @@ struct press_state {
+ static const struct iio_chan_spec press_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+- .modified = 1,
+- .channel2 = IIO_NO_MOD,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
+index 97baf40d424b..e881fa6291e9 100644
+--- a/drivers/iio/pressure/st_pressure_core.c
++++ b/drivers/iio/pressure/st_pressure_core.c
+@@ -417,6 +417,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &press_info;
++ mutex_init(&press_data->tb.buf_lock);
+
+ st_sensors_power_enable(indio_dev);
+
+diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
+index b85ddbc979e0..e5558b2660f2 100644
+--- a/drivers/infiniband/core/iwpm_msg.c
++++ b/drivers/infiniband/core/iwpm_msg.c
+@@ -33,7 +33,7 @@
+
+ #include "iwpm_util.h"
+
+-static const char iwpm_ulib_name[] = "iWarpPortMapperUser";
++static const char iwpm_ulib_name[IWPM_ULIBNAME_SIZE] = "iWarpPortMapperUser";
+ static int iwpm_ulib_version = 3;
+ static int iwpm_user_pid = IWPM_PID_UNDEFINED;
+ static atomic_t echo_nlmsg_seq;
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 991dc6b20a58..79363b687195 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -315,7 +315,7 @@ static void elantech_report_semi_mt_data(struct input_dev *dev,
+ unsigned int x2, unsigned int y2)
+ {
+ elantech_set_slot(dev, 0, num_fingers != 0, x1, y1);
+- elantech_set_slot(dev, 1, num_fingers == 2, x2, y2);
++ elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2);
+ }
+
+ /*
+diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
+index 6d5a5c44453b..173e70dbf61b 100644
+--- a/drivers/iommu/amd_iommu_v2.c
++++ b/drivers/iommu/amd_iommu_v2.c
+@@ -266,6 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state)
+
+ static void put_pasid_state_wait(struct pasid_state *pasid_state)
+ {
++ atomic_dec(&pasid_state->count);
+ wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
+ free_pasid_state(pasid_state);
+ }
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index a3adde6519f0..bd6252b01510 100644
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -224,14 +224,7 @@
+ #define RESUME_TERMINATE (1 << 0)
+
+ #define TTBCR2_SEP_SHIFT 15
+-#define TTBCR2_SEP_MASK 0x7
+-
+-#define TTBCR2_ADDR_32 0
+-#define TTBCR2_ADDR_36 1
+-#define TTBCR2_ADDR_40 2
+-#define TTBCR2_ADDR_42 3
+-#define TTBCR2_ADDR_44 4
+-#define TTBCR2_ADDR_48 5
++#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
+
+ #define TTBRn_HI_ASID_SHIFT 16
+
+@@ -783,26 +776,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+ if (smmu->version > ARM_SMMU_V1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+- switch (smmu->va_size) {
+- case 32:
+- reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
+- break;
+- case 36:
+- reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
+- break;
+- case 40:
+- reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
+- break;
+- case 42:
+- reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
+- break;
+- case 44:
+- reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
+- break;
+- case 48:
+- reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
+- break;
+- }
++ reg |= TTBCR2_SEP_UPSTREAM;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+ }
+ } else {
+diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
+index 7dc93aa004c8..312ffd3d0017 100644
+--- a/drivers/lguest/core.c
++++ b/drivers/lguest/core.c
+@@ -173,7 +173,7 @@ static void unmap_switcher(void)
+ bool lguest_address_ok(const struct lguest *lg,
+ unsigned long addr, unsigned long len)
+ {
+- return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
++ return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
+ }
+
+ /*
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 6554d9148927..757f1ba34c4d 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -823,6 +823,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
+ }
+ EXPORT_SYMBOL(dm_consume_args);
+
++static bool __table_type_request_based(unsigned table_type)
++{
++ return (table_type == DM_TYPE_REQUEST_BASED ||
++ table_type == DM_TYPE_MQ_REQUEST_BASED);
++}
++
+ static int dm_table_set_type(struct dm_table *t)
+ {
+ unsigned i;
+@@ -855,8 +861,7 @@ static int dm_table_set_type(struct dm_table *t)
+ * Determine the type from the live device.
+ * Default to bio-based if device is new.
+ */
+- if (live_md_type == DM_TYPE_REQUEST_BASED ||
+- live_md_type == DM_TYPE_MQ_REQUEST_BASED)
++ if (__table_type_request_based(live_md_type))
+ request_based = 1;
+ else
+ bio_based = 1;
+@@ -906,7 +911,7 @@ static int dm_table_set_type(struct dm_table *t)
+ }
+ t->type = DM_TYPE_MQ_REQUEST_BASED;
+
+- } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) {
++ } else if (list_empty(devices) && __table_type_request_based(live_md_type)) {
+ /* inherit live MD type */
+ t->type = live_md_type;
+
+@@ -928,10 +933,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
+
+ bool dm_table_request_based(struct dm_table *t)
+ {
+- unsigned table_type = dm_table_get_type(t);
+-
+- return (table_type == DM_TYPE_REQUEST_BASED ||
+- table_type == DM_TYPE_MQ_REQUEST_BASED);
++ return __table_type_request_based(dm_table_get_type(t));
+ }
+
+ bool dm_table_mq_request_based(struct dm_table *t)
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 8001fe9e3434..9b4e30a82e4a 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1642,8 +1642,7 @@ static int dm_merge_bvec(struct request_queue *q,
+ struct mapped_device *md = q->queuedata;
+ struct dm_table *map = dm_get_live_table_fast(md);
+ struct dm_target *ti;
+- sector_t max_sectors;
+- int max_size = 0;
++ sector_t max_sectors, max_size = 0;
+
+ if (unlikely(!map))
+ goto out;
+@@ -1658,8 +1657,16 @@ static int dm_merge_bvec(struct request_queue *q,
+ max_sectors = min(max_io_len(bvm->bi_sector, ti),
+ (sector_t) queue_max_sectors(q));
+ max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
+- if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
+- max_size = 0;
++
++ /*
++ * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
++ * to the targets' merge function since it holds sectors not bytes).
++ * Just doing this as an interim fix for stable@ because the more
++ * comprehensive cleanup of switching to sector_t will impact every
++ * DM target that implements a ->merge hook.
++ */
++ if (max_size > INT_MAX)
++ max_size = INT_MAX;
+
+ /*
+ * merge_bvec_fn() returns number of bytes
+@@ -1667,7 +1674,7 @@ static int dm_merge_bvec(struct request_queue *q,
+ * max is precomputed maximal io size
+ */
+ if (max_size && ti->type->merge)
+- max_size = ti->type->merge(ti, bvm, biovec, max_size);
++ max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
+ /*
+ * If the target doesn't support merge method and some of the devices
+ * provided their merge_bvec method (we know this by looking for the
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index e47d1dd046da..907534b7f40d 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -4138,12 +4138,12 @@ action_store(struct mddev *mddev, const char *page, size_t len)
+ if (!mddev->pers || !mddev->pers->sync_request)
+ return -EINVAL;
+
+- if (cmd_match(page, "frozen"))
+- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+- else
+- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+ if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
++ if (cmd_match(page, "frozen"))
++ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
++ else
++ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ flush_workqueue(md_misc_wq);
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+@@ -4156,16 +4156,17 @@ action_store(struct mddev *mddev, const char *page, size_t len)
+ test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ return -EBUSY;
+ else if (cmd_match(page, "resync"))
+- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
++ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ else if (cmd_match(page, "recover")) {
++ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ } else if (cmd_match(page, "reshape")) {
+ int err;
+ if (mddev->pers->start_reshape == NULL)
+ return -EINVAL;
+ err = mddev_lock(mddev);
+ if (!err) {
++ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ err = mddev->pers->start_reshape(mddev);
+ mddev_unlock(mddev);
+ }
+@@ -4177,6 +4178,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
+ set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ else if (!cmd_match(page, "repair"))
+ return -EINVAL;
++ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ }
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 3b5d7f704aa3..903391ce9353 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -517,6 +517,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
+ ? (sector & (chunk_sects-1))
+ : sector_div(sector, chunk_sects));
+
++ /* Restore due to sector_div */
++ sector = bio->bi_iter.bi_sector;
++
+ if (sectors < bio_sectors(bio)) {
+ split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+@@ -524,7 +527,6 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
+ split = bio;
+ }
+
+- sector = bio->bi_iter.bi_sector;
+ zone = find_zone(mddev->private, &sector);
+ tmp_dev = map_sector(mddev, zone, sector, &sector);
+ split->bi_bdev = tmp_dev->bdev;
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index cd2f96b2c572..007ab861eca0 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1933,7 +1933,8 @@ static int resize_stripes(struct r5conf *conf, int newsize)
+
+ conf->slab_cache = sc;
+ conf->active_name = 1-conf->active_name;
+- conf->pool_size = newsize;
++ if (!err)
++ conf->pool_size = newsize;
+ return err;
+ }
+
+diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
+index ae498b53ee40..46e3840c7a37 100644
+--- a/drivers/mfd/da9052-core.c
++++ b/drivers/mfd/da9052-core.c
+@@ -433,6 +433,10 @@ EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
+ static const struct mfd_cell da9052_subdev_info[] = {
+ {
+ .name = "da9052-regulator",
++ .id = 0,
++ },
++ {
++ .name = "da9052-regulator",
+ .id = 1,
+ },
+ {
+@@ -484,10 +488,6 @@ static const struct mfd_cell da9052_subdev_info[] = {
+ .id = 13,
+ },
+ {
+- .name = "da9052-regulator",
+- .id = 14,
+- },
+- {
+ .name = "da9052-onkey",
+ },
+ {
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 03d7c7521d97..9a39e0b7e583 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -1304,7 +1304,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+
+ if (ios->clock) {
+ unsigned int clock_min = ~0U;
+- u32 clkdiv;
++ int clkdiv;
+
+ spin_lock_bh(&host->lock);
+ if (!host->mode_reg) {
+@@ -1328,7 +1328,12 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ /* Calculate clock divider */
+ if (host->caps.has_odd_clk_div) {
+ clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
+- if (clkdiv > 511) {
++ if (clkdiv < 0) {
++ dev_warn(&mmc->class_dev,
++ "clock %u too fast; using %lu\n",
++ clock_min, host->bus_hz / 2);
++ clkdiv = 0;
++ } else if (clkdiv > 511) {
+ dev_warn(&mmc->class_dev,
+ "clock %u too slow; using %lu\n",
+ clock_min, host->bus_hz / (511 + 2));
+diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
+index db2c05b6fe7f..c9eb78f10a0d 100644
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -310,6 +310,8 @@ static void ubiblock_do_work(struct work_struct *work)
+ blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+
+ ret = ubiblock_read(pdu);
++ rq_flush_dcache_pages(req);
++
+ blk_mq_end_request(req, ret);
+ }
+
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+index 6262612dec45..7a3231d8b933 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+@@ -512,11 +512,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
+ msgbuf->rx_pktids,
+ msgbuf->ioctl_resp_pktid);
+ if (msgbuf->ioctl_resp_ret_len != 0) {
+- if (!skb) {
+- brcmf_err("Invalid packet id idx recv'd %d\n",
+- msgbuf->ioctl_resp_pktid);
++ if (!skb)
+ return -EBADF;
+- }
++
+ memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
+ len : msgbuf->ioctl_resp_ret_len);
+ }
+@@ -875,10 +873,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
+ flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
+ msgbuf->tx_pktids, idx);
+- if (!skb) {
+- brcmf_err("Invalid packet id idx recv'd %d\n", idx);
++ if (!skb)
+ return;
+- }
+
+ set_bit(flowid, msgbuf->txstatus_done_map);
+ commonring = msgbuf->flowrings[flowid];
+@@ -1157,6 +1153,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
+
+ skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
+ msgbuf->rx_pktids, idx);
++ if (!skb)
++ return;
+
+ if (data_offset)
+ skb_pull(skb, data_offset);
+diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
+index 14e8fd661889..fd5a0bb1493f 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
+@@ -1742,8 +1742,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
+ int i, j, n_matches, ret;
+
+ fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+- if (!IS_ERR_OR_NULL(fw_status))
++ if (!IS_ERR_OR_NULL(fw_status)) {
+ reasons = le32_to_cpu(fw_status->wakeup_reasons);
++ kfree(fw_status);
++ }
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+ wakeup.rfkill_release = true;
+@@ -1860,15 +1862,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
+ /* get the BSS vif pointer again */
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif))
+- goto out_unlock;
++ goto err;
+
+ ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
+ if (ret)
+- goto out_unlock;
++ goto err;
+
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(mvm, "Device was reset during suspend\n");
+- goto out_unlock;
++ goto err;
+ }
+
+ /* query SRAM first in case we want event logging */
+@@ -1886,7 +1888,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
+ /* has unlocked the mutex, so skip that */
+ goto out;
+
+- out_unlock:
++err:
++ iwl_mvm_free_nd(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ out:
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 69935aa5a1b3..cb72edb3d16a 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -5,8 +5,8 @@
+ *
+ * GPL LICENSE SUMMARY
+ *
+- * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
++ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
++ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+@@ -31,8 +31,8 @@
+ *
+ * BSD LICENSE
+ *
+- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
++ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
++ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+@@ -104,7 +104,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
+ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
+ {
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+- struct page *page;
++ struct page *page = NULL;
+ dma_addr_t phys;
+ u32 size;
+ u8 power;
+@@ -131,6 +131,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, phys)) {
+ __free_pages(page, order);
++ page = NULL;
+ continue;
+ }
+ IWL_INFO(trans,
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index 8444313eabe2..8694dddcce9a 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -1040,6 +1040,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x07d1, 0x3c17) },
+ { USB_DEVICE(0x2001, 0x3317) },
+ { USB_DEVICE(0x2001, 0x3c1b) },
++ { USB_DEVICE(0x2001, 0x3c25) },
+ /* Draytek */
+ { USB_DEVICE(0x07fa, 0x7712) },
+ /* DVICO */
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index 46ee956d0235..27cd6cabf6c5 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -126,7 +126,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
+
+ do {
+ status = usb_control_msg(udev, pipe, request, reqtype, value,
+- index, pdata, len, 0); /*max. timeout*/
++ index, pdata, len, 1000);
+ if (status < 0) {
+ /* firmware download is checksumed, don't retry */
+ if ((value >= FW_8192C_START_ADDRESS &&
+diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
+index 13584e24736a..4d7d60e593b8 100644
+--- a/drivers/power/reset/at91-reset.c
++++ b/drivers/power/reset/at91-reset.c
+@@ -212,9 +212,9 @@ static int at91_reset_platform_probe(struct platform_device *pdev)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1 );
+ at91_ramc_base[idx] = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+- if (IS_ERR(at91_ramc_base[idx])) {
++ if (!at91_ramc_base[idx]) {
+ dev_err(&pdev->dev, "Could not map ram controller address\n");
+- return PTR_ERR(at91_ramc_base[idx]);
++ return -ENOMEM;
+ }
+ }
+
+diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
+index 476171a768d6..8a029f9bc18c 100644
+--- a/drivers/pwm/pwm-img.c
++++ b/drivers/pwm/pwm-img.c
+@@ -16,6 +16,7 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
++#include <linux/of_device.h>
+ #include <linux/platform_device.h>
+ #include <linux/pwm.h>
+ #include <linux/regmap.h>
+@@ -38,7 +39,22 @@
+ #define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1
+ #define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4)
+
+-#define MAX_TMBASE_STEPS 65536
++/*
++ * PWM period is specified with a timebase register,
++ * in number of step periods. The PWM duty cycle is also
++ * specified in step periods, in the [0, $timebase] range.
++ * In other words, the timebase imposes the duty cycle
++ * resolution. Therefore, let's constrain the timebase to
++ * a minimum value to allow a sane range of duty cycle values.
++ * Imposing a minimum timebase will impose a maximum PWM frequency.
++ *
++ * The value chosen is completely arbitrary.
++ */
++#define MIN_TMBASE_STEPS 16
++
++struct img_pwm_soc_data {
++ u32 max_timebase;
++};
+
+ struct img_pwm_chip {
+ struct device *dev;
+@@ -47,6 +63,9 @@ struct img_pwm_chip {
+ struct clk *sys_clk;
+ void __iomem *base;
+ struct regmap *periph_regs;
++ int max_period_ns;
++ int min_period_ns;
++ const struct img_pwm_soc_data *data;
+ };
+
+ static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
+@@ -72,24 +91,31 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ u32 val, div, duty, timebase;
+ unsigned long mul, output_clk_hz, input_clk_hz;
+ struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
++ unsigned int max_timebase = pwm_chip->data->max_timebase;
++
++ if (period_ns < pwm_chip->min_period_ns ||
++ period_ns > pwm_chip->max_period_ns) {
++ dev_err(chip->dev, "configured period not in range\n");
++ return -ERANGE;
++ }
+
+ input_clk_hz = clk_get_rate(pwm_chip->pwm_clk);
+ output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns);
+
+ mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
+- if (mul <= MAX_TMBASE_STEPS) {
++ if (mul <= max_timebase) {
+ div = PWM_CTRL_CFG_NO_SUB_DIV;
+ timebase = DIV_ROUND_UP(mul, 1);
+- } else if (mul <= MAX_TMBASE_STEPS * 8) {
++ } else if (mul <= max_timebase * 8) {
+ div = PWM_CTRL_CFG_SUB_DIV0;
+ timebase = DIV_ROUND_UP(mul, 8);
+- } else if (mul <= MAX_TMBASE_STEPS * 64) {
++ } else if (mul <= max_timebase * 64) {
+ div = PWM_CTRL_CFG_SUB_DIV1;
+ timebase = DIV_ROUND_UP(mul, 64);
+- } else if (mul <= MAX_TMBASE_STEPS * 512) {
++ } else if (mul <= max_timebase * 512) {
+ div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
+ timebase = DIV_ROUND_UP(mul, 512);
+- } else if (mul > MAX_TMBASE_STEPS * 512) {
++ } else if (mul > max_timebase * 512) {
+ dev_err(chip->dev,
+ "failed to configure timebase steps/divider value\n");
+ return -EINVAL;
+@@ -143,11 +169,27 @@ static const struct pwm_ops img_pwm_ops = {
+ .owner = THIS_MODULE,
+ };
+
++static const struct img_pwm_soc_data pistachio_pwm = {
++ .max_timebase = 255,
++};
++
++static const struct of_device_id img_pwm_of_match[] = {
++ {
++ .compatible = "img,pistachio-pwm",
++ .data = &pistachio_pwm,
++ },
++ { }
++};
++MODULE_DEVICE_TABLE(of, img_pwm_of_match);
++
+ static int img_pwm_probe(struct platform_device *pdev)
+ {
+ int ret;
++ u64 val;
++ unsigned long clk_rate;
+ struct resource *res;
+ struct img_pwm_chip *pwm;
++ const struct of_device_id *of_dev_id;
+
+ pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+ if (!pwm)
+@@ -160,6 +202,11 @@ static int img_pwm_probe(struct platform_device *pdev)
+ if (IS_ERR(pwm->base))
+ return PTR_ERR(pwm->base);
+
++ of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev);
++ if (!of_dev_id)
++ return -ENODEV;
++ pwm->data = of_dev_id->data;
++
+ pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "img,cr-periph");
+ if (IS_ERR(pwm->periph_regs))
+@@ -189,6 +236,17 @@ static int img_pwm_probe(struct platform_device *pdev)
+ goto disable_sysclk;
+ }
+
++ clk_rate = clk_get_rate(pwm->pwm_clk);
++
++ /* The maximum input clock divider is 512 */
++ val = (u64)NSEC_PER_SEC * 512 * pwm->data->max_timebase;
++ do_div(val, clk_rate);
++ pwm->max_period_ns = val;
++
++ val = (u64)NSEC_PER_SEC * MIN_TMBASE_STEPS;
++ do_div(val, clk_rate);
++ pwm->min_period_ns = val;
++
+ pwm->chip.dev = &pdev->dev;
+ pwm->chip.ops = &img_pwm_ops;
+ pwm->chip.base = -1;
+@@ -228,12 +286,6 @@ static int img_pwm_remove(struct platform_device *pdev)
+ return pwmchip_remove(&pwm_chip->chip);
+ }
+
+-static const struct of_device_id img_pwm_of_match[] = {
+- { .compatible = "img,pistachio-pwm", },
+- { }
+-};
+-MODULE_DEVICE_TABLE(of, img_pwm_of_match);
+-
+ static struct platform_driver img_pwm_driver = {
+ .driver = {
+ .name = "img-pwm",
+diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
+index 8a4df7a1f2ee..e628d4c2f2ae 100644
+--- a/drivers/regulator/da9052-regulator.c
++++ b/drivers/regulator/da9052-regulator.c
+@@ -394,6 +394,7 @@ static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
+
+ static int da9052_regulator_probe(struct platform_device *pdev)
+ {
++ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ struct regulator_config config = { };
+ struct da9052_regulator *regulator;
+ struct da9052 *da9052;
+@@ -409,7 +410,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
+ regulator->da9052 = da9052;
+
+ regulator->info = find_regulator_info(regulator->da9052->chip_id,
+- pdev->id);
++ cell->id);
+ if (regulator->info == NULL) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ return -EINVAL;
+@@ -419,7 +420,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
+ config.driver_data = regulator;
+ config.regmap = da9052->regmap;
+ if (pdata && pdata->regulators) {
+- config.init_data = pdata->regulators[pdev->id];
++ config.init_data = pdata->regulators[cell->id];
+ } else {
+ #ifdef CONFIG_OF
+ struct device_node *nproot = da9052->dev->of_node;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 3290a3ed5b31..a661d339adf7 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1624,6 +1624,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
+ {
+ u64 start_lba = blk_rq_pos(scmd->request);
+ u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
++ u64 factor = scmd->device->sector_size / 512;
+ u64 bad_lba;
+ int info_valid;
+ /*
+@@ -1645,16 +1646,9 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
+ if (scsi_bufflen(scmd) <= scmd->device->sector_size)
+ return 0;
+
+- if (scmd->device->sector_size < 512) {
+- /* only legitimate sector_size here is 256 */
+- start_lba <<= 1;
+- end_lba <<= 1;
+- } else {
+- /* be careful ... don't want any overflows */
+- unsigned int factor = scmd->device->sector_size / 512;
+- do_div(start_lba, factor);
+- do_div(end_lba, factor);
+- }
++ /* be careful ... don't want any overflows */
++ do_div(start_lba, factor);
++ do_div(end_lba, factor);
+
+ /* The bad lba was reported incorrectly, we have no idea where
+ * the error is.
+@@ -2212,8 +2206,7 @@ got_data:
+ if (sector_size != 512 &&
+ sector_size != 1024 &&
+ sector_size != 2048 &&
+- sector_size != 4096 &&
+- sector_size != 256) {
++ sector_size != 4096) {
+ sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
+ sector_size);
+ /*
+@@ -2268,8 +2261,6 @@ got_data:
+ sdkp->capacity <<= 2;
+ else if (sector_size == 1024)
+ sdkp->capacity <<= 1;
+- else if (sector_size == 256)
+- sdkp->capacity >>= 1;
+
+ blk_queue_physical_block_size(sdp->request_queue,
+ sdkp->physical_block_size);
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index bf8c5c1e254e..75efaaeb0eca 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1565,8 +1565,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ break;
+ default:
+ vm_srb->data_in = UNKNOWN_TYPE;
+- vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
+- SRB_FLAGS_DATA_OUT);
++ vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
+ break;
+ }
+
+diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
+index d1ab996b3305..a21a51efaad0 100644
+--- a/drivers/staging/gdm724x/gdm_mux.c
++++ b/drivers/staging/gdm724x/gdm_mux.c
+@@ -158,7 +158,7 @@ static int up_to_host(struct mux_rx *r)
+ unsigned int start_flag;
+ unsigned int payload_size;
+ unsigned short packet_type;
+- int dummy_cnt;
++ int total_len;
+ u32 packet_size_sum = r->offset;
+ int index;
+ int ret = TO_HOST_INVALID_PACKET;
+@@ -176,10 +176,10 @@ static int up_to_host(struct mux_rx *r)
+ break;
+ }
+
+- dummy_cnt = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
++ total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
+
+ if (len - packet_size_sum <
+- MUX_HEADER_SIZE + payload_size + dummy_cnt) {
++ total_len) {
+ pr_err("invalid payload : %d %d %04x\n",
+ payload_size, len, packet_type);
+ break;
+@@ -202,7 +202,7 @@ static int up_to_host(struct mux_rx *r)
+ break;
+ }
+
+- packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
++ packet_size_sum += total_len;
+ if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
+ ret = r->callback(NULL,
+ 0,
+@@ -361,7 +361,6 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
+ struct mux_pkt_header *mux_header;
+ struct mux_tx *t = NULL;
+ static u32 seq_num = 1;
+- int dummy_cnt;
+ int total_len;
+ int ret;
+ unsigned long flags;
+@@ -374,9 +373,7 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
+
+ spin_lock_irqsave(&mux_dev->write_lock, flags);
+
+- dummy_cnt = ALIGN(MUX_HEADER_SIZE + len, 4);
+-
+- total_len = len + MUX_HEADER_SIZE + dummy_cnt;
++ total_len = ALIGN(MUX_HEADER_SIZE + len, 4);
+
+ t = alloc_mux_tx(total_len);
+ if (!t) {
+@@ -392,7 +389,8 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
+ mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);
+
+ memcpy(t->buf+MUX_HEADER_SIZE, data, len);
+- memset(t->buf+MUX_HEADER_SIZE+len, 0, dummy_cnt);
++ memset(t->buf+MUX_HEADER_SIZE+len, 0, total_len - MUX_HEADER_SIZE -
++ len);
+
+ t->len = total_len;
+ t->callback = cb;
+diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
+index 03b2a90b9ac0..992236f605d8 100644
+--- a/drivers/staging/vt6655/device_main.c
++++ b/drivers/staging/vt6655/device_main.c
+@@ -911,7 +911,11 @@ static int vnt_int_report_rate(struct vnt_private *priv,
+
+ if (!(tsr1 & TSR1_TERR)) {
+ info->status.rates[0].idx = idx;
+- info->flags |= IEEE80211_TX_STAT_ACK;
++
++ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
++ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
++ else
++ info->flags |= IEEE80211_TX_STAT_ACK;
+ }
+
+ return 0;
+@@ -936,9 +940,6 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
+ //Only the status of first TD in the chain is correct
+ if (pTD->m_td1TD1.byTCR & TCR_STP) {
+ if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) {
+-
+- vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
+-
+ if (!(byTsr1 & TSR1_TERR)) {
+ if (byTsr0 != 0) {
+ pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
+@@ -957,6 +958,9 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
+ (int)uIdx, byTsr1, byTsr0);
+ }
+ }
++
++ vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
++
+ device_free_tx_buf(pDevice, pTD);
+ pDevice->iTDUsed[uIdx]--;
+ }
+@@ -988,10 +992,8 @@ static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc)
+ PCI_DMA_TODEVICE);
+ }
+
+- if (pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
++ if (skb)
+ ieee80211_tx_status_irqsafe(pDevice->hw, skb);
+- else
+- dev_kfree_skb_irq(skb);
+
+ pTDInfo->skb_dma = 0;
+ pTDInfo->skb = NULL;
+@@ -1201,14 +1203,6 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
+ if (dma_idx == TYPE_AC0DMA)
+ head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
+
+- priv->iTDUsed[dma_idx]++;
+-
+- /* Take ownership */
+- wmb();
+- head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
+-
+- /* get Next */
+- wmb();
+ priv->apCurrTD[dma_idx] = head_td->next;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -1229,11 +1223,18 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
+
+ head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
+
++ /* Poll Transmit the adapter */
++ wmb();
++ head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
++ wmb(); /* second memory barrier */
++
+ if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
+ MACvTransmitAC0(priv->PortOffset);
+ else
+ MACvTransmit0(priv->PortOffset);
+
++ priv->iTDUsed[dma_idx]++;
++
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+@@ -1413,9 +1414,16 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
+
+ priv->current_aid = conf->aid;
+
+- if (changed & BSS_CHANGED_BSSID)
++ if (changed & BSS_CHANGED_BSSID) {
++ unsigned long flags;
++
++ spin_lock_irqsave(&priv->lock, flags);
++
+ MACvWriteBSSIDAddress(priv->PortOffset, (u8 *)conf->bssid);
+
++ spin_unlock_irqrestore(&priv->lock, flags);
++ }
++
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ priv->basic_rates = conf->basic_rates;
+
+diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
+index 33baf26de4b5..ee9ce165dcde 100644
+--- a/drivers/staging/vt6656/rxtx.c
++++ b/drivers/staging/vt6656/rxtx.c
+@@ -805,10 +805,18 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
+ vnt_schedule_command(priv, WLAN_CMD_SETPOWER);
+ }
+
+- if (current_rate > RATE_11M)
+- pkt_type = priv->packet_type;
+- else
++ if (current_rate > RATE_11M) {
++ if (info->band == IEEE80211_BAND_5GHZ) {
++ pkt_type = PK_TYPE_11A;
++ } else {
++ if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
++ pkt_type = PK_TYPE_11GB;
++ else
++ pkt_type = PK_TYPE_11GA;
++ }
++ } else {
+ pkt_type = PK_TYPE_11B;
++ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index f6c954c4635f..4073869d2090 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev)
+ " pdv_host_id: %d\n", pdv->pdv_host_id);
+ return -EINVAL;
+ }
++ pdv->pdv_lld_host = sh;
+ }
+ } else {
+ if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
+@@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev)
+ if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
+ (phv->phv_lld_host != NULL))
+ scsi_host_put(phv->phv_lld_host);
++ else if (pdv->pdv_lld_host)
++ scsi_host_put(pdv->pdv_lld_host);
+
+ if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
+ scsi_device_put(sd);
+diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
+index 1bd757dff8ee..820d3052b775 100644
+--- a/drivers/target/target_core_pscsi.h
++++ b/drivers/target/target_core_pscsi.h
+@@ -45,6 +45,7 @@ struct pscsi_dev_virt {
+ int pdv_lun_id;
+ struct block_device *pdv_bd;
+ struct scsi_device *pdv_sd;
++ struct Scsi_Host *pdv_lld_host;
+ } ____cacheline_aligned;
+
+ typedef enum phv_modes {
+diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
+index c2556cf5186b..01255fd65135 100644
+--- a/drivers/thermal/armada_thermal.c
++++ b/drivers/thermal/armada_thermal.c
+@@ -224,9 +224,9 @@ static const struct armada_thermal_data armada380_data = {
+ .is_valid_shift = 10,
+ .temp_shift = 0,
+ .temp_mask = 0x3ff,
+- .coef_b = 1169498786UL,
+- .coef_m = 2000000UL,
+- .coef_div = 4289,
++ .coef_b = 2931108200UL,
++ .coef_m = 5000000UL,
++ .coef_div = 10502,
+ .inverted = true,
+ };
+
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index 5bab1c684bb1..7a3d146a5f0e 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -289,7 +289,7 @@ static int xen_initial_domain_console_init(void)
+ return -ENOMEM;
+ }
+
+- info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
++ info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
+ info->vtermno = HVC_COOKIE;
+
+ spin_lock(&xencons_lock);
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index c4343764cc5b..bce16e405d59 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -3170,7 +3170,7 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
+ return gsmtty_modem_update(dlci, encode);
+ }
+
+-static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
++static void gsmtty_cleanup(struct tty_struct *tty)
+ {
+ struct gsm_dlci *dlci = tty->driver_data;
+ struct gsm_mux *gsm = dlci->gsm;
+@@ -3178,7 +3178,6 @@ static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
+ dlci_put(dlci);
+ dlci_put(gsm->dlci[0]);
+ mux_put(gsm);
+- driver->ttys[tty->index] = NULL;
+ }
+
+ /* Virtual ttys for the demux */
+@@ -3199,7 +3198,7 @@ static const struct tty_operations gsmtty_ops = {
+ .tiocmget = gsmtty_tiocmget,
+ .tiocmset = gsmtty_tiocmset,
+ .break_ctl = gsmtty_break_ctl,
+- .remove = gsmtty_remove,
++ .cleanup = gsmtty_cleanup,
+ };
+
+
+diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
+index 644ddb841d9f..bbc4ce66c2c1 100644
+--- a/drivers/tty/n_hdlc.c
++++ b/drivers/tty/n_hdlc.c
+@@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+ add_wait_queue(&tty->read_wait, &wait);
+
+ for (;;) {
+- if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
++ if (test_bit(TTY_OTHER_DONE, &tty->flags)) {
+ ret = -EIO;
+ break;
+ }
+@@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+ /* set bits for operations that won't block */
+ if (n_hdlc->rx_buf_list.head)
+ mask |= POLLIN | POLLRDNORM; /* readable */
+- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
++ if (test_bit(TTY_OTHER_DONE, &tty->flags))
+ mask |= POLLHUP;
+ if (tty_hung_up_p(filp))
+ mask |= POLLHUP;
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index cf6e0f2e1331..cc57a3a6b02b 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1949,6 +1949,18 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
+ return ldata->commit_head - ldata->read_tail >= amt;
+ }
+
++static inline int check_other_done(struct tty_struct *tty)
++{
++ int done = test_bit(TTY_OTHER_DONE, &tty->flags);
++ if (done) {
++ /* paired with cmpxchg() in check_other_closed(); ensures
++ * read buffer head index is not stale
++ */
++ smp_mb__after_atomic();
++ }
++ return done;
++}
++
+ /**
+ * copy_from_read_buf - copy read data directly
+ * @tty: terminal device
+@@ -2167,7 +2179,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ struct n_tty_data *ldata = tty->disc_data;
+ unsigned char __user *b = buf;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+- int c;
++ int c, done;
+ int minimum, time;
+ ssize_t retval = 0;
+ long timeout;