author    Mike Pagano <mpagano@gentoo.org>    2015-06-23 08:54:37 -0400
committer Mike Pagano <mpagano@gentoo.org>    2015-06-23 08:54:37 -0400
commit    3b05debcd4dd78810c90dc8e2a07b7843275df2c (patch)
tree      4db220e4fbf853eb9935e997794348745836d200
parent    Add check to saved_root_name for supported filesystem path naming. (diff)
Clean up master
-rw-r--r--  1000_linux-4.0.1.patch                                                479
-rw-r--r--  1001_linux-4.0.2.patch                                               8587
-rw-r--r--  1002_linux-4.0.3.patch                                               2827
-rw-r--r--  1003_linux-4.0.4.patch                                               2713
-rw-r--r--  1004_linux-4.0.5.patch                                               4937
-rw-r--r--  1500_XATTR_USER_PREFIX.patch                                           54
-rw-r--r--  1510_fs-enable-link-security-restrictions-by-default.patch             22
-rw-r--r--  2600_select-REGMAP_IRQ-for-rt5033.patch                                30
-rw-r--r--  2700_ThinkPad-30-brightness-control-fix.patch                          67
-rw-r--r--  2900_dev-root-proc-mount-fix.patch                                     30
-rw-r--r--  2905_2disk-resume-image-fix.patch                                      24
-rw-r--r--  2910_lz4-compression-fix.patch                                         30
-rw-r--r--  4200_fbcondecor-3.19.patch                                           2119
-rw-r--r--  5000_enable-additional-cpu-optimizations-for-gcc.patch                327
-rw-r--r--  5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-4.0.patch           104
-rw-r--r--  5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-4.0.patch            16966
-rw-r--r--  5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-4.0.0.patch  1222
-rw-r--r--  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch             402
18 files changed, 0 insertions(+), 30940 deletions(-)
diff --git a/1000_linux-4.0.1.patch b/1000_linux-4.0.1.patch
deleted file mode 100644
index ac585520..00000000
--- a/1000_linux-4.0.1.patch
+++ /dev/null
@@ -1,479 +0,0 @@
-diff --git a/Makefile b/Makefile
-index fbd43bfe4445..f499cd2f5738 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 0
--SUBLEVEL = 0
-+SUBLEVEL = 1
- EXTRAVERSION =
- NAME = Hurr durr I'ma sheep
-
-diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
-index 4085c4b31047..355d5fea5be9 100644
---- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
-+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
-@@ -531,20 +531,8 @@ struct bnx2x_fastpath {
- struct napi_struct napi;
-
- #ifdef CONFIG_NET_RX_BUSY_POLL
-- unsigned int state;
--#define BNX2X_FP_STATE_IDLE 0
--#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
--#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
--#define BNX2X_FP_STATE_DISABLED (1 << 2)
--#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
--#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
--#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
--#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
--#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
--#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
-- /* protect state */
-- spinlock_t lock;
--#endif /* CONFIG_NET_RX_BUSY_POLL */
-+ unsigned long busy_poll_state;
-+#endif
-
- union host_hc_status_block status_blk;
- /* chip independent shortcuts into sb structure */
-@@ -619,104 +607,83 @@ struct bnx2x_fastpath {
- #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
-
- #ifdef CONFIG_NET_RX_BUSY_POLL
--static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
-+
-+enum bnx2x_fp_state {
-+ BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */
-+
-+ BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
-+ BNX2X_STATE_FP_NAPI_REQ = BIT(1),
-+
-+ BNX2X_STATE_FP_POLL_BIT = 2,
-+ BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */
-+
-+ BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
-+};
-+
-+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
- {
-- spin_lock_init(&fp->lock);
-- fp->state = BNX2X_FP_STATE_IDLE;
-+ WRITE_ONCE(fp->busy_poll_state, 0);
- }
-
- /* called from the device poll routine to get ownership of a FP */
- static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
- {
-- bool rc = true;
--
-- spin_lock_bh(&fp->lock);
-- if (fp->state & BNX2X_FP_LOCKED) {
-- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
-- fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
-- rc = false;
-- } else {
-- /* we don't care if someone yielded */
-- fp->state = BNX2X_FP_STATE_NAPI;
-+ unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
-+
-+ while (1) {
-+ switch (old) {
-+ case BNX2X_STATE_FP_POLL:
-+ /* make sure bnx2x_fp_lock_poll() wont starve us */
-+ set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
-+ &fp->busy_poll_state);
-+ /* fallthrough */
-+ case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
-+ return false;
-+ default:
-+ break;
-+ }
-+ prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
-+ if (unlikely(prev != old)) {
-+ old = prev;
-+ continue;
-+ }
-+ return true;
- }
-- spin_unlock_bh(&fp->lock);
-- return rc;
- }
-
--/* returns true is someone tried to get the FP while napi had it */
--static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
-+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
- {
-- bool rc = false;
--
-- spin_lock_bh(&fp->lock);
-- WARN_ON(fp->state &
-- (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
--
-- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
-- rc = true;
--
-- /* state ==> idle, unless currently disabled */
-- fp->state &= BNX2X_FP_STATE_DISABLED;
-- spin_unlock_bh(&fp->lock);
-- return rc;
-+ smp_wmb();
-+ fp->busy_poll_state = 0;
- }
-
- /* called from bnx2x_low_latency_poll() */
- static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
- {
-- bool rc = true;
--
-- spin_lock_bh(&fp->lock);
-- if ((fp->state & BNX2X_FP_LOCKED)) {
-- fp->state |= BNX2X_FP_STATE_POLL_YIELD;
-- rc = false;
-- } else {
-- /* preserve yield marks */
-- fp->state |= BNX2X_FP_STATE_POLL;
-- }
-- spin_unlock_bh(&fp->lock);
-- return rc;
-+ return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
- }
-
--/* returns true if someone tried to get the FP while it was locked */
--static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
-+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
- {
-- bool rc = false;
--
-- spin_lock_bh(&fp->lock);
-- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
--
-- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
-- rc = true;
--
-- /* state ==> idle, unless currently disabled */
-- fp->state &= BNX2X_FP_STATE_DISABLED;
-- spin_unlock_bh(&fp->lock);
-- return rc;
-+ smp_mb__before_atomic();
-+ clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
- }
-
--/* true if a socket is polling, even if it did not get the lock */
-+/* true if a socket is polling */
- static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
- {
-- WARN_ON(!(fp->state & BNX2X_FP_OWNED));
-- return fp->state & BNX2X_FP_USER_PEND;
-+ return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
- }
-
- /* false if fp is currently owned */
- static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
- {
-- int rc = true;
--
-- spin_lock_bh(&fp->lock);
-- if (fp->state & BNX2X_FP_OWNED)
-- rc = false;
-- fp->state |= BNX2X_FP_STATE_DISABLED;
-- spin_unlock_bh(&fp->lock);
-+ set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
-+ return !bnx2x_fp_ll_polling(fp);
-
-- return rc;
- }
- #else
--static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
-+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
- {
- }
-
-@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
- return true;
- }
-
--static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
-+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
- {
-- return false;
- }
-
- static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
-@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
- return false;
- }
-
--static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
-+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
- {
-- return false;
- }
-
- static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
-diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
-index 0a9faa134a9a..2f63467bce46 100644
---- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
-+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
-@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
- int i;
-
- for_each_rx_queue_cnic(bp, i) {
-- bnx2x_fp_init_lock(&bp->fp[i]);
-+ bnx2x_fp_busy_poll_init(&bp->fp[i]);
- napi_enable(&bnx2x_fp(bp, i, napi));
- }
- }
-@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
- int i;
-
- for_each_eth_queue(bp, i) {
-- bnx2x_fp_init_lock(&bp->fp[i]);
-+ bnx2x_fp_busy_poll_init(&bp->fp[i]);
- napi_enable(&bnx2x_fp(bp, i, napi));
- }
- }
-@@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
- }
- }
-
-+ bnx2x_fp_unlock_napi(fp);
-+
- /* Fall out from the NAPI loop if needed */
-- if (!bnx2x_fp_unlock_napi(fp) &&
-- !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-+ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-
- /* No need to update SB for FCoE L2 ring as long as
- * it's connected to the default SB and the SB
-diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
-index f8528a4cf54f..fceb637efd6b 100644
---- a/drivers/net/vxlan.c
-+++ b/drivers/net/vxlan.c
-@@ -1713,12 +1713,6 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
- }
- }
-
-- skb = iptunnel_handle_offloads(skb, udp_sum, type);
-- if (IS_ERR(skb)) {
-- err = -EINVAL;
-- goto err;
-- }
--
- skb_scrub_packet(skb, xnet);
-
- min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
-@@ -1738,6 +1732,12 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
- goto err;
- }
-
-+ skb = iptunnel_handle_offloads(skb, udp_sum, type);
-+ if (IS_ERR(skb)) {
-+ err = -EINVAL;
-+ goto err;
-+ }
-+
- vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_HF_VNI);
- vxh->vx_vni = md->vni;
-@@ -1798,10 +1798,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
- }
- }
-
-- skb = iptunnel_handle_offloads(skb, udp_sum, type);
-- if (IS_ERR(skb))
-- return PTR_ERR(skb);
--
- min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
- + VXLAN_HLEN + sizeof(struct iphdr)
- + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-@@ -1817,6 +1813,10 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
- if (WARN_ON(!skb))
- return -ENOMEM;
-
-+ skb = iptunnel_handle_offloads(skb, udp_sum, type);
-+ if (IS_ERR(skb))
-+ return PTR_ERR(skb);
-+
- vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_HF_VNI);
- vxh->vx_vni = md->vni;
-diff --git a/fs/exec.c b/fs/exec.c
-index c7f9b733406d..00400cf522dc 100644
---- a/fs/exec.c
-+++ b/fs/exec.c
-@@ -1265,6 +1265,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
- spin_unlock(&p->fs->lock);
- }
-
-+static void bprm_fill_uid(struct linux_binprm *bprm)
-+{
-+ struct inode *inode;
-+ unsigned int mode;
-+ kuid_t uid;
-+ kgid_t gid;
-+
-+ /* clear any previous set[ug]id data from a previous binary */
-+ bprm->cred->euid = current_euid();
-+ bprm->cred->egid = current_egid();
-+
-+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
-+ return;
-+
-+ if (task_no_new_privs(current))
-+ return;
-+
-+ inode = file_inode(bprm->file);
-+ mode = READ_ONCE(inode->i_mode);
-+ if (!(mode & (S_ISUID|S_ISGID)))
-+ return;
-+
-+ /* Be careful if suid/sgid is set */
-+ mutex_lock(&inode->i_mutex);
-+
-+ /* reload atomically mode/uid/gid now that lock held */
-+ mode = inode->i_mode;
-+ uid = inode->i_uid;
-+ gid = inode->i_gid;
-+ mutex_unlock(&inode->i_mutex);
-+
-+ /* We ignore suid/sgid if there are no mappings for them in the ns */
-+ if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
-+ !kgid_has_mapping(bprm->cred->user_ns, gid))
-+ return;
-+
-+ if (mode & S_ISUID) {
-+ bprm->per_clear |= PER_CLEAR_ON_SETID;
-+ bprm->cred->euid = uid;
-+ }
-+
-+ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
-+ bprm->per_clear |= PER_CLEAR_ON_SETID;
-+ bprm->cred->egid = gid;
-+ }
-+}
-+
- /*
- * Fill the binprm structure from the inode.
- * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
-@@ -1273,36 +1320,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
- */
- int prepare_binprm(struct linux_binprm *bprm)
- {
-- struct inode *inode = file_inode(bprm->file);
-- umode_t mode = inode->i_mode;
- int retval;
-
--
-- /* clear any previous set[ug]id data from a previous binary */
-- bprm->cred->euid = current_euid();
-- bprm->cred->egid = current_egid();
--
-- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
-- !task_no_new_privs(current) &&
-- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
-- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
-- /* Set-uid? */
-- if (mode & S_ISUID) {
-- bprm->per_clear |= PER_CLEAR_ON_SETID;
-- bprm->cred->euid = inode->i_uid;
-- }
--
-- /* Set-gid? */
-- /*
-- * If setgid is set but no group execute bit then this
-- * is a candidate for mandatory locking, not a setgid
-- * executable.
-- */
-- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
-- bprm->per_clear |= PER_CLEAR_ON_SETID;
-- bprm->cred->egid = inode->i_gid;
-- }
-- }
-+ bprm_fill_uid(bprm);
-
- /* fill in binprm security blob */
- retval = security_bprm_set_creds(bprm);
-diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
-index a28e09c7825d..36508e69e92a 100644
---- a/kernel/bpf/verifier.c
-+++ b/kernel/bpf/verifier.c
-@@ -1380,7 +1380,8 @@ peek_stack:
- /* tell verifier to check for equivalent states
- * after every call and jump
- */
-- env->explored_states[t + 1] = STATE_LIST_MARK;
-+ if (t + 1 < insn_cnt)
-+ env->explored_states[t + 1] = STATE_LIST_MARK;
- } else {
- /* conditional jump with two edges */
- ret = push_insn(t, t + 1, FALLTHROUGH, env);
-diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 8e4ac97c8477..98d45fe72f51 100644
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -4169,19 +4169,21 @@ EXPORT_SYMBOL(skb_try_coalesce);
- */
- void skb_scrub_packet(struct sk_buff *skb, bool xnet)
- {
-- if (xnet)
-- skb_orphan(skb);
- skb->tstamp.tv64 = 0;
- skb->pkt_type = PACKET_HOST;
- skb->skb_iif = 0;
- skb->ignore_df = 0;
- skb_dst_drop(skb);
-- skb->mark = 0;
- skb_sender_cpu_clear(skb);
-- skb_init_secmark(skb);
- secpath_reset(skb);
- nf_reset(skb);
- nf_reset_trace(skb);
-+
-+ if (!xnet)
-+ return;
-+
-+ skb_orphan(skb);
-+ skb->mark = 0;
- }
- EXPORT_SYMBOL_GPL(skb_scrub_packet);
-
-diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
-index 5a4828ba05ad..a566a2e4715b 100644
---- a/net/ipv4/geneve.c
-+++ b/net/ipv4/geneve.c
-@@ -113,10 +113,6 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
- int min_headroom;
- int err;
-
-- skb = udp_tunnel_handle_offloads(skb, csum);
-- if (IS_ERR(skb))
-- return PTR_ERR(skb);
--
- min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
- + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
- + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-@@ -131,6 +127,10 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
- if (unlikely(!skb))
- return -ENOMEM;
-
-+ skb = udp_tunnel_handle_offloads(skb, csum);
-+ if (IS_ERR(skb))
-+ return PTR_ERR(skb);
-+
- gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
- geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
-
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index 1db253e36045..d520492ba698 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -2929,6 +2929,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
- }
- #endif
-
-+ /* Do not fool tcpdump (if any), clean our debris */
-+ skb->tstamp.tv64 = 0;
- return skb;
- }
- EXPORT_SYMBOL(tcp_make_synack);
diff --git a/1001_linux-4.0.2.patch b/1001_linux-4.0.2.patch
deleted file mode 100644
index 38a75b2f..00000000
--- a/1001_linux-4.0.2.patch
+++ /dev/null
@@ -1,8587 +0,0 @@
-diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
-index 99ca40e..5c204df 100644
---- a/Documentation/networking/scaling.txt
-+++ b/Documentation/networking/scaling.txt
-@@ -282,7 +282,7 @@ following is true:
-
- - The current CPU's queue head counter >= the recorded tail counter
- value in rps_dev_flow[i]
--- The current CPU is unset (equal to RPS_NO_CPU)
-+- The current CPU is unset (>= nr_cpu_ids)
- - The current CPU is offline
-
- After this check, the packet is sent to the (possibly updated) current
-diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
-index 4ceef53..d1ad9d5 100644
---- a/Documentation/virtual/kvm/devices/s390_flic.txt
-+++ b/Documentation/virtual/kvm/devices/s390_flic.txt
-@@ -27,6 +27,9 @@ Groups:
- Copies all floating interrupts into a buffer provided by userspace.
- When the buffer is too small it returns -ENOMEM, which is the indication
- for userspace to try again with a bigger buffer.
-+ -ENOBUFS is returned when the allocation of a kernelspace buffer has
-+ failed.
-+ -EFAULT is returned when copying data to userspace failed.
- All interrupts remain pending, i.e. are not deleted from the list of
- currently pending interrupts.
- attr->addr contains the userspace address of the buffer into which all
-diff --git a/Makefile b/Makefile
-index f499cd2..0649a60 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 0
--SUBLEVEL = 1
-+SUBLEVEL = 2
- EXTRAVERSION =
- NAME = Hurr durr I'ma sheep
-
-diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
-index fec1fca..6c4bc53 100644
---- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
-+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
-@@ -167,7 +167,13 @@
-
- macb1: ethernet@f802c000 {
- phy-mode = "rmii";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
- status = "okay";
-+
-+ ethernet-phy@1 {
-+ reg = <0x1>;
-+ };
- };
-
- dbgu: serial@ffffee00 {
-diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
-index a5441d5..3cc8b83 100644
---- a/arch/arm/boot/dts/dove.dtsi
-+++ b/arch/arm/boot/dts/dove.dtsi
-@@ -154,7 +154,7 @@
-
- uart2: serial@12200 {
- compatible = "ns16550a";
-- reg = <0x12000 0x100>;
-+ reg = <0x12200 0x100>;
- reg-shift = <2>;
- interrupts = <9>;
- clocks = <&core_clk 0>;
-@@ -163,7 +163,7 @@
-
- uart3: serial@12300 {
- compatible = "ns16550a";
-- reg = <0x12100 0x100>;
-+ reg = <0x12300 0x100>;
- reg-shift = <2>;
- interrupts = <10>;
- clocks = <&core_clk 0>;
-diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
-index f027754..c41600e 100644
---- a/arch/arm/boot/dts/exynos5250-spring.dts
-+++ b/arch/arm/boot/dts/exynos5250-spring.dts
-@@ -429,7 +429,6 @@
- &mmc_0 {
- status = "okay";
- num-slots = <1>;
-- supports-highspeed;
- broken-cd;
- card-detect-delay = <200>;
- samsung,dw-mshc-ciu-div = <3>;
-@@ -437,11 +436,8 @@
- samsung,dw-mshc-ddr-timing = <1 2>;
- pinctrl-names = "default";
- pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_cd &sd0_bus4 &sd0_bus8>;
--
-- slot@0 {
-- reg = <0>;
-- bus-width = <8>;
-- };
-+ bus-width = <8>;
-+ cap-mmc-highspeed;
- };
-
- /*
-@@ -451,7 +447,6 @@
- &mmc_1 {
- status = "okay";
- num-slots = <1>;
-- supports-highspeed;
- broken-cd;
- card-detect-delay = <200>;
- samsung,dw-mshc-ciu-div = <3>;
-@@ -459,11 +454,8 @@
- samsung,dw-mshc-ddr-timing = <1 2>;
- pinctrl-names = "default";
- pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_cd &sd1_bus4>;
--
-- slot@0 {
-- reg = <0>;
-- bus-width = <4>;
-- };
-+ bus-width = <4>;
-+ cap-sd-highspeed;
- };
-
- &pinctrl_0 {
-diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
-index afb9caf..674d03f 100644
---- a/arch/arm/include/asm/elf.h
-+++ b/arch/arm/include/asm/elf.h
-@@ -115,7 +115,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
--#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
-+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
-
- /* When the program starts, a1 contains a pointer to a function to be
- registered with atexit, as per the SVR4 ABI. A value of 0 means we
-diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
-index 0db25bc..3a42ac6 100644
---- a/arch/arm/include/uapi/asm/kvm.h
-+++ b/arch/arm/include/uapi/asm/kvm.h
-@@ -195,8 +195,14 @@ struct kvm_arch_memory_slot {
- #define KVM_ARM_IRQ_CPU_IRQ 0
- #define KVM_ARM_IRQ_CPU_FIQ 1
-
--/* Highest supported SPI, from VGIC_NR_IRQS */
-+/*
-+ * This used to hold the highest supported SPI, but it is now obsolete
-+ * and only here to provide source code level compatibility with older
-+ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
-+ */
-+#ifndef __KERNEL__
- #define KVM_ARM_IRQ_GIC_MAX 127
-+#endif
-
- /* PSCI interface */
- #define KVM_PSCI_FN_BASE 0x95c1ba5e
-diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
-index c4cc50e..cfb354f 100644
---- a/arch/arm/kernel/hibernate.c
-+++ b/arch/arm/kernel/hibernate.c
-@@ -22,6 +22,7 @@
- #include <asm/suspend.h>
- #include <asm/memory.h>
- #include <asm/sections.h>
-+#include "reboot.h"
-
- int pfn_is_nosave(unsigned long pfn)
- {
-@@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
-
- ret = swsusp_save();
- if (ret == 0)
-- soft_restart(virt_to_phys(cpu_resume));
-+ _soft_restart(virt_to_phys(cpu_resume), false);
- return ret;
- }
-
-@@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
- for (pbe = restore_pblist; pbe; pbe = pbe->next)
- copy_page(pbe->orig_address, pbe->address);
-
-- soft_restart(virt_to_phys(cpu_resume));
-+ _soft_restart(virt_to_phys(cpu_resume), false);
- }
-
- static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
-diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index fdfa3a7..2bf1a16 100644
---- a/arch/arm/kernel/process.c
-+++ b/arch/arm/kernel/process.c
-@@ -41,6 +41,7 @@
- #include <asm/system_misc.h>
- #include <asm/mach/time.h>
- #include <asm/tls.h>
-+#include "reboot.h"
-
- #ifdef CONFIG_CC_STACKPROTECTOR
- #include <linux/stackprotector.h>
-@@ -95,7 +96,7 @@ static void __soft_restart(void *addr)
- BUG();
- }
-
--void soft_restart(unsigned long addr)
-+void _soft_restart(unsigned long addr, bool disable_l2)
- {
- u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
-
-@@ -104,7 +105,7 @@ void soft_restart(unsigned long addr)
- local_fiq_disable();
-
- /* Disable the L2 if we're the last man standing. */
-- if (num_online_cpus() == 1)
-+ if (disable_l2)
- outer_disable();
-
- /* Change to the new stack and continue with the reset. */
-@@ -114,6 +115,11 @@ void soft_restart(unsigned long addr)
- BUG();
- }
-
-+void soft_restart(unsigned long addr)
-+{
-+ _soft_restart(addr, num_online_cpus() == 1);
-+}
-+
- /*
- * Function pointers to optional machine specific functions
- */
-diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h
-new file mode 100644
-index 0000000..c87f058
---- /dev/null
-+++ b/arch/arm/kernel/reboot.h
-@@ -0,0 +1,6 @@
-+#ifndef REBOOT_H
-+#define REBOOT_H
-+
-+extern void _soft_restart(unsigned long addr, bool disable_l2);
-+
-+#endif
-diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
-index 5560f74..b652af5 100644
---- a/arch/arm/kvm/arm.c
-+++ b/arch/arm/kvm/arm.c
-@@ -651,8 +651,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
- if (!irqchip_in_kernel(kvm))
- return -ENXIO;
-
-- if (irq_num < VGIC_NR_PRIVATE_IRQS ||
-- irq_num > KVM_ARM_IRQ_GIC_MAX)
-+ if (irq_num < VGIC_NR_PRIVATE_IRQS)
- return -EINVAL;
-
- return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
-diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
-index 8b9f5e2..4f4e222 100644
---- a/arch/arm/mach-mvebu/pmsu.c
-+++ b/arch/arm/mach-mvebu/pmsu.c
-@@ -415,6 +415,9 @@ static __init int armada_38x_cpuidle_init(void)
- void __iomem *mpsoc_base;
- u32 reg;
-
-+ pr_warn("CPU idle is currently broken on Armada 38x: disabling");
-+ return 0;
-+
- np = of_find_compatible_node(NULL, NULL,
- "marvell,armada-380-coherency-fabric");
- if (!np)
-@@ -476,6 +479,16 @@ static int __init mvebu_v7_cpu_pm_init(void)
- return 0;
- of_node_put(np);
-
-+ /*
-+ * Currently the CPU idle support for Armada 38x is broken, as
-+ * the CPU hotplug uses some of the CPU idle functions it is
-+ * broken too, so let's disable it
-+ */
-+ if (of_machine_is_compatible("marvell,armada380")) {
-+ cpu_hotplug_disable();
-+ pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling");
-+ }
-+
- if (of_machine_is_compatible("marvell,armadaxp"))
- ret = armada_xp_cpuidle_init();
- else if (of_machine_is_compatible("marvell,armada370"))
-@@ -489,7 +502,8 @@ static int __init mvebu_v7_cpu_pm_init(void)
- return ret;
-
- mvebu_v7_pmsu_enable_l2_powerdown_onidle();
-- platform_device_register(&mvebu_v7_cpuidle_device);
-+ if (mvebu_v7_cpuidle_device.name)
-+ platform_device_register(&mvebu_v7_cpuidle_device);
- cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);
-
- return 0;
-diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
-index 7bc6668..dcbe17f 100644
---- a/arch/arm/mach-s3c64xx/crag6410.h
-+++ b/arch/arm/mach-s3c64xx/crag6410.h
-@@ -14,6 +14,7 @@
- #include <mach/gpio-samsung.h>
-
- #define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
-+#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
-
- #define PCA935X_GPIO_BASE GPIO_BOARD_START
- #define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
-diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
-index 10b913b..65c426b 100644
---- a/arch/arm/mach-s3c64xx/mach-crag6410.c
-+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
-@@ -554,6 +554,7 @@ static struct wm831x_touch_pdata touch_pdata = {
-
- static struct wm831x_pdata crag_pmic_pdata = {
- .wm831x_num = 1,
-+ .irq_base = BANFF_PMIC_IRQ_BASE,
- .gpio_base = BANFF_PMIC_GPIO_BASE,
- .soft_shutdown = true,
-
-diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index 1b8e973..a6186c2 100644
---- a/arch/arm64/Kconfig
-+++ b/arch/arm64/Kconfig
-@@ -361,6 +361,27 @@ config ARM64_ERRATUM_832075
-
- If unsure, say Y.
-
-+config ARM64_ERRATUM_845719
-+ bool "Cortex-A53: 845719: a load might read incorrect data"
-+ depends on COMPAT
-+ default y
-+ help
-+ This option adds an alternative code sequence to work around ARM
-+ erratum 845719 on Cortex-A53 parts up to r0p4.
-+
-+ When running a compat (AArch32) userspace on an affected Cortex-A53
-+ part, a load at EL0 from a virtual address that matches the bottom 32
-+ bits of the virtual address used by a recent load at (AArch64) EL1
-+ might return incorrect data.
-+
-+ The workaround is to write the contextidr_el1 register on exception
-+ return to a 32-bit task.
-+ Please note that this does not necessarily enable the workaround,
-+ as it depends on the alternative framework, which will only patch
-+ the kernel if an affected CPU is detected.
-+
-+ If unsure, say Y.
-+
- endmenu
-
-
-@@ -470,6 +491,10 @@ config HOTPLUG_CPU
-
- source kernel/Kconfig.preempt
-
-+config UP_LATE_INIT
-+ def_bool y
-+ depends on !SMP
-+
- config HZ
- int
- default 100
-diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
-index 69ceedc..4d2a925 100644
---- a/arch/arm64/Makefile
-+++ b/arch/arm64/Makefile
-@@ -48,7 +48,7 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/
- core-$(CONFIG_XEN) += arch/arm64/xen/
- core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
- libs-y := arch/arm64/lib/ $(libs-y)
--libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
-+core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
-
- # Default target when executing plain make
- KBUILD_IMAGE := Image.gz
-diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
-index b6c16d5..3f0c53c 100644
---- a/arch/arm64/include/asm/cpufeature.h
-+++ b/arch/arm64/include/asm/cpufeature.h
-@@ -23,8 +23,9 @@
-
- #define ARM64_WORKAROUND_CLEAN_CACHE 0
- #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
-+#define ARM64_WORKAROUND_845719 2
-
--#define ARM64_NCAPS 2
-+#define ARM64_NCAPS 3
-
- #ifndef __ASSEMBLY__
-
-diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
-index 59e2823..8dcd61e 100644
---- a/arch/arm64/include/asm/smp_plat.h
-+++ b/arch/arm64/include/asm/smp_plat.h
-@@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void)
- extern u64 __cpu_logical_map[NR_CPUS];
- #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
-
-+void __init do_post_cpus_up_work(void);
-+
- #endif /* __ASM_SMP_PLAT_H */
-diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
-index 3ef77a4..bc49a18 100644
---- a/arch/arm64/include/uapi/asm/kvm.h
-+++ b/arch/arm64/include/uapi/asm/kvm.h
-@@ -188,8 +188,14 @@ struct kvm_arch_memory_slot {
- #define KVM_ARM_IRQ_CPU_IRQ 0
- #define KVM_ARM_IRQ_CPU_FIQ 1
-
--/* Highest supported SPI, from VGIC_NR_IRQS */
-+/*
-+ * This used to hold the highest supported SPI, but it is now obsolete
-+ * and only here to provide source code level compatibility with older
-+ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
-+ */
-+#ifndef __KERNEL__
- #define KVM_ARM_IRQ_GIC_MAX 127
-+#endif
-
- /* PSCI interface */
- #define KVM_PSCI_FN_BASE 0x95c1ba5e
-diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
-index fa62637..ad6d523 100644
---- a/arch/arm64/kernel/cpu_errata.c
-+++ b/arch/arm64/kernel/cpu_errata.c
-@@ -88,7 +88,16 @@ struct arm64_cpu_capabilities arm64_errata[] = {
- /* Cortex-A57 r0p0 - r1p2 */
- .desc = "ARM erratum 832075",
- .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
-- MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
-+ MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
-+ (1 << MIDR_VARIANT_SHIFT) | 2),
-+ },
-+#endif
-+#ifdef CONFIG_ARM64_ERRATUM_845719
-+ {
-+ /* Cortex-A53 r0p[01234] */
-+ .desc = "ARM erratum 845719",
-+ .capability = ARM64_WORKAROUND_845719,
-+ MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
- },
- #endif
- {
-diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
-index cf21bb3..959fe87 100644
---- a/arch/arm64/kernel/entry.S
-+++ b/arch/arm64/kernel/entry.S
-@@ -21,8 +21,10 @@
- #include <linux/init.h>
- #include <linux/linkage.h>
-
-+#include <asm/alternative-asm.h>
- #include <asm/assembler.h>
- #include <asm/asm-offsets.h>
-+#include <asm/cpufeature.h>
- #include <asm/errno.h>
- #include <asm/esr.h>
- #include <asm/thread_info.h>
-@@ -120,6 +122,24 @@
- ct_user_enter
- ldr x23, [sp, #S_SP] // load return stack pointer
- msr sp_el0, x23
-+
-+#ifdef CONFIG_ARM64_ERRATUM_845719
-+ alternative_insn \
-+ "nop", \
-+ "tbz x22, #4, 1f", \
-+ ARM64_WORKAROUND_845719
-+#ifdef CONFIG_PID_IN_CONTEXTIDR
-+ alternative_insn \
-+ "nop; nop", \
-+ "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
-+ ARM64_WORKAROUND_845719
-+#else
-+ alternative_insn \
-+ "nop", \
-+ "msr contextidr_el1, xzr; 1:", \
-+ ARM64_WORKAROUND_845719
-+#endif
-+#endif
- .endif
- msr elr_el1, x21 // set up the return data
- msr spsr_el1, x22
-diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
-index 07f9305..c237ffb 100644
---- a/arch/arm64/kernel/head.S
-+++ b/arch/arm64/kernel/head.S
-@@ -426,6 +426,7 @@ __create_page_tables:
- */
- mov x0, x25
- add x1, x26, #SWAPPER_DIR_SIZE
-+ dmb sy
- bl __inval_cache_range
-
- mov lr, x27
-diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
-index e8420f6..781f469 100644
---- a/arch/arm64/kernel/setup.c
-+++ b/arch/arm64/kernel/setup.c
-@@ -207,6 +207,18 @@ static void __init smp_build_mpidr_hash(void)
- }
- #endif
-
-+void __init do_post_cpus_up_work(void)
-+{
-+ apply_alternatives_all();
-+}
-+
-+#ifdef CONFIG_UP_LATE_INIT
-+void __init up_late_init(void)
-+{
-+ do_post_cpus_up_work();
-+}
-+#endif /* CONFIG_UP_LATE_INIT */
-+
- static void __init setup_processor(void)
- {
- struct cpu_info *cpu_info;
-diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
-index 328b8ce..4257369 100644
---- a/arch/arm64/kernel/smp.c
-+++ b/arch/arm64/kernel/smp.c
-@@ -309,7 +309,7 @@ void cpu_die(void)
- void __init smp_cpus_done(unsigned int max_cpus)
- {
- pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-- apply_alternatives_all();
-+ do_post_cpus_up_work();
- }
-
- void __init smp_prepare_boot_cpu(void)
-diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
-index 356ee84..04845aa 100644
---- a/arch/c6x/kernel/time.c
-+++ b/arch/c6x/kernel/time.c
-@@ -49,7 +49,7 @@ u64 sched_clock(void)
- return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
- }
-
--void time_init(void)
-+void __init time_init(void)
- {
- u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
-
-diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
-index e41c56e..1e38f0e 100644
---- a/arch/mips/include/asm/asm-eva.h
-+++ b/arch/mips/include/asm/asm-eva.h
-@@ -11,6 +11,36 @@
- #define __ASM_ASM_EVA_H
-
- #ifndef __ASSEMBLY__
-+
-+/* Kernel variants */
-+
-+#define kernel_cache(op, base) "cache " op ", " base "\n"
-+#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
-+#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
-+#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
-+#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n"
-+#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n"
-+#define kernel_lh(reg, addr) "lh " reg ", " addr "\n"
-+#define kernel_lb(reg, addr) "lb " reg ", " addr "\n"
-+#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n"
-+#define kernel_sw(reg, addr) "sw " reg ", " addr "\n"
-+#define kernel_swl(reg, addr) "swl " reg ", " addr "\n"
-+#define kernel_swr(reg, addr) "swr " reg ", " addr "\n"
-+#define kernel_sh(reg, addr) "sh " reg ", " addr "\n"
-+#define kernel_sb(reg, addr) "sb " reg ", " addr "\n"
-+
-+#ifdef CONFIG_32BIT
-+/*
-+ * No 'sd' or 'ld' instructions in 32-bit but the code will
-+ * do the correct thing
-+ */
-+#define kernel_sd(reg, addr) user_sw(reg, addr)
-+#define kernel_ld(reg, addr) user_lw(reg, addr)
-+#else
-+#define kernel_sd(reg, addr) "sd " reg", " addr "\n"
-+#define kernel_ld(reg, addr) "ld " reg", " addr "\n"
-+#endif /* CONFIG_32BIT */
-+
- #ifdef CONFIG_EVA
-
- #define __BUILD_EVA_INSN(insn, reg, addr) \
-@@ -41,37 +71,60 @@
-
- #else
-
--#define user_cache(op, base) "cache " op ", " base "\n"
--#define user_ll(reg, addr) "ll " reg ", " addr "\n"
--#define user_sc(reg, addr) "sc " reg ", " addr "\n"
--#define user_lw(reg, addr) "lw " reg ", " addr "\n"
--#define user_lwl(reg, addr) "lwl " reg ", " addr "\n"
--#define user_lwr(reg, addr) "lwr " reg ", " addr "\n"
--#define user_lh(reg, addr) "lh " reg ", " addr "\n"
--#define user_lb(reg, addr) "lb " reg ", " addr "\n"
--#define user_lbu(reg, addr) "lbu " reg ", " addr "\n"
--#define user_sw(reg, addr) "sw " reg ", " addr "\n"
--#define user_swl(reg, addr) "swl " reg ", " addr "\n"
--#define user_swr(reg, addr) "swr " reg ", " addr "\n"
--#define user_sh(reg, addr) "sh " reg ", " addr "\n"
--#define user_sb(reg, addr) "sb " reg ", " addr "\n"
-+#define user_cache(op, base) kernel_cache(op, base)
-+#define user_ll(reg, addr) kernel_ll(reg, addr)
-+#define user_sc(reg, addr) kernel_sc(reg, addr)
-+#define user_lw(reg, addr) kernel_lw(reg, addr)
-+#define user_lwl(reg, addr) kernel_lwl(reg, addr)
-+#define user_lwr(reg, addr) kernel_lwr(reg, addr)
-+#define user_lh(reg, addr) kernel_lh(reg, addr)
-+#define user_lb(reg, addr) kernel_lb(reg, addr)
-+#define user_lbu(reg, addr) kernel_lbu(reg, addr)
-+#define user_sw(reg, addr) kernel_sw(reg, addr)
-+#define user_swl(reg, addr) kernel_swl(reg, addr)
-+#define user_swr(reg, addr) kernel_swr(reg, addr)
-+#define user_sh(reg, addr) kernel_sh(reg, addr)
-+#define user_sb(reg, addr) kernel_sb(reg, addr)
-
- #ifdef CONFIG_32BIT
--/*
-- * No 'sd' or 'ld' instructions in 32-bit but the code will
-- * do the correct thing
-- */
--#define user_sd(reg, addr) user_sw(reg, addr)
--#define user_ld(reg, addr) user_lw(reg, addr)
-+#define user_sd(reg, addr) kernel_sw(reg, addr)
-+#define user_ld(reg, addr) kernel_lw(reg, addr)
- #else
--#define user_sd(reg, addr) "sd " reg", " addr "\n"
--#define user_ld(reg, addr) "ld " reg", " addr "\n"
-+#define user_sd(reg, addr) kernel_sd(reg, addr)
-+#define user_ld(reg, addr) kernel_ld(reg, addr)
- #endif /* CONFIG_32BIT */
-
- #endif /* CONFIG_EVA */
-
- #else /* __ASSEMBLY__ */
-
-+#define kernel_cache(op, base) cache op, base
-+#define kernel_ll(reg, addr) ll reg, addr
-+#define kernel_sc(reg, addr) sc reg, addr
-+#define kernel_lw(reg, addr) lw reg, addr
-+#define kernel_lwl(reg, addr) lwl reg, addr
-+#define kernel_lwr(reg, addr) lwr reg, addr
-+#define kernel_lh(reg, addr) lh reg, addr
-+#define kernel_lb(reg, addr) lb reg, addr
-+#define kernel_lbu(reg, addr) lbu reg, addr
-+#define kernel_sw(reg, addr) sw reg, addr
-+#define kernel_swl(reg, addr) swl reg, addr
-+#define kernel_swr(reg, addr) swr reg, addr
-+#define kernel_sh(reg, addr) sh reg, addr
-+#define kernel_sb(reg, addr) sb reg, addr
-+
-+#ifdef CONFIG_32BIT
-+/*
-+ * No 'sd' or 'ld' instructions in 32-bit but the code will
-+ * do the correct thing
-+ */
-+#define kernel_sd(reg, addr) user_sw(reg, addr)
-+#define kernel_ld(reg, addr) user_lw(reg, addr)
-+#else
-+#define kernel_sd(reg, addr) sd reg, addr
-+#define kernel_ld(reg, addr) ld reg, addr
-+#endif /* CONFIG_32BIT */
-+
- #ifdef CONFIG_EVA
-
- #define __BUILD_EVA_INSN(insn, reg, addr) \
-@@ -101,31 +154,27 @@
- #define user_sd(reg, addr) user_sw(reg, addr)
- #else
-
--#define user_cache(op, base) cache op, base
--#define user_ll(reg, addr) ll reg, addr
--#define user_sc(reg, addr) sc reg, addr
--#define user_lw(reg, addr) lw reg, addr
--#define user_lwl(reg, addr) lwl reg, addr
--#define user_lwr(reg, addr) lwr reg, addr
--#define user_lh(reg, addr) lh reg, addr
--#define user_lb(reg, addr) lb reg, addr
--#define user_lbu(reg, addr) lbu reg, addr
--#define user_sw(reg, addr) sw reg, addr
--#define user_swl(reg, addr) swl reg, addr
--#define user_swr(reg, addr) swr reg, addr
--#define user_sh(reg, addr) sh reg, addr
--#define user_sb(reg, addr) sb reg, addr
-+#define user_cache(op, base) kernel_cache(op, base)
-+#define user_ll(reg, addr) kernel_ll(reg, addr)
-+#define user_sc(reg, addr) kernel_sc(reg, addr)
-+#define user_lw(reg, addr) kernel_lw(reg, addr)
-+#define user_lwl(reg, addr) kernel_lwl(reg, addr)
-+#define user_lwr(reg, addr) kernel_lwr(reg, addr)
-+#define user_lh(reg, addr) kernel_lh(reg, addr)
-+#define user_lb(reg, addr) kernel_lb(reg, addr)
-+#define user_lbu(reg, addr) kernel_lbu(reg, addr)
-+#define user_sw(reg, addr) kernel_sw(reg, addr)
-+#define user_swl(reg, addr) kernel_swl(reg, addr)
-+#define user_swr(reg, addr) kernel_swr(reg, addr)
-+#define user_sh(reg, addr) kernel_sh(reg, addr)
-+#define user_sb(reg, addr) kernel_sb(reg, addr)
-
- #ifdef CONFIG_32BIT
--/*
-- * No 'sd' or 'ld' instructions in 32-bit but the code will
-- * do the correct thing
-- */
--#define user_sd(reg, addr) user_sw(reg, addr)
--#define user_ld(reg, addr) user_lw(reg, addr)
-+#define user_sd(reg, addr) kernel_sw(reg, addr)
-+#define user_ld(reg, addr) kernel_lw(reg, addr)
- #else
--#define user_sd(reg, addr) sd reg, addr
--#define user_ld(reg, addr) ld reg, addr
-+#define user_sd(reg, addr) kernel_sd(reg, addr)
-+#define user_ld(reg, addr) kernel_sd(reg, addr)
- #endif /* CONFIG_32BIT */
-
- #endif /* CONFIG_EVA */
-diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
-index dd083e9..9f26b07 100644
---- a/arch/mips/include/asm/fpu.h
-+++ b/arch/mips/include/asm/fpu.h
-@@ -170,6 +170,7 @@ static inline void lose_fpu(int save)
- }
- disable_msa();
- clear_thread_flag(TIF_USEDMSA);
-+ __disable_fpu();
- } else if (is_fpu_owner()) {
- if (save)
- _save_fp(current);
-diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
-index ac4fc71..f722b05 100644
---- a/arch/mips/include/asm/kvm_host.h
-+++ b/arch/mips/include/asm/kvm_host.h
-@@ -322,6 +322,7 @@ enum mips_mmu_types {
- #define T_TRAP 13 /* Trap instruction */
- #define T_VCEI 14 /* Virtual coherency exception */
- #define T_FPE 15 /* Floating point exception */
-+#define T_MSADIS 21 /* MSA disabled exception */
- #define T_WATCH 23 /* Watch address reference */
- #define T_VCED 31 /* Virtual coherency data */
-
-@@ -578,6 +579,7 @@ struct kvm_mips_callbacks {
- int (*handle_syscall)(struct kvm_vcpu *vcpu);
- int (*handle_res_inst)(struct kvm_vcpu *vcpu);
- int (*handle_break)(struct kvm_vcpu *vcpu);
-+ int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
- int (*vm_init)(struct kvm *kvm);
- int (*vcpu_init)(struct kvm_vcpu *vcpu);
- int (*vcpu_setup)(struct kvm_vcpu *vcpu);
-diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
-index bbb6969..7659da2 100644
---- a/arch/mips/kernel/unaligned.c
-+++ b/arch/mips/kernel/unaligned.c
-@@ -109,10 +109,11 @@ static u32 unaligned_action;
- extern void show_registers(struct pt_regs *regs);
-
- #ifdef __BIG_ENDIAN
--#define LoadHW(addr, value, res) \
-+#define _LoadHW(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ (".set\tnoat\n" \
-- "1:\t"user_lb("%0", "0(%2)")"\n" \
-- "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
-+ "1:\t"type##_lb("%0", "0(%2)")"\n" \
-+ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
- "li\t%1, 0\n" \
-@@ -127,13 +128,15 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-
- #ifndef CONFIG_CPU_MIPSR6
--#define LoadW(addr, value, res) \
-+#define _LoadW(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
-- "1:\t"user_lwl("%0", "(%2)")"\n" \
-- "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
-+ "1:\t"type##_lwl("%0", "(%2)")"\n" \
-+ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
- "li\t%1, 0\n" \
- "3:\n\t" \
- ".insn\n\t" \
-@@ -146,21 +149,24 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-+
- #else
- /* MIPSR6 has no lwl instruction */
--#define LoadW(addr, value, res) \
-+#define _LoadW(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tpush\n" \
- ".set\tnoat\n\t" \
-- "1:"user_lb("%0", "0(%2)")"\n\t" \
-- "2:"user_lbu("$1", "1(%2)")"\n\t" \
-+ "1:"type##_lb("%0", "0(%2)")"\n\t" \
-+ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
-- "3:"user_lbu("$1", "2(%2)")"\n\t" \
-+ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
-- "4:"user_lbu("$1", "3(%2)")"\n\t" \
-+ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
- "li\t%1, 0\n" \
-@@ -178,14 +184,17 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t4b, 11b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-+
- #endif /* CONFIG_CPU_MIPSR6 */
-
--#define LoadHWU(addr, value, res) \
-+#define _LoadHWU(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tnoat\n" \
-- "1:\t"user_lbu("%0", "0(%2)")"\n" \
-- "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
-+ "1:\t"type##_lbu("%0", "0(%2)")"\n" \
-+ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
- "li\t%1, 0\n" \
-@@ -201,13 +210,15 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-
- #ifndef CONFIG_CPU_MIPSR6
--#define LoadWU(addr, value, res) \
-+#define _LoadWU(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
-- "1:\t"user_lwl("%0", "(%2)")"\n" \
-- "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
-+ "1:\t"type##_lwl("%0", "(%2)")"\n" \
-+ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
- "dsll\t%0, %0, 32\n\t" \
- "dsrl\t%0, %0, 32\n\t" \
- "li\t%1, 0\n" \
-@@ -222,9 +233,11 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-
--#define LoadDW(addr, value, res) \
-+#define _LoadDW(addr, value, res) \
-+do { \
- __asm__ __volatile__ ( \
- "1:\tldl\t%0, (%2)\n" \
- "2:\tldr\t%0, 7(%2)\n\t" \
-@@ -240,21 +253,24 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-+
- #else
- /* MIPSR6 has not lwl and ldl instructions */
--#define LoadWU(addr, value, res) \
-+#define _LoadWU(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tpush\n\t" \
- ".set\tnoat\n\t" \
-- "1:"user_lbu("%0", "0(%2)")"\n\t" \
-- "2:"user_lbu("$1", "1(%2)")"\n\t" \
-+ "1:"type##_lbu("%0", "0(%2)")"\n\t" \
-+ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
-- "3:"user_lbu("$1", "2(%2)")"\n\t" \
-+ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
-- "4:"user_lbu("$1", "3(%2)")"\n\t" \
-+ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
- "li\t%1, 0\n" \
-@@ -272,9 +288,11 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t4b, 11b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-
--#define LoadDW(addr, value, res) \
-+#define _LoadDW(addr, value, res) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tpush\n\t" \
- ".set\tnoat\n\t" \
-@@ -319,16 +337,19 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t8b, 11b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-+
- #endif /* CONFIG_CPU_MIPSR6 */
-
-
--#define StoreHW(addr, value, res) \
-+#define _StoreHW(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tnoat\n" \
-- "1:\t"user_sb("%1", "1(%2)")"\n" \
-+ "1:\t"type##_sb("%1", "1(%2)")"\n" \
- "srl\t$1, %1, 0x8\n" \
-- "2:\t"user_sb("$1", "0(%2)")"\n" \
-+ "2:\t"type##_sb("$1", "0(%2)")"\n" \
- ".set\tat\n\t" \
- "li\t%0, 0\n" \
- "3:\n\t" \
-@@ -342,13 +363,15 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=r" (res) \
-- : "r" (value), "r" (addr), "i" (-EFAULT));
-+ : "r" (value), "r" (addr), "i" (-EFAULT));\
-+} while(0)
-
- #ifndef CONFIG_CPU_MIPSR6
--#define StoreW(addr, value, res) \
-+#define _StoreW(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
-- "1:\t"user_swl("%1", "(%2)")"\n" \
-- "2:\t"user_swr("%1", "3(%2)")"\n\t" \
-+ "1:\t"type##_swl("%1", "(%2)")"\n" \
-+ "2:\t"type##_swr("%1", "3(%2)")"\n\t"\
- "li\t%0, 0\n" \
- "3:\n\t" \
- ".insn\n\t" \
-@@ -361,9 +384,11 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=r" (res) \
-- : "r" (value), "r" (addr), "i" (-EFAULT));
-+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-
--#define StoreDW(addr, value, res) \
-+#define _StoreDW(addr, value, res) \
-+do { \
- __asm__ __volatile__ ( \
- "1:\tsdl\t%1,(%2)\n" \
- "2:\tsdr\t%1, 7(%2)\n\t" \
-@@ -379,20 +404,23 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=r" (res) \
-- : "r" (value), "r" (addr), "i" (-EFAULT));
-+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-+
- #else
- /* MIPSR6 has no swl and sdl instructions */
--#define StoreW(addr, value, res) \
-+#define _StoreW(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tpush\n\t" \
- ".set\tnoat\n\t" \
-- "1:"user_sb("%1", "3(%2)")"\n\t" \
-+ "1:"type##_sb("%1", "3(%2)")"\n\t" \
- "srl\t$1, %1, 0x8\n\t" \
-- "2:"user_sb("$1", "2(%2)")"\n\t" \
-+ "2:"type##_sb("$1", "2(%2)")"\n\t" \
- "srl\t$1, $1, 0x8\n\t" \
-- "3:"user_sb("$1", "1(%2)")"\n\t" \
-+ "3:"type##_sb("$1", "1(%2)")"\n\t" \
- "srl\t$1, $1, 0x8\n\t" \
-- "4:"user_sb("$1", "0(%2)")"\n\t" \
-+ "4:"type##_sb("$1", "0(%2)")"\n\t" \
- ".set\tpop\n\t" \
- "li\t%0, 0\n" \
- "10:\n\t" \
-@@ -409,9 +437,11 @@ extern void show_registers(struct pt_regs *regs);
- ".previous" \
- : "=&r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT) \
-- : "memory");
-+ : "memory"); \
-+} while(0)
-
- #define StoreDW(addr, value, res) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tpush\n\t" \
- ".set\tnoat\n\t" \
-@@ -451,15 +481,18 @@ extern void show_registers(struct pt_regs *regs);
- ".previous" \
- : "=&r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT) \
-- : "memory");
-+ : "memory"); \
-+} while(0)
-+
- #endif /* CONFIG_CPU_MIPSR6 */
-
- #else /* __BIG_ENDIAN */
-
--#define LoadHW(addr, value, res) \
-+#define _LoadHW(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ (".set\tnoat\n" \
-- "1:\t"user_lb("%0", "1(%2)")"\n" \
-- "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
-+ "1:\t"type##_lb("%0", "1(%2)")"\n" \
-+ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
- "li\t%1, 0\n" \
-@@ -474,13 +507,15 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-
- #ifndef CONFIG_CPU_MIPSR6
--#define LoadW(addr, value, res) \
-+#define _LoadW(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
-- "1:\t"user_lwl("%0", "3(%2)")"\n" \
-- "2:\t"user_lwr("%0", "(%2)")"\n\t" \
-+ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
-+ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
- "li\t%1, 0\n" \
- "3:\n\t" \
- ".insn\n\t" \
-@@ -493,21 +528,24 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-+
- #else
- /* MIPSR6 has no lwl instruction */
--#define LoadW(addr, value, res) \
-+#define _LoadW(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tpush\n" \
- ".set\tnoat\n\t" \
-- "1:"user_lb("%0", "3(%2)")"\n\t" \
-- "2:"user_lbu("$1", "2(%2)")"\n\t" \
-+ "1:"type##_lb("%0", "3(%2)")"\n\t" \
-+ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
-- "3:"user_lbu("$1", "1(%2)")"\n\t" \
-+ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
-- "4:"user_lbu("$1", "0(%2)")"\n\t" \
-+ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
- "li\t%1, 0\n" \
-@@ -525,15 +563,18 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t4b, 11b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-+
- #endif /* CONFIG_CPU_MIPSR6 */
-
-
--#define LoadHWU(addr, value, res) \
-+#define _LoadHWU(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tnoat\n" \
-- "1:\t"user_lbu("%0", "1(%2)")"\n" \
-- "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
-+ "1:\t"type##_lbu("%0", "1(%2)")"\n" \
-+ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
- "li\t%1, 0\n" \
-@@ -549,13 +590,15 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-
- #ifndef CONFIG_CPU_MIPSR6
--#define LoadWU(addr, value, res) \
-+#define _LoadWU(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
-- "1:\t"user_lwl("%0", "3(%2)")"\n" \
-- "2:\t"user_lwr("%0", "(%2)")"\n\t" \
-+ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
-+ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
- "dsll\t%0, %0, 32\n\t" \
- "dsrl\t%0, %0, 32\n\t" \
- "li\t%1, 0\n" \
-@@ -570,9 +613,11 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-
--#define LoadDW(addr, value, res) \
-+#define _LoadDW(addr, value, res) \
-+do { \
- __asm__ __volatile__ ( \
- "1:\tldl\t%0, 7(%2)\n" \
- "2:\tldr\t%0, (%2)\n\t" \
-@@ -588,21 +633,24 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-+
- #else
- /* MIPSR6 has not lwl and ldl instructions */
--#define LoadWU(addr, value, res) \
-+#define _LoadWU(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tpush\n\t" \
- ".set\tnoat\n\t" \
-- "1:"user_lbu("%0", "3(%2)")"\n\t" \
-- "2:"user_lbu("$1", "2(%2)")"\n\t" \
-+ "1:"type##_lbu("%0", "3(%2)")"\n\t" \
-+ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
-- "3:"user_lbu("$1", "1(%2)")"\n\t" \
-+ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
-- "4:"user_lbu("$1", "0(%2)")"\n\t" \
-+ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
- "sll\t%0, 0x8\n\t" \
- "or\t%0, $1\n\t" \
- "li\t%1, 0\n" \
-@@ -620,9 +668,11 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t4b, 11b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-
--#define LoadDW(addr, value, res) \
-+#define _LoadDW(addr, value, res) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tpush\n\t" \
- ".set\tnoat\n\t" \
-@@ -667,15 +717,17 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t8b, 11b\n\t" \
- ".previous" \
- : "=&r" (value), "=r" (res) \
-- : "r" (addr), "i" (-EFAULT));
-+ : "r" (addr), "i" (-EFAULT)); \
-+} while(0)
- #endif /* CONFIG_CPU_MIPSR6 */
-
--#define StoreHW(addr, value, res) \
-+#define _StoreHW(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tnoat\n" \
-- "1:\t"user_sb("%1", "0(%2)")"\n" \
-+ "1:\t"type##_sb("%1", "0(%2)")"\n" \
- "srl\t$1,%1, 0x8\n" \
-- "2:\t"user_sb("$1", "1(%2)")"\n" \
-+ "2:\t"type##_sb("$1", "1(%2)")"\n" \
- ".set\tat\n\t" \
- "li\t%0, 0\n" \
- "3:\n\t" \
-@@ -689,12 +741,15 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=r" (res) \
-- : "r" (value), "r" (addr), "i" (-EFAULT));
-+ : "r" (value), "r" (addr), "i" (-EFAULT));\
-+} while(0)
-+
- #ifndef CONFIG_CPU_MIPSR6
--#define StoreW(addr, value, res) \
-+#define _StoreW(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
-- "1:\t"user_swl("%1", "3(%2)")"\n" \
-- "2:\t"user_swr("%1", "(%2)")"\n\t" \
-+ "1:\t"type##_swl("%1", "3(%2)")"\n" \
-+ "2:\t"type##_swr("%1", "(%2)")"\n\t"\
- "li\t%0, 0\n" \
- "3:\n\t" \
- ".insn\n\t" \
-@@ -707,9 +762,11 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=r" (res) \
-- : "r" (value), "r" (addr), "i" (-EFAULT));
-+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-
--#define StoreDW(addr, value, res) \
-+#define _StoreDW(addr, value, res) \
-+do { \
- __asm__ __volatile__ ( \
- "1:\tsdl\t%1, 7(%2)\n" \
- "2:\tsdr\t%1, (%2)\n\t" \
-@@ -725,20 +782,23 @@ extern void show_registers(struct pt_regs *regs);
- STR(PTR)"\t2b, 4b\n\t" \
- ".previous" \
- : "=r" (res) \
-- : "r" (value), "r" (addr), "i" (-EFAULT));
-+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
-+} while(0)
-+
- #else
- /* MIPSR6 has no swl and sdl instructions */
--#define StoreW(addr, value, res) \
-+#define _StoreW(addr, value, res, type) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tpush\n\t" \
- ".set\tnoat\n\t" \
-- "1:"user_sb("%1", "0(%2)")"\n\t" \
-+ "1:"type##_sb("%1", "0(%2)")"\n\t" \
- "srl\t$1, %1, 0x8\n\t" \
-- "2:"user_sb("$1", "1(%2)")"\n\t" \
-+ "2:"type##_sb("$1", "1(%2)")"\n\t" \
- "srl\t$1, $1, 0x8\n\t" \
-- "3:"user_sb("$1", "2(%2)")"\n\t" \
-+ "3:"type##_sb("$1", "2(%2)")"\n\t" \
- "srl\t$1, $1, 0x8\n\t" \
-- "4:"user_sb("$1", "3(%2)")"\n\t" \
-+ "4:"type##_sb("$1", "3(%2)")"\n\t" \
- ".set\tpop\n\t" \
- "li\t%0, 0\n" \
- "10:\n\t" \
-@@ -755,9 +815,11 @@ extern void show_registers(struct pt_regs *regs);
- ".previous" \
- : "=&r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT) \
-- : "memory");
-+ : "memory"); \
-+} while(0)
-
--#define StoreDW(addr, value, res) \
-+#define _StoreDW(addr, value, res) \
-+do { \
- __asm__ __volatile__ ( \
- ".set\tpush\n\t" \
- ".set\tnoat\n\t" \
-@@ -797,10 +859,28 @@ extern void show_registers(struct pt_regs *regs);
- ".previous" \
- : "=&r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT) \
-- : "memory");
-+ : "memory"); \
-+} while(0)
-+
- #endif /* CONFIG_CPU_MIPSR6 */
- #endif
-
-+#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
-+#define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user)
-+#define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel)
-+#define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user)
-+#define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel)
-+#define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user)
-+#define LoadW(addr, value, res) _LoadW(addr, value, res, kernel)
-+#define LoadWE(addr, value, res) _LoadW(addr, value, res, user)
-+#define LoadDW(addr, value, res) _LoadDW(addr, value, res)
-+
-+#define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel)
-+#define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user)
-+#define StoreW(addr, value, res) _StoreW(addr, value, res, kernel)
-+#define StoreWE(addr, value, res) _StoreW(addr, value, res, user)
-+#define StoreDW(addr, value, res) _StoreDW(addr, value, res)
-+
- static void emulate_load_store_insn(struct pt_regs *regs,
- void __user *addr, unsigned int __user *pc)
- {
-@@ -872,7 +952,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
- set_fs(seg);
- goto sigbus;
- }
-- LoadHW(addr, value, res);
-+ LoadHWE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
-@@ -885,7 +965,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
- set_fs(seg);
- goto sigbus;
- }
-- LoadW(addr, value, res);
-+ LoadWE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
-@@ -898,7 +978,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
- set_fs(seg);
- goto sigbus;
- }
-- LoadHWU(addr, value, res);
-+ LoadHWUE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
-@@ -913,7 +993,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
- }
- compute_return_epc(regs);
- value = regs->regs[insn.spec3_format.rt];
-- StoreHW(addr, value, res);
-+ StoreHWE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
-@@ -926,7 +1006,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
- }
- compute_return_epc(regs);
- value = regs->regs[insn.spec3_format.rt];
-- StoreW(addr, value, res);
-+ StoreWE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
-@@ -943,7 +1023,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
- if (!access_ok(VERIFY_READ, addr, 2))
- goto sigbus;
-
-- LoadHW(addr, value, res);
-+ if (config_enabled(CONFIG_EVA)) {
-+ if (segment_eq(get_fs(), get_ds()))
-+ LoadHW(addr, value, res);
-+ else
-+ LoadHWE(addr, value, res);
-+ } else {
-+ LoadHW(addr, value, res);
-+ }
-+
- if (res)
- goto fault;
- compute_return_epc(regs);
-@@ -954,7 +1042,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
- if (!access_ok(VERIFY_READ, addr, 4))
- goto sigbus;
-
-- LoadW(addr, value, res);
-+ if (config_enabled(CONFIG_EVA)) {
-+ if (segment_eq(get_fs(), get_ds()))
-+ LoadW(addr, value, res);
-+ else
-+ LoadWE(addr, value, res);
-+ } else {
-+ LoadW(addr, value, res);
-+ }
-+
- if (res)
- goto fault;
- compute_return_epc(regs);
-@@ -965,7 +1061,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
- if (!access_ok(VERIFY_READ, addr, 2))
- goto sigbus;
-
-- LoadHWU(addr, value, res);
-+ if (config_enabled(CONFIG_EVA)) {
-+ if (segment_eq(get_fs(), get_ds()))
-+ LoadHWU(addr, value, res);
-+ else
-+ LoadHWUE(addr, value, res);
-+ } else {
-+ LoadHWU(addr, value, res);
-+ }
-+
- if (res)
- goto fault;
- compute_return_epc(regs);
-@@ -1024,7 +1128,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
-
- compute_return_epc(regs);
- value = regs->regs[insn.i_format.rt];
-- StoreHW(addr, value, res);
-+
-+ if (config_enabled(CONFIG_EVA)) {
-+ if (segment_eq(get_fs(), get_ds()))
-+ StoreHW(addr, value, res);
-+ else
-+ StoreHWE(addr, value, res);
-+ } else {
-+ StoreHW(addr, value, res);
-+ }
-+
- if (res)
- goto fault;
- break;
-@@ -1035,7 +1148,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
-
- compute_return_epc(regs);
- value = regs->regs[insn.i_format.rt];
-- StoreW(addr, value, res);
-+
-+ if (config_enabled(CONFIG_EVA)) {
-+ if (segment_eq(get_fs(), get_ds()))
-+ StoreW(addr, value, res);
-+ else
-+ StoreWE(addr, value, res);
-+ } else {
-+ StoreW(addr, value, res);
-+ }
-+
- if (res)
- goto fault;
- break;
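
The MIPS hunks above rework the unaligned-access emulation macros: instead of hard-coding the user-mode (EVA) accessors, each macro body now takes a type argument that the preprocessor pastes onto the helper name, so a single definition yields both a kernel variant and a user variant (LoadHW vs LoadHWE, StoreW vs StoreWE, and so on), selected at the call site via segment_eq(get_fs(), get_ds()). A minimal, self-contained sketch of that token-pasting pattern, with plain C stand-ins for the inline-assembly helpers (all names hypothetical):

#include <stdio.h>

/* Stand-ins for the kernel/user access helpers the real macros
 * paste together (kernel lbu vs EVA user_lbu in inline asm). */
#define kernel_lbu(p)	(*(p))
#define user_lbu(p)	(*(p))

/* One macro body; 'type' token-pastes to pick the helper flavour. */
#define _LoadB(p, out, type)	do { (out) = type##_lbu(p); } while (0)

#define LoadB(p, out)	_LoadB(p, out, kernel)	/* cf. LoadHW  */
#define LoadBE(p, out)	_LoadB(p, out, user)	/* cf. LoadHWE */

int main(void)
{
	unsigned char b = 0x7f, v;

	LoadB(&b, v);	/* expands to kernel_lbu(&b) */
	LoadBE(&b, v);	/* expands to user_lbu(&b)   */
	printf("0x%x\n", v);
	return 0;
}
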
-diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
-index fb3e8df..838d3a6 100644
---- a/arch/mips/kvm/emulate.c
-+++ b/arch/mips/kvm/emulate.c
-@@ -2176,6 +2176,7 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
- case T_SYSCALL:
- case T_BREAK:
- case T_RES_INST:
-+ case T_MSADIS:
- break;
-
- case T_COP_UNUSABLE:
-diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
-index c9eccf5..f5e7dda 100644
---- a/arch/mips/kvm/mips.c
-+++ b/arch/mips/kvm/mips.c
-@@ -1119,6 +1119,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
- ret = kvm_mips_callbacks->handle_break(vcpu);
- break;
-
-+ case T_MSADIS:
-+ ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
-+ break;
-+
- default:
- kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
- exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
-diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
-index fd7257b..4372cc8 100644
---- a/arch/mips/kvm/trap_emul.c
-+++ b/arch/mips/kvm/trap_emul.c
-@@ -330,6 +330,33 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
- return ret;
- }
-
-+static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
-+{
-+ struct kvm_run *run = vcpu->run;
-+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-+ unsigned long cause = vcpu->arch.host_cp0_cause;
-+ enum emulation_result er = EMULATE_DONE;
-+ int ret = RESUME_GUEST;
-+
-+ /* No MSA supported in guest, guest reserved instruction exception */
-+ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
-+
-+ switch (er) {
-+ case EMULATE_DONE:
-+ ret = RESUME_GUEST;
-+ break;
-+
-+ case EMULATE_FAIL:
-+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-+ ret = RESUME_HOST;
-+ break;
-+
-+ default:
-+ BUG();
-+ }
-+ return ret;
-+}
-+
- static int kvm_trap_emul_vm_init(struct kvm *kvm)
- {
- return 0;
-@@ -470,6 +497,7 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
- .handle_syscall = kvm_trap_emul_handle_syscall,
- .handle_res_inst = kvm_trap_emul_handle_res_inst,
- .handle_break = kvm_trap_emul_handle_break,
-+ .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
-
- .vm_init = kvm_trap_emul_vm_init,
- .vcpu_init = kvm_trap_emul_vcpu_init,
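
The three KVM/MIPS hunks above wire one new exit reason through the stack: T_MSADIS is whitelisted in kvm_mips_check_privilege(), dispatched in kvm_mips_handle_exit(), and handled by a new trap_emul callback that reflects a reserved-instruction exception into the guest, since the guest has no MSA. A simplified, runnable sketch of the dispatch shape (the exception-code value and all names are illustrative):

#include <stdio.h>

enum result { RESUME_GUEST, RESUME_HOST };

/* cf. struct kvm_mips_callbacks; only the new hook is modeled. */
struct callbacks {
	enum result (*handle_msa_disabled)(void *vcpu);
};

/* cf. kvm_mips_handle_exit(): route the new exit code to its handler
 * instead of falling through to the "not yet handled" default. */
static enum result handle_exit(int exccode, const struct callbacks *cb,
			       void *vcpu)
{
	switch (exccode) {
	case 21: /* T_MSADIS (value assumed for the example) */
		return cb->handle_msa_disabled(vcpu);
	default:
		return RESUME_HOST;
	}
}

static enum result stub_msa_disabled(void *vcpu)
{
	(void)vcpu;
	return RESUME_GUEST;	/* real handler injects an RI exception */
}

int main(void)
{
	struct callbacks cb = { .handle_msa_disabled = stub_msa_disabled };

	puts(handle_exit(21, &cb, NULL) == RESUME_GUEST ?
	     "resumed guest" : "exited to host");
	return 0;
}
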
-diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c
-index 21221ed..0f75b6b 100644
---- a/arch/mips/loongson/loongson-3/irq.c
-+++ b/arch/mips/loongson/loongson-3/irq.c
-@@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending)
-
- static struct irqaction cascade_irqaction = {
- .handler = no_action,
-+ .flags = IRQF_NO_SUSPEND,
- .name = "cascade",
- };
-
-diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
-index 8fddd2cd..efe366d 100644
---- a/arch/mips/mti-malta/malta-memory.c
-+++ b/arch/mips/mti-malta/malta-memory.c
-@@ -53,6 +53,12 @@ fw_memblock_t * __init fw_getmdesc(int eva)
- pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
- physical_memsize = 0x02000000;
- } else {
-+ if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
-+ pr_warn("Unsupported memsize value (0x%lx) detected! "
-+ "Using 0x10000000 (256M) instead\n",
-+ memsize);
-+ memsize = 256 << 20;
-+ }
- /* If ememsize is set, then set physical_memsize to that */
- physical_memsize = ememsize ? : memsize;
- }
-diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
-index 32a7c82..e7567c8 100644
---- a/arch/mips/power/hibernate.S
-+++ b/arch/mips/power/hibernate.S
-@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
- END(swsusp_arch_suspend)
-
- LEAF(swsusp_arch_resume)
-+ /* Avoid TLB mismatch during and after kernel resume */
-+ jal local_flush_tlb_all
- PTR_L t0, restore_pblist
- 0:
- PTR_L t1, PBE_ADDRESS(t0) /* source */
-@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
- bne t1, t3, 1b
- PTR_L t0, PBE_NEXT(t0)
- bnez t0, 0b
-- jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
- PTR_LA t0, saved_regs
- PTR_L ra, PT_R31(t0)
- PTR_L sp, PT_R29(t0)
-diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
-index ae77b7e..c641983 100644
---- a/arch/powerpc/kernel/cacheinfo.c
-+++ b/arch/powerpc/kernel/cacheinfo.c
-@@ -61,12 +61,22 @@ struct cache_type_info {
- };
-
- /* These are used to index the cache_type_info array. */
--#define CACHE_TYPE_UNIFIED 0
--#define CACHE_TYPE_INSTRUCTION 1
--#define CACHE_TYPE_DATA 2
-+#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
-+#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
-+#define CACHE_TYPE_INSTRUCTION 2
-+#define CACHE_TYPE_DATA 3
-
- static const struct cache_type_info cache_type_info[] = {
- {
-+ /* Embedded systems that use cache-size, cache-block-size,
-+ * etc. for the Unified (typically L2) cache. */
-+ .name = "Unified",
-+ .size_prop = "cache-size",
-+ .line_size_props = { "cache-line-size",
-+ "cache-block-size", },
-+ .nr_sets_prop = "cache-sets",
-+ },
-+ {
- /* PowerPC Processor binding says the [di]-cache-*
- * must be equal on unified caches, so just use
- * d-cache properties. */
-@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
- {
- struct cache *iter;
-
-- if (cache->type == CACHE_TYPE_UNIFIED)
-+ if (cache->type == CACHE_TYPE_UNIFIED ||
-+ cache->type == CACHE_TYPE_UNIFIED_D)
- return cache;
-
- list_for_each_entry(iter, &cache_list, list)
-@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
- return of_get_property(np, "cache-unified", NULL);
- }
-
--static struct cache *cache_do_one_devnode_unified(struct device_node *node,
-- int level)
-+/*
-+ * Unified caches can have two different sets of tags. Most embedded systems
-+ * use cache-size, etc. for the unified cache size, but open firmware systems
-+ * use d-cache-size, etc. Check on initialization for which type we have, and
-+ * return the appropriate structure type. Assume it's embedded if it isn't
-+ * open firmware. If it's yet a 3rd type, then there will be missing entries
-+ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
-+ * to be extended further.
-+ */
-+static int cache_is_unified_d(const struct device_node *np)
- {
-- struct cache *cache;
-+ return of_get_property(np,
-+ cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
-+ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
-+}
-
-+/*
-+ */
-+static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
-+{
- pr_debug("creating L%d ucache for %s\n", level, node->full_name);
-
-- cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
--
-- return cache;
-+ return new_cache(cache_is_unified_d(node), level, node);
- }
-
- static struct cache *cache_do_one_devnode_split(struct device_node *node,
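
The cacheinfo change above teaches the PowerPC code that a unified cache can be described two ways: embedded device trees use cache-size and friends, open-firmware trees use d-cache-size; cache_is_unified_d() probes for the latter property and returns the matching index into cache_type_info[]. A toy version of that probe-a-property-to-pick-a-type idea, with the device-tree lookup replaced by a string list (all of it illustrative):

#include <stdio.h>
#include <string.h>

enum { CACHE_TYPE_UNIFIED, CACHE_TYPE_UNIFIED_D };

/* Toy stand-in for of_get_property(): a node is a NULL-terminated
 * list of property names. */
static int has_prop(const char *const *props, const char *name)
{
	for (; *props; props++)
		if (!strcmp(*props, name))
			return 1;
	return 0;
}

static int cache_is_unified_d(const char *const *props)
{
	return has_prop(props, "d-cache-size") ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

int main(void)
{
	const char *embedded[] = { "cache-size", "cache-unified", NULL };
	const char *ofw[] = { "d-cache-size", "cache-unified", NULL };

	printf("embedded=%d ofw=%d\n",
	       cache_is_unified_d(embedded), cache_is_unified_d(ofw));
	return 0;
}
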
-diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
-index 7e408bf..cecbe00 100644
---- a/arch/powerpc/mm/hugetlbpage.c
-+++ b/arch/powerpc/mm/hugetlbpage.c
-@@ -581,6 +581,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
- pmd = pmd_offset(pud, start);
- pud_clear(pud);
- pmd_free_tlb(tlb, pmd, start);
-+ mm_dec_nr_pmds(tlb->mm);
- }
-
- static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
-index 2396dda..ead5535 100644
---- a/arch/powerpc/perf/callchain.c
-+++ b/arch/powerpc/perf/callchain.c
-@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
- sp = regs->gpr[1];
- perf_callchain_store(entry, next_ip);
-
-- for (;;) {
-+ while (entry->nr < PERF_MAX_STACK_DEPTH) {
- fp = (unsigned long __user *) sp;
- if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
- return;
-diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
-index 4c11421..3af8324 100644
---- a/arch/powerpc/platforms/cell/interrupt.c
-+++ b/arch/powerpc/platforms/cell/interrupt.c
-@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
-
- void iic_setup_cpu(void)
- {
-- out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
-+ out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
- }
-
- u8 iic_get_target_id(int cpu)
-diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
-index c7c8720..63db1b0 100644
---- a/arch/powerpc/platforms/cell/iommu.c
-+++ b/arch/powerpc/platforms/cell/iommu.c
-@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
-
- io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
-
-- for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
-+ for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
- io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
-
- mb();
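
The Cell IOMMU fix above is a one-token bug class worth spelling out: the loop advanced uaddr by the page shift (12) instead of the page size (1 << 12), so successive IOMMU entries pointed at nearly the same bytes. A toy demonstration of the difference:

#include <stdio.h>

int main(void)
{
	const unsigned long page_shift = 12;	/* 4 KiB pages */
	unsigned long wrong = 0, right = 0;
	int i;

	for (i = 0; i < 3; i++) {
		wrong += page_shift;		/* 12 bytes per "page"  */
		right += 1UL << page_shift;	/* 4096 bytes per page  */
	}
	printf("wrong=%lu right=%lu\n", wrong, right);
	return 0;
}
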
-diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
-index 6c9ff2b..1d9369e 100644
---- a/arch/powerpc/platforms/powernv/pci-ioda.c
-+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
-@@ -1777,7 +1777,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
- region.start += phb->ioda.io_segsize;
- index++;
- }
-- } else if (res->flags & IORESOURCE_MEM) {
-+ } else if ((res->flags & IORESOURCE_MEM) &&
-+ !pnv_pci_is_mem_pref_64(res->flags)) {
- region.start = res->start -
- hose->mem_offset[0] -
- phb->ioda.m32_pci_base;
-diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
-index 1c4c5ac..d3236c9 100644
---- a/arch/s390/kernel/suspend.c
-+++ b/arch/s390/kernel/suspend.c
-@@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
- {
- unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
- unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
-+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
-+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
-
- /* Always save lowcore pages (LC protection might be enabled). */
- if (pfn <= LC_PAGES)
-@@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
- if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
- return 1;
- /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
-+ if (pfn >= stext_pfn && pfn <= eshared_pfn)
-+ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
- if (tprot(PFN_PHYS(pfn)))
- return 1;
- return 0;
-diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
-index 073b5f3..e7bc2fd 100644
---- a/arch/s390/kvm/interrupt.c
-+++ b/arch/s390/kvm/interrupt.c
-@@ -17,6 +17,7 @@
- #include <linux/signal.h>
- #include <linux/slab.h>
- #include <linux/bitmap.h>
-+#include <linux/vmalloc.h>
- #include <asm/asm-offsets.h>
- #include <asm/uaccess.h>
- #include <asm/sclp.h>
-@@ -1332,10 +1333,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
- return rc;
- }
-
--void kvm_s390_reinject_io_int(struct kvm *kvm,
-+int kvm_s390_reinject_io_int(struct kvm *kvm,
- struct kvm_s390_interrupt_info *inti)
- {
-- __inject_vm(kvm, inti);
-+ return __inject_vm(kvm, inti);
- }
-
- int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
-@@ -1455,61 +1456,66 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
- spin_unlock(&fi->lock);
- }
-
--static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
-- u8 *addr)
-+static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
-+ struct kvm_s390_irq *irq)
- {
-- struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
-- struct kvm_s390_irq irq = {0};
--
-- irq.type = inti->type;
-+ irq->type = inti->type;
- switch (inti->type) {
- case KVM_S390_INT_PFAULT_INIT:
- case KVM_S390_INT_PFAULT_DONE:
- case KVM_S390_INT_VIRTIO:
- case KVM_S390_INT_SERVICE:
-- irq.u.ext = inti->ext;
-+ irq->u.ext = inti->ext;
- break;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-- irq.u.io = inti->io;
-+ irq->u.io = inti->io;
- break;
- case KVM_S390_MCHK:
-- irq.u.mchk = inti->mchk;
-+ irq->u.mchk = inti->mchk;
- break;
-- default:
-- return -EINVAL;
- }
--
-- if (copy_to_user(uptr, &irq, sizeof(irq)))
-- return -EFAULT;
--
-- return 0;
- }
-
--static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
-+static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
- {
- struct kvm_s390_interrupt_info *inti;
- struct kvm_s390_float_interrupt *fi;
-+ struct kvm_s390_irq *buf;
-+ int max_irqs;
- int ret = 0;
- int n = 0;
-
-+ if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
-+ return -EINVAL;
-+
-+ /*
-+ * We are already using -ENOMEM to signal
-+ * userspace it may retry with a bigger buffer,
-+ * so we need to use something else for this case
-+ */
-+ buf = vzalloc(len);
-+ if (!buf)
-+ return -ENOBUFS;
-+
-+ max_irqs = len / sizeof(struct kvm_s390_irq);
-+
- fi = &kvm->arch.float_int;
- spin_lock(&fi->lock);
--
- list_for_each_entry(inti, &fi->list, list) {
-- if (len < sizeof(struct kvm_s390_irq)) {
-+ if (n == max_irqs) {
- /* signal userspace to try again */
- ret = -ENOMEM;
- break;
- }
-- ret = copy_irq_to_user(inti, buf);
-- if (ret)
-- break;
-- buf += sizeof(struct kvm_s390_irq);
-- len -= sizeof(struct kvm_s390_irq);
-+ inti_to_irq(inti, &buf[n]);
- n++;
- }
--
- spin_unlock(&fi->lock);
-+ if (!ret && n > 0) {
-+ if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
-+ ret = -EFAULT;
-+ }
-+ vfree(buf);
-
- return ret < 0 ? ret : n;
- }
-@@ -1520,7 +1526,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
-
- switch (attr->group) {
- case KVM_DEV_FLIC_GET_ALL_IRQS:
-- r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
-+ r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
- attr->attr);
- break;
- default:
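
The s390 rewrite above exists because copy_to_user() can fault and sleep, which is not allowed under the fi->lock spinlock: the fix snapshots the pending-interrupt list into a vzalloc'd kernel buffer while holding the lock and copies to userspace only after unlocking (with -ENOBUFS for allocation failure, since -ENOMEM already means "retry with a bigger buffer"). A userspace analogue of the pattern, with a pthread mutex and write() standing in for the spinlock and copy_to_user() (names illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int items[64];
static size_t n_items;

static ssize_t copy_snapshot(int fd, size_t max)
{
	int *buf = calloc(max, sizeof(*buf));	/* cf. vzalloc(len) */
	size_t n;
	ssize_t ret;

	if (!buf)
		return -1;

	pthread_mutex_lock(&lock);		/* cf. spin_lock(&fi->lock) */
	n = n_items < max ? n_items : max;	/* cf. the max_irqs bound */
	memcpy(buf, items, n * sizeof(*buf));
	pthread_mutex_unlock(&lock);

	/* The blocking copy runs with the lock already dropped,
	 * mirroring copy_to_user() after spin_unlock(). */
	ret = write(fd, buf, n * sizeof(*buf));
	free(buf);
	return ret;
}

int main(void)
{
	items[0] = 42;
	n_items = 1;
	return copy_snapshot(STDOUT_FILENO, 8) < 0;
}
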
-diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
-index c34109a..6995a30 100644
---- a/arch/s390/kvm/kvm-s390.h
-+++ b/arch/s390/kvm/kvm-s390.h
-@@ -151,8 +151,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
- int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
- struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
- u64 cr6, u64 schid);
--void kvm_s390_reinject_io_int(struct kvm *kvm,
-- struct kvm_s390_interrupt_info *inti);
-+int kvm_s390_reinject_io_int(struct kvm *kvm,
-+ struct kvm_s390_interrupt_info *inti);
- int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
-
- /* implemented in intercept.c */
-diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
-index 3511169..b982fbc 100644
---- a/arch/s390/kvm/priv.c
-+++ b/arch/s390/kvm/priv.c
-@@ -229,18 +229,19 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
- struct kvm_s390_interrupt_info *inti;
- unsigned long len;
- u32 tpi_data[3];
-- int cc, rc;
-+ int rc;
- u64 addr;
-
-- rc = 0;
- addr = kvm_s390_get_base_disp_s(vcpu);
- if (addr & 3)
- return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-- cc = 0;
-+
- inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
-- if (!inti)
-- goto no_interrupt;
-- cc = 1;
-+ if (!inti) {
-+ kvm_s390_set_psw_cc(vcpu, 0);
-+ return 0;
-+ }
-+
- tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
- tpi_data[1] = inti->io.io_int_parm;
- tpi_data[2] = inti->io.io_int_word;
-@@ -251,30 +252,38 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
- */
- len = sizeof(tpi_data) - 4;
- rc = write_guest(vcpu, addr, &tpi_data, len);
-- if (rc)
-- return kvm_s390_inject_prog_cond(vcpu, rc);
-+ if (rc) {
-+ rc = kvm_s390_inject_prog_cond(vcpu, rc);
-+ goto reinject_interrupt;
-+ }
- } else {
- /*
- * Store the three-word I/O interruption code into
- * the appropriate lowcore area.
- */
- len = sizeof(tpi_data);
-- if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
-+ if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
-+ /* failed writes to the low core are not recoverable */
- rc = -EFAULT;
-+ goto reinject_interrupt;
-+ }
- }
-+
-+ /* irq was successfully handed to the guest */
-+ kfree(inti);
-+ kvm_s390_set_psw_cc(vcpu, 1);
-+ return 0;
-+reinject_interrupt:
- /*
- * If we encounter a problem storing the interruption code, the
- * instruction is suppressed from the guest's view: reinject the
- * interrupt.
- */
-- if (!rc)
-+ if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
- kfree(inti);
-- else
-- kvm_s390_reinject_io_int(vcpu->kvm, inti);
--no_interrupt:
-- /* Set condition code and we're done. */
-- if (!rc)
-- kvm_s390_set_psw_cc(vcpu, cc);
-+ rc = -EFAULT;
-+ }
-+ /* don't set the cc, a pgm irq was injected or we drop to user space */
- return rc ? -EFAULT : 0;
- }
-
-@@ -467,6 +476,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
- for (n = mem->count - 1; n > 0 ; n--)
- memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
-
-+ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
- mem->vm[0].cpus_total = cpus;
- mem->vm[0].cpus_configured = cpus;
- mem->vm[0].cpus_standby = 0;
-diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
-index 47f29b1..e7814b7 100644
---- a/arch/x86/include/asm/insn.h
-+++ b/arch/x86/include/asm/insn.h
-@@ -69,7 +69,7 @@ struct insn {
- const insn_byte_t *next_byte;
- };
-
--#define MAX_INSN_SIZE 16
-+#define MAX_INSN_SIZE 15
-
- #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
- #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
-diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
-index a1410db..653dfa7 100644
---- a/arch/x86/include/asm/mwait.h
-+++ b/arch/x86/include/asm/mwait.h
-@@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
- :: "a" (eax), "c" (ecx));
- }
-
-+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-+{
-+ trace_hardirqs_on();
-+ /* "mwait %eax, %ecx;" */
-+ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
-+ :: "a" (eax), "c" (ecx));
-+}
-+
- /*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
-diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
-index d6b078e..25b1cc0 100644
---- a/arch/x86/include/asm/pvclock.h
-+++ b/arch/x86/include/asm/pvclock.h
-@@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
-
- struct pvclock_vsyscall_time_info {
- struct pvclock_vcpu_time_info pvti;
-+ u32 migrate_count;
- } __attribute__((__aligned__(SMP_CACHE_BYTES)));
-
- #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
-diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
-index 0739833..666bcf1 100644
---- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
-+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
-@@ -557,6 +557,8 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
- INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
- INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
- INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
-+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
- EVENT_CONSTRAINT_END
- };
-
-@@ -564,6 +566,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
- INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
- INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
- INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
-+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
- EVENT_CONSTRAINT_END
- };
-
-@@ -587,6 +591,8 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
- INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
- INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
- INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
-+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
- EVENT_CONSTRAINT_END
- };
-
-@@ -602,6 +608,8 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
- INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
- INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
- INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
-+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
- EVENT_CONSTRAINT_END
- };
-
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index 046e2d6..a388bb8 100644
---- a/arch/x86/kernel/process.c
-+++ b/arch/x86/kernel/process.c
-@@ -24,6 +24,7 @@
- #include <asm/syscalls.h>
- #include <asm/idle.h>
- #include <asm/uaccess.h>
-+#include <asm/mwait.h>
- #include <asm/i387.h>
- #include <asm/fpu-internal.h>
- #include <asm/debugreg.h>
-@@ -399,6 +400,53 @@ static void amd_e400_idle(void)
- default_idle();
- }
-
-+/*
-+ * Intel Core2 and older machines prefer MWAIT over HALT for C1.
-+ * We can't rely on cpuidle installing MWAIT, because it will not load
-+ * on systems that support only C1 -- so the boot default must be MWAIT.
-+ *
-+ * Some AMD machines are the opposite, they depend on using HALT.
-+ *
-+ * So for default C1, which is used during boot until cpuidle loads,
-+ * use MWAIT-C1 on Intel HW that has it, else use HALT.
-+ */
-+static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
-+{
-+ if (c->x86_vendor != X86_VENDOR_INTEL)
-+ return 0;
-+
-+ if (!cpu_has(c, X86_FEATURE_MWAIT))
-+ return 0;
-+
-+ return 1;
-+}
-+
-+/*
-+ * MONITOR/MWAIT with no hints, used for the default C1 state.
-+ * This invokes MWAIT with interrupts enabled and no flags,
-+ * which is backwards compatible with the original MWAIT implementation.
-+ */
-+
-+static void mwait_idle(void)
-+{
-+ if (!current_set_polling_and_test()) {
-+ if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
-+ smp_mb(); /* quirk */
-+ clflush((void *)&current_thread_info()->flags);
-+ smp_mb(); /* quirk */
-+ }
-+
-+ __monitor((void *)&current_thread_info()->flags, 0, 0);
-+ if (!need_resched())
-+ __sti_mwait(0, 0);
-+ else
-+ local_irq_enable();
-+ } else {
-+ local_irq_enable();
-+ }
-+ __current_clr_polling();
-+}
-+
- void select_idle_routine(const struct cpuinfo_x86 *c)
- {
- #ifdef CONFIG_SMP
-@@ -412,6 +460,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
- /* E400: APIC timer interrupt does not wake up CPU from C1e */
- pr_info("using AMD E400 aware idle routine\n");
- x86_idle = amd_e400_idle;
-+ } else if (prefer_mwait_c1_over_halt(c)) {
-+ pr_info("using mwait in idle threads\n");
-+ x86_idle = mwait_idle;
- } else
- x86_idle = default_idle;
- }
-diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
-index 2f355d2..e5ecd20 100644
---- a/arch/x86/kernel/pvclock.c
-+++ b/arch/x86/kernel/pvclock.c
-@@ -141,7 +141,46 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
- set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
- }
-
-+static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
-+
-+static struct pvclock_vsyscall_time_info *
-+pvclock_get_vsyscall_user_time_info(int cpu)
-+{
-+ if (!pvclock_vdso_info) {
-+ BUG();
-+ return NULL;
-+ }
-+
-+ return &pvclock_vdso_info[cpu];
-+}
-+
-+struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
-+{
-+ return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
-+}
-+
- #ifdef CONFIG_X86_64
-+static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
-+ void *v)
-+{
-+ struct task_migration_notifier *mn = v;
-+ struct pvclock_vsyscall_time_info *pvti;
-+
-+ pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
-+
-+ /* this is NULL when pvclock vsyscall is not initialized */
-+ if (unlikely(pvti == NULL))
-+ return NOTIFY_DONE;
-+
-+ pvti->migrate_count++;
-+
-+ return NOTIFY_DONE;
-+}
-+
-+static struct notifier_block pvclock_migrate = {
-+ .notifier_call = pvclock_task_migrate,
-+};
-+
- /*
- * Initialize the generic pvclock vsyscall state. This will allocate
- * a/some page(s) for the per-vcpu pvclock information, set up a
-@@ -155,12 +194,17 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
-
- WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
-
-+ pvclock_vdso_info = i;
-+
- for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
- __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
- __pa(i) + (idx*PAGE_SIZE),
- PAGE_KERNEL_VVAR);
- }
-
-+
-+ register_task_migration_notifier(&pvclock_migrate);
-+
- return 0;
- }
- #endif
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index ae4f6d3..a60bd3a 100644
---- a/arch/x86/kvm/vmx.c
-+++ b/arch/x86/kvm/vmx.c
-@@ -3621,8 +3621,16 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
-
- static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
- {
-- unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
-- KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
-+ /*
-+ * Pass through host's Machine Check Enable value to hw_cr4, which
-+ * is in force while we are in guest mode. Do not let guests control
-+ * this bit, even if host CR4.MCE == 0.
-+ */
-+ unsigned long hw_cr4 =
-+ (cr4_read_shadow() & X86_CR4_MCE) |
-+ (cr4 & ~X86_CR4_MCE) |
-+ (to_vmx(vcpu)->rmode.vm86_active ?
-+ KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
-
- if (cr4 & X86_CR4_VMXE) {
- /*
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 32bf19e..e222ba5 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -5775,7 +5775,6 @@ int kvm_arch_init(void *opaque)
- kvm_set_mmio_spte_mask();
-
- kvm_x86_ops = ops;
-- kvm_init_msr_list();
-
- kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
- PT_DIRTY_MASK, PT64_NX_MASK, 0);
-@@ -7209,7 +7208,14 @@ void kvm_arch_hardware_disable(void)
-
- int kvm_arch_hardware_setup(void)
- {
-- return kvm_x86_ops->hardware_setup();
-+ int r;
-+
-+ r = kvm_x86_ops->hardware_setup();
-+ if (r != 0)
-+ return r;
-+
-+ kvm_init_msr_list();
-+ return 0;
- }
-
- void kvm_arch_hardware_unsetup(void)
-diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
-index 1313ae6..85994f5 100644
---- a/arch/x86/lib/insn.c
-+++ b/arch/x86/lib/insn.c
-@@ -52,6 +52,13 @@
- */
- void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
- {
-+ /*
-+ * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
-+ * even if the input buffer is long enough to hold them.
-+ */
-+ if (buf_len > MAX_INSN_SIZE)
-+ buf_len = MAX_INSN_SIZE;
-+
- memset(insn, 0, sizeof(*insn));
- insn->kaddr = kaddr;
- insn->end_kaddr = kaddr + buf_len;
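
This insn.c guard pairs with the insn.h hunk earlier that drops MAX_INSN_SIZE from 16 to 15: an x86 instruction is architecturally at most 15 bytes, so insn_init() now clamps the decode window no matter how large the caller's buffer is. The guard in isolation:

#include <stdio.h>

#define MAX_INSN_SIZE 15	/* architectural x86 limit */

static int clamp_decode_window(int buf_len)
{
	/* Longer byte sequences cannot form one valid instruction,
	 * even if the input buffer could hold them. */
	return buf_len > MAX_INSN_SIZE ? MAX_INSN_SIZE : buf_len;
}

int main(void)
{
	printf("%d %d\n", clamp_decode_window(8), clamp_decode_window(64));
	return 0;
}
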
-diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
-index 1f33b3d..0a42327 100644
---- a/arch/x86/lib/usercopy_64.c
-+++ b/arch/x86/lib/usercopy_64.c
-@@ -82,7 +82,7 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
- clac();
-
- /* If the destination is a kernel buffer, we always clear the end */
-- if ((unsigned long)to >= TASK_SIZE_MAX)
-+ if (!__addr_ok(to))
- memset(to, 0, len);
- return len;
- }
-diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
-index 9793322..40d2473 100644
---- a/arch/x86/vdso/vclock_gettime.c
-+++ b/arch/x86/vdso/vclock_gettime.c
-@@ -82,18 +82,15 @@ static notrace cycle_t vread_pvclock(int *mode)
- cycle_t ret;
- u64 last;
- u32 version;
-+ u32 migrate_count;
- u8 flags;
- unsigned cpu, cpu1;
-
-
- /*
-- * Note: hypervisor must guarantee that:
-- * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
-- * 2. that per-CPU pvclock time info is updated if the
-- * underlying CPU changes.
-- * 3. that version is increased whenever underlying CPU
-- * changes.
-- *
-+ * When looping to get a consistent (time-info, tsc) pair, we
-+ * also need to deal with the possibility we can switch vcpus,
-+ * so make sure we always re-fetch time-info for the current vcpu.
- */
- do {
- cpu = __getcpu() & VGETCPU_CPU_MASK;
-@@ -102,20 +99,27 @@ static notrace cycle_t vread_pvclock(int *mode)
- * __getcpu() calls (Gleb).
- */
-
-- pvti = get_pvti(cpu);
-+ /* Make sure migrate_count will change if we leave the VCPU. */
-+ do {
-+ pvti = get_pvti(cpu);
-+ migrate_count = pvti->migrate_count;
-+
-+ cpu1 = cpu;
-+ cpu = __getcpu() & VGETCPU_CPU_MASK;
-+ } while (unlikely(cpu != cpu1));
-
- version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
-
- /*
- * Test we're still on the cpu as well as the version.
-- * We could have been migrated just after the first
-- * vgetcpu but before fetching the version, so we
-- * wouldn't notice a version change.
-+ * - We must read TSC of pvti's VCPU.
-+ * - KVM doesn't follow the versioning protocol, so data could
-+ * change before version if we left the VCPU.
- */
-- cpu1 = __getcpu() & VGETCPU_CPU_MASK;
-- } while (unlikely(cpu != cpu1 ||
-- (pvti->pvti.version & 1) ||
-- pvti->pvti.version != version));
-+ smp_rmb();
-+ } while (unlikely((pvti->pvti.version & 1) ||
-+ pvti->pvti.version != version ||
-+ pvti->migrate_count != migrate_count));
-
- if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
- *mode = VCLOCK_NONE;
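
The pvclock hunks above add a per-vCPU migrate_count, bumped from a task-migration notifier, and make the vDSO reader retry until neither the version field (odd while an update is in flight) nor migrate_count changed across the sample, with smp_rmb() ordering the data read before the re-checks. A userspace sketch of that seqcount-plus-migration retry protocol (layout and names illustrative; single-threaded here, so it returns on the first pass):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct pvti {
	_Atomic uint32_t version;	/* odd while being updated  */
	_Atomic uint32_t migrate_count;	/* bumped on vCPU migration */
	uint64_t time;			/* the published datum      */
};

static uint64_t read_stable(struct pvti *p)
{
	uint32_t ver, mig;
	uint64_t t;

	do {
		ver = atomic_load(&p->version);
		mig = atomic_load(&p->migrate_count);
		t = p->time;
		/* cf. smp_rmb(): order the data read before the re-checks */
		atomic_thread_fence(memory_order_acquire);
	} while ((ver & 1) ||
		 ver != atomic_load(&p->version) ||
		 mig != atomic_load(&p->migrate_count));

	return t;
}

int main(void)
{
	struct pvti p = { .time = 123 };

	printf("%llu\n", (unsigned long long)read_stable(&p));
	return 0;
}
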
-diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
-index e31d494..87be10e 100644
---- a/arch/xtensa/Kconfig
-+++ b/arch/xtensa/Kconfig
-@@ -428,6 +428,36 @@ config DEFAULT_MEM_SIZE
-
- If unsure, leave the default value here.
-
-+config XTFPGA_LCD
-+ bool "Enable XTFPGA LCD driver"
-+ depends on XTENSA_PLATFORM_XTFPGA
-+ default n
-+ help
-+	  There's a 2x16 LCD on most XTFPGA boards; the kernel may output
-+	  progress messages there during bootup/shutdown. It may be useful
-+	  during board bringup.
-+
-+ If unsure, say N.
-+
-+config XTFPGA_LCD_BASE_ADDR
-+ hex "XTFPGA LCD base address"
-+ depends on XTFPGA_LCD
-+ default "0x0d0c0000"
-+ help
-+ Base address of the LCD controller inside KIO region.
-+ Different boards from XTFPGA family have LCD controller at different
-+	  addresses. Please consult the prototyping user guide for your board for
-+	  the correct address. A wrong address here may lead to a hardware lockup.
-+
-+config XTFPGA_LCD_8BIT_ACCESS
-+ bool "Use 8-bit access to XTFPGA LCD"
-+ depends on XTFPGA_LCD
-+ default n
-+ help
-+	  The LCD may be connected via a 4- or 8-bit interface; 8-bit access may
-+	  only be used with an 8-bit interface. Please consult the prototyping user
-+ guide for your board for the correct interface width.
-+
- endmenu
-
- menu "Executable file formats"
-diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
-index db5bb72..62d8465 100644
---- a/arch/xtensa/include/uapi/asm/unistd.h
-+++ b/arch/xtensa/include/uapi/asm/unistd.h
-@@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
- __SYSCALL(324, sys_name_to_handle_at, 5)
- #define __NR_open_by_handle_at 325
- __SYSCALL(325, sys_open_by_handle_at, 3)
--#define __NR_sync_file_range 326
-+#define __NR_sync_file_range2 326
- __SYSCALL(326, sys_sync_file_range2, 6)
- #define __NR_perf_event_open 327
- __SYSCALL(327, sys_perf_event_open, 5)
-diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
-index d05f8fe..17b1ef3 100644
---- a/arch/xtensa/platforms/iss/network.c
-+++ b/arch/xtensa/platforms/iss/network.c
-@@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv)
- {
- struct iss_net_private *lp = (struct iss_net_private *)priv;
-
-- spin_lock(&lp->lock);
- iss_net_poll();
-+ spin_lock(&lp->lock);
- mod_timer(&lp->timer, jiffies + lp->timer_val);
- spin_unlock(&lp->lock);
- }
-@@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev)
- struct iss_net_private *lp = netdev_priv(dev);
- int err;
-
-- spin_lock(&lp->lock);
-+ spin_lock_bh(&lp->lock);
-
- err = lp->tp.open(lp);
- if (err < 0)
-@@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev)
- while ((err = iss_net_rx(dev)) > 0)
- ;
-
-- spin_lock(&opened_lock);
-+ spin_unlock_bh(&lp->lock);
-+ spin_lock_bh(&opened_lock);
- list_add(&lp->opened_list, &opened);
-- spin_unlock(&opened_lock);
-+ spin_unlock_bh(&opened_lock);
-+ spin_lock_bh(&lp->lock);
-
- init_timer(&lp->timer);
- lp->timer_val = ISS_NET_TIMER_VALUE;
-@@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev)
- mod_timer(&lp->timer, jiffies + lp->timer_val);
-
- out:
-- spin_unlock(&lp->lock);
-+ spin_unlock_bh(&lp->lock);
- return err;
- }
-
-@@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev)
- {
- struct iss_net_private *lp = netdev_priv(dev);
- netif_stop_queue(dev);
-- spin_lock(&lp->lock);
-+ spin_lock_bh(&lp->lock);
-
- spin_lock(&opened_lock);
- list_del(&opened);
-@@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev)
-
- lp->tp.close(lp);
-
-- spin_unlock(&lp->lock);
-+ spin_unlock_bh(&lp->lock);
- return 0;
- }
-
- static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
- {
- struct iss_net_private *lp = netdev_priv(dev);
-- unsigned long flags;
- int len;
-
- netif_stop_queue(dev);
-- spin_lock_irqsave(&lp->lock, flags);
-+ spin_lock_bh(&lp->lock);
-
- len = lp->tp.write(lp, &skb);
-
-@@ -438,7 +439,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
- pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
- }
-
-- spin_unlock_irqrestore(&lp->lock, flags);
-+ spin_unlock_bh(&lp->lock);
-
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-@@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)
-
- if (!is_valid_ether_addr(hwaddr->sa_data))
- return -EADDRNOTAVAIL;
-- spin_lock(&lp->lock);
-+ spin_lock_bh(&lp->lock);
- memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
-- spin_unlock(&lp->lock);
-+ spin_unlock_bh(&lp->lock);
- return 0;
- }
-
-@@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init)
- *lp = (struct iss_net_private) {
- .device_list = LIST_HEAD_INIT(lp->device_list),
- .opened_list = LIST_HEAD_INIT(lp->opened_list),
-- .lock = __SPIN_LOCK_UNLOCKED(lp.lock),
- .dev = dev,
- .index = index,
-- };
-+ };
-
-+ spin_lock_init(&lp->lock);
- /*
- * If this name ends up conflicting with an existing registered
- * netdevice, that is OK, register_netdev{,ice}() will notice this
-diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
-index b9ae206..7839d38 100644
---- a/arch/xtensa/platforms/xtfpga/Makefile
-+++ b/arch/xtensa/platforms/xtfpga/Makefile
-@@ -6,4 +6,5 @@
- #
- # Note 2! The CFLAGS definitions are in the main makefile...
-
--obj-y = setup.o lcd.o
-+obj-y += setup.o
-+obj-$(CONFIG_XTFPGA_LCD) += lcd.o
-diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
-index 6edd20b..4e0af26 100644
---- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
-+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
-@@ -40,9 +40,6 @@
-
- /* UART */
- #define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
--/* LCD instruction and data addresses. */
--#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
--#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
-
- /* Misc. */
- #define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
-diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
-index 0e43564..4c8541e 100644
---- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
-+++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
-@@ -11,10 +11,25 @@
- #ifndef __XTENSA_XTAVNET_LCD_H
- #define __XTENSA_XTAVNET_LCD_H
-
-+#ifdef CONFIG_XTFPGA_LCD
- /* Display string STR at position POS on the LCD. */
- void lcd_disp_at_pos(char *str, unsigned char pos);
-
- /* Shift the contents of the LCD display left or right. */
- void lcd_shiftleft(void);
- void lcd_shiftright(void);
-+#else
-+static inline void lcd_disp_at_pos(char *str, unsigned char pos)
-+{
-+}
-+
-+static inline void lcd_shiftleft(void)
-+{
-+}
-+
-+static inline void lcd_shiftright(void)
-+{
-+}
-+#endif
-+
- #endif
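
The lcd.h change above is the standard kernel idiom for optional drivers: real prototypes when CONFIG_XTFPGA_LCD is set, empty static inline stubs otherwise, so platform setup code can call lcd_disp_at_pos() unconditionally. The idiom in miniature (feature flag invented for the example):

#include <stdio.h>

#define CONFIG_FEATURE 0	/* flip to 1 to model CONFIG_XTFPGA_LCD=y */

#if CONFIG_FEATURE
void feature_hello(const char *who);	/* provided by the driver */
#else
/* Compiled-out build: the stub keeps every call site #ifdef-free
 * and vanishes after inlining. */
static inline void feature_hello(const char *who) { (void)who; }
#endif

int main(void)
{
	feature_hello("caller");	/* compiles and links either way */
	puts("ok");
	return 0;
}
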
-diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
-index 2872301..4dc0c1b 100644
---- a/arch/xtensa/platforms/xtfpga/lcd.c
-+++ b/arch/xtensa/platforms/xtfpga/lcd.c
-@@ -1,50 +1,63 @@
- /*
-- * Driver for the LCD display on the Tensilica LX60 Board.
-+ * Driver for the LCD display on the Tensilica XTFPGA board family.
-+ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001, 2006 Tensilica Inc.
-+ * Copyright (C) 2015 Cadence Design Systems Inc.
- */
-
--/*
-- *
-- * FIXME: this code is from the examples from the LX60 user guide.
-- *
-- * The lcd_pause function does busy waiting, which is probably not
-- * great. Maybe the code could be changed to use kernel timers, or
-- * change the hardware to not need to wait.
-- */
--
-+#include <linux/delay.h>
- #include <linux/init.h>
- #include <linux/io.h>
-
- #include <platform/hardware.h>
- #include <platform/lcd.h>
--#include <linux/delay.h>
-
--#define LCD_PAUSE_ITERATIONS 4000
-+/* LCD instruction and data addresses. */
-+#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
-+#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4)
-+
- #define LCD_CLEAR 0x1
- #define LCD_DISPLAY_ON 0xc
-
- /* 8bit and 2 lines display */
- #define LCD_DISPLAY_MODE8BIT 0x38
-+#define LCD_DISPLAY_MODE4BIT 0x28
- #define LCD_DISPLAY_POS 0x80
- #define LCD_SHIFT_LEFT 0x18
- #define LCD_SHIFT_RIGHT 0x1c
-
-+static void lcd_put_byte(u8 *addr, u8 data)
-+{
-+#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
-+ ACCESS_ONCE(*addr) = data;
-+#else
-+ ACCESS_ONCE(*addr) = data & 0xf0;
-+ ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
-+#endif
-+}
-+
- static int __init lcd_init(void)
- {
-- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
-+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
- mdelay(5);
-- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
-+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
- udelay(200);
-- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
-+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
-+ udelay(50);
-+#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
-+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
-+ udelay(50);
-+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
- udelay(50);
-- *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
-+#endif
-+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
- udelay(50);
-- *LCD_INSTR_ADDR = LCD_CLEAR;
-+ lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
- mdelay(10);
- lcd_disp_at_pos("XTENSA LINUX", 0);
- return 0;
-@@ -52,10 +65,10 @@ static int __init lcd_init(void)
-
- void lcd_disp_at_pos(char *str, unsigned char pos)
- {
-- *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
-+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
- udelay(100);
- while (*str != 0) {
-- *LCD_DATA_ADDR = *str;
-+ lcd_put_byte(LCD_DATA_ADDR, *str);
- udelay(200);
- str++;
- }
-@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)
-
- void lcd_shiftleft(void)
- {
-- *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
-+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
- udelay(50);
- }
-
- void lcd_shiftright(void)
- {
-- *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
-+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
- udelay(50);
- }
-
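
The new lcd_put_byte() above carries the whole 4-bit-bus story: this class of character-LCD controller accepts a byte as two high-nibble-aligned writes when wired with a 4-bit interface, while an 8-bit interface takes it in a single store. A standalone restatement, with a volatile store modeling ACCESS_ONCE() and the bus width as a runtime flag instead of a Kconfig option:

#include <stdint.h>

static void lcd_put_byte(volatile uint8_t *addr, uint8_t data, int bus_is_8bit)
{
	if (bus_is_8bit) {
		*addr = data;				/* one 8-bit write */
	} else {
		*addr = data & 0xf0;			/* high nibble     */
		*addr = (uint8_t)(data << 4) & 0xf0;	/* low nibble      */
	}
}

int main(void)
{
	volatile uint8_t reg = 0;	/* stands in for the LCD register */

	lcd_put_byte(&reg, 0x2b, 0);	/* last nibble write leaves 0xb0 */
	return reg != 0xb0;
}
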
-diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
-index 5ed064e..ccf7932 100644
---- a/drivers/acpi/acpica/evgpe.c
-+++ b/drivers/acpi/acpica/evgpe.c
-@@ -92,6 +92,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
- ACPI_SET_BIT(gpe_register_info->enable_for_run,
- (u8)register_bit);
- }
-+ gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
-
- return_ACPI_STATUS(AE_OK);
- }
-@@ -123,7 +124,7 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
-
- /* Enable the requested GPE */
-
-- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE_SAVE);
-+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
- return_ACPI_STATUS(status);
- }
-
-@@ -202,7 +203,7 @@ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
- if (ACPI_SUCCESS(status)) {
- status =
- acpi_hw_low_set_gpe(gpe_event_info,
-- ACPI_GPE_DISABLE_SAVE);
-+ ACPI_GPE_DISABLE);
- }
-
- if (ACPI_FAILURE(status)) {
-diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
-index 84bc550..af6514e 100644
---- a/drivers/acpi/acpica/hwgpe.c
-+++ b/drivers/acpi/acpica/hwgpe.c
-@@ -89,6 +89,8 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
- * RETURN: Status
- *
- * DESCRIPTION: Enable or disable a single GPE in the parent enable register.
-+ * The enable_mask field of the involved GPE register must be
-+ * updated by the caller if necessary.
- *
- ******************************************************************************/
-
-@@ -119,7 +121,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
- /* Set or clear just the bit that corresponds to this GPE */
-
- register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
-- switch (action & ~ACPI_GPE_SAVE_MASK) {
-+ switch (action) {
- case ACPI_GPE_CONDITIONAL_ENABLE:
-
- /* Only enable if the corresponding enable_mask bit is set */
-@@ -149,9 +151,6 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
- /* Write the updated enable mask */
-
- status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
-- if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) {
-- gpe_register_info->enable_mask = (u8)enable_mask;
-- }
- return (status);
- }
-
-@@ -286,10 +285,8 @@ acpi_hw_gpe_enable_write(u8 enable_mask,
- {
- acpi_status status;
-
-+ gpe_register_info->enable_mask = enable_mask;
- status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
-- if (ACPI_SUCCESS(status)) {
-- gpe_register_info->enable_mask = enable_mask;
-- }
- return (status);
- }
-
-diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
-index 9bad45e..7fbc2b9 100644
---- a/drivers/acpi/acpica/tbinstal.c
-+++ b/drivers/acpi/acpica/tbinstal.c
-@@ -346,7 +346,6 @@ acpi_tb_install_standard_table(acpi_physical_address address,
- */
- acpi_tb_uninstall_table(&new_table_desc);
- *table_index = i;
-- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- return_ACPI_STATUS(AE_OK);
- }
- }
-diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
-index bbca783..349f4fd 100644
---- a/drivers/acpi/scan.c
-+++ b/drivers/acpi/scan.c
-@@ -298,7 +298,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
- struct acpi_device_physical_node *pn;
- bool offline = true;
-
-- mutex_lock(&adev->physical_node_lock);
-+ /*
-+ * acpi_container_offline() calls this for all of the container's
-+ * children under the container's physical_node_lock lock.
-+ */
-+ mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
-
- list_for_each_entry(pn, &adev->physical_node_list, node)
- if (device_supports_offline(pn->dev) && !pn->dev->offline) {
-diff --git a/drivers/base/bus.c b/drivers/base/bus.c
-index 876bae5..79bc203 100644
---- a/drivers/base/bus.c
-+++ b/drivers/base/bus.c
-@@ -515,11 +515,11 @@ int bus_add_device(struct device *dev)
- goto out_put;
- error = device_add_groups(dev, bus->dev_groups);
- if (error)
-- goto out_groups;
-+ goto out_id;
- error = sysfs_create_link(&bus->p->devices_kset->kobj,
- &dev->kobj, dev_name(dev));
- if (error)
-- goto out_id;
-+ goto out_groups;
- error = sysfs_create_link(&dev->kobj,
- &dev->bus->p->subsys.kobj, "subsystem");
- if (error)
-diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
-index 6e64563..9c2ba1c 100644
---- a/drivers/base/cacheinfo.c
-+++ b/drivers/base/cacheinfo.c
-@@ -62,15 +62,21 @@ static int cache_setup_of_node(unsigned int cpu)
- return -ENOENT;
- }
-
-- while (np && index < cache_leaves(cpu)) {
-+ while (index < cache_leaves(cpu)) {
- this_leaf = this_cpu_ci->info_list + index;
- if (this_leaf->level != 1)
- np = of_find_next_cache_node(np);
- else
- np = of_node_get(np);/* cpu node itself */
-+ if (!np)
-+ break;
- this_leaf->of_node = np;
- index++;
- }
-+
-+ if (index != cache_leaves(cpu)) /* not all OF nodes populated */
-+ return -ENOENT;
-+
- return 0;
- }
-
-@@ -189,8 +195,11 @@ static int detect_cache_attributes(unsigned int cpu)
- * will be set up here only if they are not populated already
- */
- ret = cache_shared_cpu_map_setup(cpu);
-- if (ret)
-+ if (ret) {
-+		pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
-+ cpu);
- goto free_ci;
-+ }
- return 0;
-
- free_ci:
-diff --git a/drivers/base/platform.c b/drivers/base/platform.c
-index 9421fed..e68ab79 100644
---- a/drivers/base/platform.c
-+++ b/drivers/base/platform.c
-@@ -101,6 +101,15 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
- }
-
- r = platform_get_resource(dev, IORESOURCE_IRQ, num);
-+ /*
-+ * The resources may pass trigger flags to the irqs that need
-+ * to be set up. It so happens that the trigger flags for
-+ * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
-+ * settings.
-+ */
-+ if (r && r->flags & IORESOURCE_BITS)
-+ irqd_set_trigger_type(irq_get_irq_data(r->start),
-+ r->flags & IORESOURCE_BITS);
-
- return r ? r->start : -ENXIO;
- #endif
-diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
-index de4c849..288547a 100644
---- a/drivers/bluetooth/ath3k.c
-+++ b/drivers/bluetooth/ath3k.c
-@@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = {
- /* Atheros AR3011 with sflash firmware*/
- { USB_DEVICE(0x0489, 0xE027) },
- { USB_DEVICE(0x0489, 0xE03D) },
-+ { USB_DEVICE(0x04F2, 0xAFF1) },
- { USB_DEVICE(0x0930, 0x0215) },
- { USB_DEVICE(0x0CF3, 0x3002) },
- { USB_DEVICE(0x0CF3, 0xE019) },
-diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
-index 8bfc4c2..2c527da 100644
---- a/drivers/bluetooth/btusb.c
-+++ b/drivers/bluetooth/btusb.c
-@@ -159,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = {
- /* Atheros 3011 with sflash firmware */
- { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
-+ { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
-diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
-index e096e9c..283f00a 100644
---- a/drivers/char/tpm/tpm-chip.c
-+++ b/drivers/char/tpm/tpm-chip.c
-@@ -170,6 +170,41 @@ static void tpm_dev_del_device(struct tpm_chip *chip)
- device_unregister(&chip->dev);
- }
-
-+static int tpm1_chip_register(struct tpm_chip *chip)
-+{
-+ int rc;
-+
-+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
-+ return 0;
-+
-+ rc = tpm_sysfs_add_device(chip);
-+ if (rc)
-+ return rc;
-+
-+ rc = tpm_add_ppi(chip);
-+ if (rc) {
-+ tpm_sysfs_del_device(chip);
-+ return rc;
-+ }
-+
-+ chip->bios_dir = tpm_bios_log_setup(chip->devname);
-+
-+ return 0;
-+}
-+
-+static void tpm1_chip_unregister(struct tpm_chip *chip)
-+{
-+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
-+ return;
-+
-+ if (chip->bios_dir)
-+ tpm_bios_log_teardown(chip->bios_dir);
-+
-+ tpm_remove_ppi(chip);
-+
-+ tpm_sysfs_del_device(chip);
-+}
-+
- /*
- * tpm_chip_register() - create a character device for the TPM chip
- * @chip: TPM chip to use.
-@@ -185,22 +220,13 @@ int tpm_chip_register(struct tpm_chip *chip)
- {
- int rc;
-
-- /* Populate sysfs for TPM1 devices. */
-- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
-- rc = tpm_sysfs_add_device(chip);
-- if (rc)
-- goto del_misc;
--
-- rc = tpm_add_ppi(chip);
-- if (rc)
-- goto del_sysfs;
--
-- chip->bios_dir = tpm_bios_log_setup(chip->devname);
-- }
-+ rc = tpm1_chip_register(chip);
-+ if (rc)
-+ return rc;
-
- rc = tpm_dev_add_device(chip);
- if (rc)
-- return rc;
-+ goto out_err;
-
- /* Make the chip available. */
- spin_lock(&driver_lock);
-@@ -210,10 +236,8 @@ int tpm_chip_register(struct tpm_chip *chip)
- chip->flags |= TPM_CHIP_FLAG_REGISTERED;
-
- return 0;
--del_sysfs:
-- tpm_sysfs_del_device(chip);
--del_misc:
-- tpm_dev_del_device(chip);
-+out_err:
-+ tpm1_chip_unregister(chip);
- return rc;
- }
- EXPORT_SYMBOL_GPL(tpm_chip_register);
-@@ -238,13 +262,7 @@ void tpm_chip_unregister(struct tpm_chip *chip)
- spin_unlock(&driver_lock);
- synchronize_rcu();
-
-- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
-- if (chip->bios_dir)
-- tpm_bios_log_teardown(chip->bios_dir);
-- tpm_remove_ppi(chip);
-- tpm_sysfs_del_device(chip);
-- }
--
-+ tpm1_chip_unregister(chip);
- tpm_dev_del_device(chip);
- }
- EXPORT_SYMBOL_GPL(tpm_chip_unregister);
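
The TPM refactor above groups the TPM1-only steps (sysfs attributes, PPI, the BIOS event log) behind a register/unregister pair so tpm_chip_register() can unwind a failure with one call instead of a ladder of labels. The generic shape of that cleanup, as a hedged sketch with invented sub-steps:

#include <stdio.h>

/* Hypothetical sub-steps; each add_*() returns 0 on success. */
static int add_sysfs(void)  { return 0; }
static void del_sysfs(void) { }
static int add_ppi(void)    { return 0; }
static void del_ppi(void)   { }

static int feature_register(void)
{
	int rc = add_sysfs();

	if (rc)
		return rc;
	rc = add_ppi();
	if (rc) {
		del_sysfs();	/* unwind only what succeeded */
		return rc;
	}
	return 0;
}

static void feature_unregister(void)
{
	del_ppi();		/* teardown in reverse order */
	del_sysfs();
}

int main(void)
{
	if (feature_register())
		return 1;
	/* ... register the device; on failure: feature_unregister() ... */
	feature_unregister();
	puts("ok");
	return 0;
}
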
-diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
-index a23ac0c..0b7c3e8 100644
---- a/drivers/clk/at91/clk-usb.c
-+++ b/drivers/clk/at91/clk-usb.c
-@@ -56,22 +56,55 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
- return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
- }
-
--static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
-- unsigned long *parent_rate)
-+static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
-+ unsigned long rate,
-+ unsigned long min_rate,
-+ unsigned long max_rate,
-+ unsigned long *best_parent_rate,
-+ struct clk_hw **best_parent_hw)
- {
-- unsigned long div;
-+ struct clk *parent = NULL;
-+ long best_rate = -EINVAL;
-+ unsigned long tmp_rate;
-+ int best_diff = -1;
-+ int tmp_diff;
-+ int i;
-
-- if (!rate)
-- return -EINVAL;
-+ for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
-+ int div;
-
-- if (rate >= *parent_rate)
-- return *parent_rate;
-+ parent = clk_get_parent_by_index(hw->clk, i);
-+ if (!parent)
-+ continue;
-+
-+ for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) {
-+ unsigned long tmp_parent_rate;
-+
-+ tmp_parent_rate = rate * div;
-+ tmp_parent_rate = __clk_round_rate(parent,
-+ tmp_parent_rate);
-+ tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
-+ if (tmp_rate < rate)
-+ tmp_diff = rate - tmp_rate;
-+ else
-+ tmp_diff = tmp_rate - rate;
-+
-+ if (best_diff < 0 || best_diff > tmp_diff) {
-+ best_rate = tmp_rate;
-+ best_diff = tmp_diff;
-+ *best_parent_rate = tmp_parent_rate;
-+ *best_parent_hw = __clk_get_hw(parent);
-+ }
-+
-+ if (!best_diff || tmp_rate < rate)
-+ break;
-+ }
-
-- div = DIV_ROUND_CLOSEST(*parent_rate, rate);
-- if (div > SAM9X5_USB_MAX_DIV + 1)
-- div = SAM9X5_USB_MAX_DIV + 1;
-+ if (!best_diff)
-+ break;
-+ }
-
-- return DIV_ROUND_CLOSEST(*parent_rate, div);
-+ return best_rate;
- }
-
- static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
-@@ -121,7 +154,7 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
-
- static const struct clk_ops at91sam9x5_usb_ops = {
- .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
-- .round_rate = at91sam9x5_clk_usb_round_rate,
-+ .determine_rate = at91sam9x5_clk_usb_determine_rate,
- .get_parent = at91sam9x5_clk_usb_get_parent,
- .set_parent = at91sam9x5_clk_usb_set_parent,
- .set_rate = at91sam9x5_clk_usb_set_rate,
-@@ -159,7 +192,7 @@ static const struct clk_ops at91sam9n12_usb_ops = {
- .disable = at91sam9n12_clk_usb_disable,
- .is_enabled = at91sam9n12_clk_usb_is_enabled,
- .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
-- .round_rate = at91sam9x5_clk_usb_round_rate,
-+ .determine_rate = at91sam9x5_clk_usb_determine_rate,
- .set_rate = at91sam9x5_clk_usb_set_rate,
- };
-
-@@ -179,7 +212,8 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
- init.ops = &at91sam9x5_usb_ops;
- init.parent_names = parent_names;
- init.num_parents = num_parents;
-- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
-+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
-+ CLK_SET_RATE_PARENT;
-
- usb->hw.init = &init;
- usb->pmc = pmc;
-@@ -207,7 +241,7 @@ at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
- init.ops = &at91sam9n12_usb_ops;
- init.parent_names = &parent_name;
- init.num_parents = 1;
-- init.flags = CLK_SET_RATE_GATE;
-+ init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
-
- usb->hw.init = &init;
- usb->pmc = pmc;
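
The new at91sam9x5_clk_usb_determine_rate() above replaces a single-parent round_rate with an exhaustive search: for every candidate parent and every divisor it computes the achievable rate and keeps the pair closest to the request. The same search in self-contained form, with integer division standing in for __clk_round_rate() plus DIV_ROUND_CLOSEST() and the parent rates invented for the example:

#include <stdio.h>
#include <stdlib.h>

#define MAX_DIV 16	/* cf. SAM9X5_USB_MAX_DIV + 1 */

int main(void)
{
	const unsigned long parents[] = { 480000000UL, 200000000UL };
	const unsigned long target = 48000000UL;
	unsigned long best_rate = 0, best_parent = 0;
	long best_diff = -1;
	int best_div = 0;
	size_t i;
	int div;

	for (i = 0; i < sizeof(parents) / sizeof(parents[0]); i++) {
		for (div = 1; div <= MAX_DIV; div++) {
			unsigned long rate = parents[i] / div;
			long diff = labs((long)rate - (long)target);

			if (best_diff < 0 || diff < best_diff) {
				best_diff = diff;
				best_rate = rate;
				best_parent = parents[i];
				best_div = div;
			}
		}
	}
	printf("best: %lu Hz = %lu / %d\n", best_rate, best_parent, best_div);
	return 0;
}
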
-diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
-index 0039bd7..466f30c 100644
---- a/drivers/clk/qcom/clk-rcg.c
-+++ b/drivers/clk/qcom/clk-rcg.c
-@@ -495,6 +495,57 @@ static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate,
- return __clk_rcg_set_rate(rcg, rcg->freq_tbl);
- }
-
-+/*
-+ * This type of clock has a glitch-free mux that switches between the output of
-+ * the M/N counter and an always-on clock source (XO). When clk_set_rate() is
-+ * called we need to make sure that we don't switch to the M/N counter if it
-+ * isn't clocking, because the mux will get stuck and its output will stop
-+ * toggling. This can happen if the framework isn't aware that this
-+ * clock is on and so clk_set_rate() doesn't turn on the new parent. To fix
-+ * this we switch the mux in the enable/disable ops and reprogram the M/N
-+ * counter in the set_rate op. We also make sure to switch away from the M/N
-+ * counter in set_rate if software thinks the clock is off.
-+ */
-+static int clk_rcg_lcc_set_rate(struct clk_hw *hw, unsigned long rate,
-+ unsigned long parent_rate)
-+{
-+ struct clk_rcg *rcg = to_clk_rcg(hw);
-+ const struct freq_tbl *f;
-+ int ret;
-+ u32 gfm = BIT(10);
-+
-+ f = qcom_find_freq(rcg->freq_tbl, rate);
-+ if (!f)
-+ return -EINVAL;
-+
-+ /* Switch to XO to avoid glitches */
-+ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
-+ ret = __clk_rcg_set_rate(rcg, f);
-+ /* Switch back to M/N if it's clocking */
-+ if (__clk_is_enabled(hw->clk))
-+ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
-+
-+ return ret;
-+}
-+
-+static int clk_rcg_lcc_enable(struct clk_hw *hw)
-+{
-+ struct clk_rcg *rcg = to_clk_rcg(hw);
-+ u32 gfm = BIT(10);
-+
-+ /* Use M/N */
-+ return regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
-+}
-+
-+static void clk_rcg_lcc_disable(struct clk_hw *hw)
-+{
-+ struct clk_rcg *rcg = to_clk_rcg(hw);
-+ u32 gfm = BIT(10);
-+
-+ /* Use XO */
-+ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
-+}
-+
- static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
- {
- struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
-@@ -543,6 +594,17 @@ const struct clk_ops clk_rcg_bypass_ops = {
- };
- EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops);
-
-+const struct clk_ops clk_rcg_lcc_ops = {
-+ .enable = clk_rcg_lcc_enable,
-+ .disable = clk_rcg_lcc_disable,
-+ .get_parent = clk_rcg_get_parent,
-+ .set_parent = clk_rcg_set_parent,
-+ .recalc_rate = clk_rcg_recalc_rate,
-+ .determine_rate = clk_rcg_determine_rate,
-+ .set_rate = clk_rcg_lcc_set_rate,
-+};
-+EXPORT_SYMBOL_GPL(clk_rcg_lcc_ops);
-+
- const struct clk_ops clk_dyn_rcg_ops = {
- .enable = clk_enable_regmap,
- .is_enabled = clk_is_enabled_regmap,
-diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
-index 687e41f..d09d06b 100644
---- a/drivers/clk/qcom/clk-rcg.h
-+++ b/drivers/clk/qcom/clk-rcg.h
-@@ -96,6 +96,7 @@ struct clk_rcg {
-
- extern const struct clk_ops clk_rcg_ops;
- extern const struct clk_ops clk_rcg_bypass_ops;
-+extern const struct clk_ops clk_rcg_lcc_ops;
-
- #define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
-
-diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
-index 742acfa..381f274 100644
---- a/drivers/clk/qcom/clk-rcg2.c
-+++ b/drivers/clk/qcom/clk-rcg2.c
-@@ -243,7 +243,7 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
- mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
- cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
- cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
-- if (rcg->mnd_width && f->n)
-+ if (rcg->mnd_width && f->n && (f->m != f->n))
- cfg |= CFG_MODE_DUAL_EDGE;
- ret = regmap_update_bits(rcg->clkr.regmap,
- rcg->cmd_rcgr + CFG_REG, mask, cfg);
-diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
-index cbdc31d..a015bb0 100644
---- a/drivers/clk/qcom/gcc-ipq806x.c
-+++ b/drivers/clk/qcom/gcc-ipq806x.c
-@@ -525,8 +525,8 @@ static struct freq_tbl clk_tbl_gsbi_qup[] = {
- { 10800000, P_PXO, 1, 2, 5 },
- { 15060000, P_PLL8, 1, 2, 51 },
- { 24000000, P_PLL8, 4, 1, 4 },
-+ { 25000000, P_PXO, 1, 0, 0 },
- { 25600000, P_PLL8, 1, 1, 15 },
-- { 27000000, P_PXO, 1, 0, 0 },
- { 48000000, P_PLL8, 4, 1, 2 },
- { 51200000, P_PLL8, 1, 2, 15 },
- { }
-diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
-index c9ff27b..a6d3a67 100644
---- a/drivers/clk/qcom/lcc-ipq806x.c
-+++ b/drivers/clk/qcom/lcc-ipq806x.c
-@@ -294,14 +294,14 @@ static struct clk_regmap_mux pcm_clk = {
- };
-
- static struct freq_tbl clk_tbl_aif_osr[] = {
-- { 22050, P_PLL4, 1, 147, 20480 },
-- { 32000, P_PLL4, 1, 1, 96 },
-- { 44100, P_PLL4, 1, 147, 10240 },
-- { 48000, P_PLL4, 1, 1, 64 },
-- { 88200, P_PLL4, 1, 147, 5120 },
-- { 96000, P_PLL4, 1, 1, 32 },
-- { 176400, P_PLL4, 1, 147, 2560 },
-- { 192000, P_PLL4, 1, 1, 16 },
-+ { 2822400, P_PLL4, 1, 147, 20480 },
-+ { 4096000, P_PLL4, 1, 1, 96 },
-+ { 5644800, P_PLL4, 1, 147, 10240 },
-+ { 6144000, P_PLL4, 1, 1, 64 },
-+ { 11289600, P_PLL4, 1, 147, 5120 },
-+ { 12288000, P_PLL4, 1, 1, 32 },
-+ { 22579200, P_PLL4, 1, 147, 2560 },
-+ { 24576000, P_PLL4, 1, 1, 16 },
- { },
- };
-
-@@ -360,7 +360,7 @@ static struct clk_branch spdif_clk = {
- };
-
- static struct freq_tbl clk_tbl_ahbix[] = {
-- { 131072, P_PLL4, 1, 1, 3 },
-+ { 131072000, P_PLL4, 1, 1, 3 },
- { },
- };
-
-@@ -386,13 +386,12 @@ static struct clk_rcg ahbix_clk = {
- .freq_tbl = clk_tbl_ahbix,
- .clkr = {
- .enable_reg = 0x38,
-- .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */
-+ .enable_mask = BIT(11),
- .hw.init = &(struct clk_init_data){
- .name = "ahbix",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
-- .ops = &clk_rcg_ops,
-- .flags = CLK_SET_RATE_GATE,
-+ .ops = &clk_rcg_lcc_ops,
- },
- },
- };
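
The corrected clk_tbl_aif_osr entries appear to be the real OSR clock rates rather than raw audio sample rates: every fixed value is exactly 128 times the one it replaces, consistent with a 128 * fs oversampling clock. A quick arithmetic check of that relationship:

#include <stdio.h>

int main(void)
{
	/* Sample rates from the old table ... */
	const unsigned long fs[] = { 22050, 32000, 44100, 48000,
				     88200, 96000, 176400, 192000 };
	/* ... and the OSR clock rates from the fixed table. */
	const unsigned long osr[] = { 2822400, 4096000, 5644800, 6144000,
				      11289600, 12288000, 22579200, 24576000 };

	for (int i = 0; i < 8; i++)
		printf("%6lu * 128 = %8lu (table: %8lu)%s\n",
		       fs[i], fs[i] * 128, osr[i],
		       fs[i] * 128 == osr[i] ? "" : " MISMATCH");
	return 0;
}
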
-diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
-index 51462e8..714d6ba 100644
---- a/drivers/clk/samsung/clk-exynos4.c
-+++ b/drivers/clk/samsung/clk-exynos4.c
-@@ -1354,7 +1354,7 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
- VPLL_LOCK, VPLL_CON0, NULL),
- };
-
--static void __init exynos4_core_down_clock(enum exynos4_soc soc)
-+static void __init exynos4x12_core_down_clock(void)
- {
- unsigned int tmp;
-
-@@ -1373,11 +1373,9 @@ static void __init exynos4_core_down_clock(enum exynos4_soc soc)
- __raw_writel(tmp, reg_base + PWR_CTRL1);
-
- /*
-- * Disable the clock up feature on Exynos4x12, in case it was
-- * enabled by bootloader.
-+	 * Disable the clock up feature in case it was enabled by the bootloader.
- */
-- if (exynos4_soc == EXYNOS4X12)
-- __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
-+ __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
- }
-
- /* register exynos4 clocks */
-@@ -1474,7 +1472,8 @@ static void __init exynos4_clk_init(struct device_node *np,
- samsung_clk_register_alias(ctx, exynos4_aliases,
- ARRAY_SIZE(exynos4_aliases));
-
-- exynos4_core_down_clock(soc);
-+ if (soc == EXYNOS4X12)
-+ exynos4x12_core_down_clock();
- exynos4_clk_sleep_init();
-
- samsung_clk_of_add_provider(np, ctx);
-diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
-index 9a893f2..23ce0af 100644
---- a/drivers/clk/tegra/clk-tegra124.c
-+++ b/drivers/clk/tegra/clk-tegra124.c
-@@ -1110,16 +1110,18 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
- 1, 2);
- clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;
-
-- clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0,
-+ clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
- clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
-- clks[TEGRA124_CLK_PLLD_DSI] = clk;
-+ clks[TEGRA124_CLK_PLL_D_DSI_OUT] = clk;
-
-- clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base,
-- 0, 48, periph_clk_enb_refcnt);
-+ clk = tegra_clk_register_periph_gate("dsia", "pll_d_dsi_out", 0,
-+ clk_base, 0, 48,
-+ periph_clk_enb_refcnt);
- clks[TEGRA124_CLK_DSIA] = clk;
-
-- clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base,
-- 0, 82, periph_clk_enb_refcnt);
-+ clk = tegra_clk_register_periph_gate("dsib", "pll_d_dsi_out", 0,
-+ clk_base, 0, 82,
-+ periph_clk_enb_refcnt);
- clks[TEGRA124_CLK_DSIB] = clk;
-
- /* emc mux */
-diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
-index 9ddb754..7a1df61 100644
---- a/drivers/clk/tegra/clk.c
-+++ b/drivers/clk/tegra/clk.c
-@@ -272,7 +272,7 @@ void __init tegra_add_of_provider(struct device_node *np)
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-
- rst_ctlr.of_node = np;
-- rst_ctlr.nr_resets = clk_num * 32;
-+ rst_ctlr.nr_resets = periph_banks * 32;
- reset_controller_register(&rst_ctlr);
- }
-
-diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
-index 42f95a4..9a28b7e 100644
---- a/drivers/crypto/omap-aes.c
-+++ b/drivers/crypto/omap-aes.c
-@@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
- return err;
- }
-
--static int omap_aes_check_aligned(struct scatterlist *sg)
-+static int omap_aes_check_aligned(struct scatterlist *sg, int total)
- {
-+ int len = 0;
-+
- while (sg) {
- if (!IS_ALIGNED(sg->offset, 4))
- return -1;
- if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
- return -1;
-+
-+ len += sg->length;
- sg = sg_next(sg);
- }
-+
-+ if (len != total)
-+ return -1;
-+
- return 0;
- }
-
-@@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
- dd->in_sg = req->src;
- dd->out_sg = req->dst;
-
-- if (omap_aes_check_aligned(dd->in_sg) ||
-- omap_aes_check_aligned(dd->out_sg)) {
-+ if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
-+ omap_aes_check_aligned(dd->out_sg, dd->total)) {
- if (omap_aes_copy_sgs(dd))
- pr_err("Failed to copy SGs for unaligned cases\n");
- dd->sgs_copied = 1;
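
The omap-aes hunk extends the per-segment alignment walk with a running length, so a scatterlist that is aligned but shorter than the request now also takes the copy fallback. A standalone sketch of the same test over a toy segment array (the struct and constants are hypothetical):

#include <stddef.h>
#include <stdio.h>

#define BLOCK_SIZE 16 /* stand-in for AES_BLOCK_SIZE */

struct seg { size_t offset, length; }; /* toy scatterlist element */

/* 0 only if every segment is 4-byte/block aligned AND the segments
 * together cover exactly 'total' bytes (the patch's added test). */
static int check_aligned(const struct seg *sg, int n, size_t total)
{
	size_t len = 0;

	for (int i = 0; i < n; i++) {
		if (sg[i].offset % 4 || sg[i].length % BLOCK_SIZE)
			return -1;
		len += sg[i].length;
	}
	return len == total ? 0 : -1;
}

int main(void)
{
	struct seg full[]   = { { 0, 32 }, { 64, 32 } };
	struct seg short_[] = { { 0, 32 } };

	printf("aligned, full:  %d\n", check_aligned(full, 2, 64));   /* 0 */
	printf("aligned, short: %d\n", check_aligned(short_, 1, 64)); /* -1 */
	return 0;
}
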
-diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
-index d0bc123..1a54205 100644
---- a/drivers/gpio/gpio-mvebu.c
-+++ b/drivers/gpio/gpio-mvebu.c
-@@ -320,11 +320,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
- {
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- struct mvebu_gpio_chip *mvchip = gc->private;
-+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
-
- irq_gc_lock(gc);
-- gc->mask_cache &= ~mask;
-- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
-+ ct->mask_cache_priv &= ~mask;
-+
-+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
- irq_gc_unlock(gc);
- }
-
-@@ -332,11 +334,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
- {
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- struct mvebu_gpio_chip *mvchip = gc->private;
-+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
-+
- u32 mask = 1 << (d->irq - gc->irq_base);
-
- irq_gc_lock(gc);
-- gc->mask_cache |= mask;
-- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
-+ ct->mask_cache_priv |= mask;
-+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
- irq_gc_unlock(gc);
- }
-
-@@ -344,11 +348,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
- {
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- struct mvebu_gpio_chip *mvchip = gc->private;
-+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
-+
- u32 mask = 1 << (d->irq - gc->irq_base);
-
- irq_gc_lock(gc);
-- gc->mask_cache &= ~mask;
-- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
-+ ct->mask_cache_priv &= ~mask;
-+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
- irq_gc_unlock(gc);
- }
-
-@@ -356,11 +362,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
- {
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- struct mvebu_gpio_chip *mvchip = gc->private;
-+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
-+
- u32 mask = 1 << (d->irq - gc->irq_base);
-
- irq_gc_lock(gc);
-- gc->mask_cache |= mask;
-- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
-+ ct->mask_cache_priv |= mask;
-+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
- irq_gc_unlock(gc);
- }
-
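
The gpio-mvebu change matters because edge and level interrupts sit behind two different mask registers while the generic irq chip keeps only one shared gc->mask_cache, so writing that shared cache to either register clobbered the other bank. A reduced model of the bug and of the per-bank mask_cache_priv fix, with register writes replaced by plain stores:

#include <stdio.h>

/* Buggy arrangement: two mask registers fed from one shared cache. */
struct shared { unsigned cache, edge_reg, level_reg; };

static void shared_unmask_edge(struct shared *s, int bit)
{
	s->cache |= 1u << bit;
	s->edge_reg = s->cache;   /* drags level-bank bits along */
}

static void shared_unmask_level(struct shared *s, int bit)
{
	s->cache |= 1u << bit;
	s->level_reg = s->cache;  /* drags edge-bank bits along */
}

/* Fixed arrangement: one cache per register bank. */
struct split { unsigned edge_cache, level_cache, edge_reg, level_reg; };

static void split_unmask_edge(struct split *s, int bit)
{
	s->edge_cache |= 1u << bit;
	s->edge_reg = s->edge_cache;
}

static void split_unmask_level(struct split *s, int bit)
{
	s->level_cache |= 1u << bit;
	s->level_reg = s->level_cache;
}

int main(void)
{
	struct shared a = { 0, 0, 0 };
	struct split b = { 0, 0, 0, 0 };

	shared_unmask_edge(&a, 0);
	shared_unmask_level(&a, 1);
	split_unmask_edge(&b, 0);
	split_unmask_level(&b, 1);

	printf("shared: edge=%#x level=%#x\n", a.edge_reg, a.level_reg);
	printf("split:  edge=%#x level=%#x\n", b.edge_reg, b.level_reg);
	return 0; /* shared leaks bit 0 into level (0x3); split keeps 0x2 */
}
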
-diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
-index bf17a60..1dbfba5 100644
---- a/drivers/gpu/drm/exynos/exynos_dp_core.c
-+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
-@@ -32,10 +32,16 @@
- #include <drm/bridge/ptn3460.h>
-
- #include "exynos_dp_core.h"
-+#include "exynos_drm_fimd.h"
-
- #define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
- connector)
-
-+static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
-+{
-+ return to_exynos_crtc(dp->encoder->crtc);
-+}
-+
- static inline struct exynos_dp_device *
- display_to_dp(struct exynos_drm_display *d)
- {
-@@ -1070,6 +1076,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
- }
- }
-
-+ fimd_dp_clock_enable(dp_to_crtc(dp), true);
-+
- clk_prepare_enable(dp->clock);
- exynos_dp_phy_init(dp);
- exynos_dp_init_dp(dp);
-@@ -1094,6 +1102,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
- exynos_dp_phy_exit(dp);
- clk_disable_unprepare(dp->clock);
-
-+ fimd_dp_clock_enable(dp_to_crtc(dp), false);
-+
- if (dp->panel) {
- if (drm_panel_unprepare(dp->panel))
- DRM_ERROR("failed to turnoff the panel\n");
-diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
-index 33a10ce..5d58f6c 100644
---- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
-+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
-@@ -32,6 +32,7 @@
- #include "exynos_drm_fbdev.h"
- #include "exynos_drm_crtc.h"
- #include "exynos_drm_iommu.h"
-+#include "exynos_drm_fimd.h"
-
- /*
- * FIMD stands for Fully Interactive Mobile Display and
-@@ -1233,6 +1234,24 @@ static int fimd_remove(struct platform_device *pdev)
- return 0;
- }
-
-+void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
-+{
-+ struct fimd_context *ctx = crtc->ctx;
-+ u32 val;
-+
-+ /*
-+	 * Only Exynos 5250, 5260, 5410 and 542x require enabling the DP/MIE
-+	 * clock. On these SoCs the bootloader may enable it, but any power
-+	 * domain off/on will reset it to the disabled state.
-+ */
-+ if (ctx->driver_data != &exynos5_fimd_driver_data)
-+ return;
-+
-+ val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
-+	writel(val, ctx->regs + DP_MIE_CLKCON);
-+}
-+EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
-+
- struct platform_driver fimd_driver = {
- .probe = fimd_probe,
- .remove = fimd_remove,
-diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.h b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
-new file mode 100644
-index 0000000..b4fcaa5
---- /dev/null
-+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
-@@ -0,0 +1,15 @@
-+/*
-+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the
-+ * Free Software Foundation; either version 2 of the License, or (at your
-+ * option) any later version.
-+ */
-+
-+#ifndef _EXYNOS_DRM_FIMD_H_
-+#define _EXYNOS_DRM_FIMD_H_
-+
-+extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
-+
-+#endif /* _EXYNOS_DRM_FIMD_H_ */
-diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
-index fa140e0..60ab1f7 100644
---- a/drivers/gpu/drm/i2c/adv7511.c
-+++ b/drivers/gpu/drm/i2c/adv7511.c
-@@ -33,6 +33,7 @@ struct adv7511 {
-
- unsigned int current_edid_segment;
- uint8_t edid_buf[256];
-+ bool edid_read;
-
- wait_queue_head_t wq;
- struct drm_encoder *encoder;
-@@ -379,69 +380,71 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
- return false;
- }
-
--static irqreturn_t adv7511_irq_handler(int irq, void *devid)
--{
-- struct adv7511 *adv7511 = devid;
--
-- if (adv7511_hpd(adv7511))
-- drm_helper_hpd_irq_event(adv7511->encoder->dev);
--
-- wake_up_all(&adv7511->wq);
--
-- return IRQ_HANDLED;
--}
--
--static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
-- unsigned int irq)
-+static int adv7511_irq_process(struct adv7511 *adv7511)
- {
- unsigned int irq0, irq1;
-- unsigned int pending;
- int ret;
-
- ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
- if (ret < 0)
-- return 0;
-+ return ret;
-+
- ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
- if (ret < 0)
-- return 0;
-+ return ret;
-+
-+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
-+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
-+
-+ if (irq0 & ADV7511_INT0_HDP)
-+ drm_helper_hpd_irq_event(adv7511->encoder->dev);
-+
-+ if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
-+ adv7511->edid_read = true;
-+
-+ if (adv7511->i2c_main->irq)
-+ wake_up_all(&adv7511->wq);
-+ }
-+
-+ return 0;
-+}
-
-- pending = (irq1 << 8) | irq0;
-+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
-+{
-+ struct adv7511 *adv7511 = devid;
-+ int ret;
-
-- return pending & irq;
-+ ret = adv7511_irq_process(adv7511);
-+ return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
- }
-
--static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
-- int timeout)
-+/* -----------------------------------------------------------------------------
-+ * EDID retrieval
-+ */
-+
-+static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
- {
-- unsigned int pending;
- int ret;
-
- if (adv7511->i2c_main->irq) {
- ret = wait_event_interruptible_timeout(adv7511->wq,
-- adv7511_is_interrupt_pending(adv7511, irq),
-- msecs_to_jiffies(timeout));
-- if (ret <= 0)
-- return 0;
-- pending = adv7511_is_interrupt_pending(adv7511, irq);
-+ adv7511->edid_read, msecs_to_jiffies(timeout));
- } else {
-- if (timeout < 25)
-- timeout = 25;
-- do {
-- pending = adv7511_is_interrupt_pending(adv7511, irq);
-- if (pending)
-+ for (; timeout > 0; timeout -= 25) {
-+ ret = adv7511_irq_process(adv7511);
-+ if (ret < 0)
- break;
-+
-+ if (adv7511->edid_read)
-+ break;
-+
- msleep(25);
-- timeout -= 25;
-- } while (timeout >= 25);
-+ }
- }
-
-- return pending;
-+ return adv7511->edid_read ? 0 : -EIO;
- }
-
--/* -----------------------------------------------------------------------------
-- * EDID retrieval
-- */
--
- static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
- size_t len)
- {
-@@ -463,19 +466,14 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
- return ret;
-
- if (status != 2) {
-+ adv7511->edid_read = false;
- regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
- block);
-- ret = adv7511_wait_for_interrupt(adv7511,
-- ADV7511_INT0_EDID_READY |
-- ADV7511_INT1_DDC_ERROR, 200);
--
-- if (!(ret & ADV7511_INT0_EDID_READY))
-- return -EIO;
-+ ret = adv7511_wait_for_edid(adv7511, 200);
-+ if (ret < 0)
-+ return ret;
- }
-
-- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
--
- 	/* Break this apart; hopefully more I2C controllers support
- 	 * 64 byte transfers than 256 byte transfers
- */
-@@ -528,7 +526,9 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
- /* Reading the EDID only works if the device is powered */
- if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
-+ ADV7511_INT0_EDID_READY);
-+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
-+ ADV7511_INT1_DDC_ERROR);
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN, 0);
- adv7511->current_edid_segment = -1;
-@@ -563,7 +563,9 @@ static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
- adv7511->current_edid_segment = -1;
-
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
-+ ADV7511_INT0_EDID_READY);
-+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
-+ ADV7511_INT1_DDC_ERROR);
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN, 0);
- /*
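
The rewritten adv7511 wait collapses both paths onto one condition: with an IRQ line, sleep until the handler sets edid_read; without one, call the same processing routine every 25 ms until it does or the time budget runs out. A rough model of the polling branch (the flag and the process stub only imitate the driver):

#include <stdbool.h>
#include <stdio.h>

static bool edid_read;

/* Model of adv7511_irq_process(): reads the interrupt status
 * registers and latches edid_read once the EDID block is in. */
static int irq_process(int tick)
{
	if (tick >= 3) /* pretend the EDID lands on the third poll */
		edid_read = true;
	return 0;
}

/* The no-IRQ branch: poll every "25 ms" until the flag is set
 * or the budget is exhausted. */
static int wait_for_edid(int timeout_ms)
{
	int tick = 0;

	for (; timeout_ms > 0; timeout_ms -= 25) {
		if (irq_process(tick++) < 0)
			break;
		if (edid_read)
			break;
		/* msleep(25) would sit here in the driver */
	}
	return edid_read ? 0 : -1;
}

int main(void)
{
	printf("wait: %d\n", wait_for_edid(200)); /* 0: seen in time */
	return 0;
}
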
-diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index 5c66b56..ec4d932 100644
---- a/drivers/gpu/drm/i915/i915_drv.c
-+++ b/drivers/gpu/drm/i915/i915_drv.c
-@@ -1042,7 +1042,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
- s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
-
- s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
-- s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
-+ s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
-
- s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
- s->ecochk = I915_READ(GAM_ECOCHK);
-@@ -1124,7 +1124,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
- I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
-
- I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
-- I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
-+ I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
-
- I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
- I915_WRITE(GAM_ECOCHK, s->ecochk);
-diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index ede5bbb..07320cb 100644
---- a/drivers/gpu/drm/i915/i915_irq.c
-+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -3718,14 +3718,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
- ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
-- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
-- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
-+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
- I915_WRITE16(IMR, dev_priv->irq_mask);
-
- I915_WRITE16(IER,
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
-- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
- I915_USER_INTERRUPT);
- POSTING_READ16(IER);
-
-@@ -3887,14 +3885,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
-- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
-- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
-+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
-
- enable_mask =
- I915_ASLE_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
-- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
- I915_USER_INTERRUPT;
-
- if (I915_HAS_HOTPLUG(dev)) {
-diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
-index 33b3d0a2..f536ff2 100644
---- a/drivers/gpu/drm/i915/i915_reg.h
-+++ b/drivers/gpu/drm/i915/i915_reg.h
-@@ -1740,6 +1740,7 @@ enum punit_power_well {
- #define GMBUS_CYCLE_INDEX (2<<25)
- #define GMBUS_CYCLE_STOP (4<<25)
- #define GMBUS_BYTE_COUNT_SHIFT 16
-+#define GMBUS_BYTE_COUNT_MAX 256U
- #define GMBUS_SLAVE_INDEX_SHIFT 8
- #define GMBUS_SLAVE_ADDR_SHIFT 1
- #define GMBUS_SLAVE_READ (1<<0)
-diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
-index b31088a..56e437e 100644
---- a/drivers/gpu/drm/i915/intel_i2c.c
-+++ b/drivers/gpu/drm/i915/intel_i2c.c
-@@ -270,18 +270,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
- }
-
- static int
--gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
-- u32 gmbus1_index)
-+gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
-+ unsigned short addr, u8 *buf, unsigned int len,
-+ u32 gmbus1_index)
- {
- int reg_offset = dev_priv->gpio_mmio_base;
-- u16 len = msg->len;
-- u8 *buf = msg->buf;
-
- I915_WRITE(GMBUS1 + reg_offset,
- gmbus1_index |
- GMBUS_CYCLE_WAIT |
- (len << GMBUS_BYTE_COUNT_SHIFT) |
-- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
-+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_READ | GMBUS_SW_RDY);
- while (len) {
- int ret;
-@@ -303,11 +302,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
- }
-
- static int
--gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
-+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
-+ u32 gmbus1_index)
- {
-- int reg_offset = dev_priv->gpio_mmio_base;
-- u16 len = msg->len;
- u8 *buf = msg->buf;
-+ unsigned int rx_size = msg->len;
-+ unsigned int len;
-+ int ret;
-+
-+ do {
-+ len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
-+
-+ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
-+ buf, len, gmbus1_index);
-+ if (ret)
-+ return ret;
-+
-+ rx_size -= len;
-+ buf += len;
-+ } while (rx_size != 0);
-+
-+ return 0;
-+}
-+
-+static int
-+gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
-+ unsigned short addr, u8 *buf, unsigned int len)
-+{
-+ int reg_offset = dev_priv->gpio_mmio_base;
-+ unsigned int chunk_size = len;
- u32 val, loop;
-
- val = loop = 0;
-@@ -319,8 +342,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
- I915_WRITE(GMBUS3 + reg_offset, val);
- I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT |
-- (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
-- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
-+ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
-+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
- while (len) {
- int ret;
-@@ -337,6 +360,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
- if (ret)
- return ret;
- }
-+
-+ return 0;
-+}
-+
-+static int
-+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
-+{
-+ u8 *buf = msg->buf;
-+ unsigned int tx_size = msg->len;
-+ unsigned int len;
-+ int ret;
-+
-+ do {
-+ len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
-+
-+ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
-+ if (ret)
-+ return ret;
-+
-+ buf += len;
-+ tx_size -= len;
-+ } while (tx_size != 0);
-+
- return 0;
- }
-
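
Both gmbus wrappers above apply one pattern: slice a message of any length into pieces no larger than GMBUS_BYTE_COUNT_MAX and feed each piece to the fixed-size register path. The loop in isolation, with the hardware transfer stubbed out:

#include <stdio.h>

#define CHUNK_MAX 256u /* mirrors GMBUS_BYTE_COUNT_MAX */

/* Stub for the fixed-size hardware transfer. */
static int xfer_chunk(const unsigned char *buf, unsigned int len)
{
	(void)buf;
	printf("chunk of %u bytes\n", len);
	return 0;
}

/* Slice an arbitrary-length message into CHUNK_MAX-sized pieces. */
static int xfer(const unsigned char *buf, unsigned int size)
{
	do {
		unsigned int len = size < CHUNK_MAX ? size : CHUNK_MAX;
		int ret = xfer_chunk(buf, len);

		if (ret)
			return ret;
		buf += len;
		size -= len;
	} while (size != 0);

	return 0;
}

int main(void)
{
	static unsigned char msg[600];

	return xfer(msg, sizeof(msg)); /* prints 256, 256, 88 */
}
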
-diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
-index 86807ee..9bd5611 100644
---- a/drivers/gpu/drm/radeon/atombios_crtc.c
-+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
-@@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
- misc |= ATOM_COMPOSITESYNC;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- misc |= ATOM_INTERLACE;
-- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- misc |= ATOM_DOUBLE_CLOCK_MODE;
-+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
-
- args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
- args.ucCRTC = radeon_crtc->crtc_id;
-@@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
- misc |= ATOM_COMPOSITESYNC;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- misc |= ATOM_INTERLACE;
-- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- misc |= ATOM_DOUBLE_CLOCK_MODE;
-+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
-
- args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
- args.ucCRTC = radeon_crtc->crtc_id;
-diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
-index 9c47867..7fe5590 100644
---- a/drivers/hid/hid-ids.h
-+++ b/drivers/hid/hid-ids.h
-@@ -459,6 +459,10 @@
- #define USB_DEVICE_ID_UGCI_FLYING 0x0020
- #define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
-
-+#define USB_VENDOR_ID_HP 0x03f0
-+#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE 0x0a4a
-+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
-+
- #define USB_VENDOR_ID_HUION 0x256c
- #define USB_DEVICE_ID_HUION_TABLET 0x006e
-
-diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
-index a821277..4e3ae9f 100644
---- a/drivers/hid/usbhid/hid-quirks.c
-+++ b/drivers/hid/usbhid/hid-quirks.c
-@@ -78,6 +78,8 @@ static const struct hid_blacklist {
- { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
-+ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
-+ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
-diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
-index 2978f5e..00bc30e 100644
---- a/drivers/hv/channel.c
-+++ b/drivers/hv/channel.c
-@@ -135,7 +135,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
- GFP_KERNEL);
- if (!open_info) {
- err = -ENOMEM;
-- goto error0;
-+ goto error_gpadl;
- }
-
- init_completion(&open_info->waitevent);
-@@ -151,7 +151,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
-
- if (userdatalen > MAX_USER_DEFINED_BYTES) {
- err = -EINVAL;
-- goto error0;
-+ goto error_gpadl;
- }
-
- if (userdatalen)
-@@ -195,6 +195,9 @@ error1:
- list_del(&open_info->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
-+error_gpadl:
-+ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
-+
- error0:
- free_pages((unsigned long)out,
- get_order(send_ringbuffer_size + recv_ringbuffer_size));
-diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
-index 5f96b1b..019d542 100644
---- a/drivers/i2c/busses/i2c-rk3x.c
-+++ b/drivers/i2c/busses/i2c-rk3x.c
-@@ -833,7 +833,7 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
- clk_disable(i2c->clk);
- spin_unlock_irqrestore(&i2c->lock, flags);
-
-- return ret;
-+ return ret < 0 ? ret : num;
- }
-
- static u32 rk3x_i2c_func(struct i2c_adapter *adap)
-diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
-index edf274c..8143162 100644
---- a/drivers/i2c/i2c-core.c
-+++ b/drivers/i2c/i2c-core.c
-@@ -596,6 +596,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
- adap->bus_recovery_info->set_scl(adap, 1);
- return i2c_generic_recovery(adap);
- }
-+EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
-
- int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
- {
-@@ -610,6 +611,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
-
- return ret;
- }
-+EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);
-
- int i2c_recover_bus(struct i2c_adapter *adap)
- {
-@@ -619,6 +621,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
- dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
- return adap->bus_recovery_info->recover_bus(adap);
- }
-+EXPORT_SYMBOL_GPL(i2c_recover_bus);
-
- static int i2c_device_probe(struct device *dev)
- {
-@@ -1410,6 +1413,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
-
- dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
-
-+ pm_runtime_no_callbacks(&adap->dev);
-+
- #ifdef CONFIG_I2C_COMPAT
- res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
- adap->dev.parent);
-diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
-index 593f7ca..06cc1ff 100644
---- a/drivers/i2c/i2c-mux.c
-+++ b/drivers/i2c/i2c-mux.c
-@@ -32,8 +32,9 @@ struct i2c_mux_priv {
- struct i2c_algorithm algo;
-
- struct i2c_adapter *parent;
-- void *mux_priv; /* the mux chip/device */
-- u32 chan_id; /* the channel id */
-+ struct device *mux_dev;
-+ void *mux_priv;
-+ u32 chan_id;
-
- int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
- int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
-@@ -119,6 +120,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
-
- /* Set up private adapter data */
- priv->parent = parent;
-+ priv->mux_dev = mux_dev;
- priv->mux_priv = mux_priv;
- priv->chan_id = chan_id;
- priv->select = select;
-@@ -203,7 +205,7 @@ void i2c_del_mux_adapter(struct i2c_adapter *adap)
- char symlink_name[20];
-
- snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id);
-- sysfs_remove_link(&adap->dev.parent->kobj, symlink_name);
-+ sysfs_remove_link(&priv->mux_dev->kobj, symlink_name);
-
- sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
- i2c_del_adapter(adap);
-diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
-index b0e5852..44d1d79 100644
---- a/drivers/idle/intel_idle.c
-+++ b/drivers/idle/intel_idle.c
-@@ -218,18 +218,10 @@ static struct cpuidle_state byt_cstates[] = {
- .enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
- {
-- .name = "C1E-BYT",
-- .desc = "MWAIT 0x01",
-- .flags = MWAIT2flg(0x01),
-- .exit_latency = 15,
-- .target_residency = 30,
-- .enter = &intel_idle,
-- .enter_freeze = intel_idle_freeze, },
-- {
- .name = "C6N-BYT",
- .desc = "MWAIT 0x58",
- .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
-- .exit_latency = 40,
-+ .exit_latency = 300,
- .target_residency = 275,
- .enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
-@@ -237,7 +229,7 @@ static struct cpuidle_state byt_cstates[] = {
- .name = "C6S-BYT",
- .desc = "MWAIT 0x52",
- .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
-- .exit_latency = 140,
-+ .exit_latency = 500,
- .target_residency = 560,
- .enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
-@@ -246,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = {
- .desc = "MWAIT 0x60",
- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 1200,
-- .target_residency = 1500,
-+ .target_residency = 4000,
- .enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
- {
-diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
-index 8c014b5..38acb3c 100644
---- a/drivers/infiniband/core/umem.c
-+++ b/drivers/infiniband/core/umem.c
-@@ -99,12 +99,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
- if (dmasync)
- dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
-
-+ if (!size)
-+ return ERR_PTR(-EINVAL);
-+
- /*
- * If the combination of the addr and size requested for this memory
- * region causes an integer overflow, return error.
- */
-- if ((PAGE_ALIGN(addr + size) <= size) ||
-- (PAGE_ALIGN(addr + size) <= addr))
-+ if (((addr + size) < addr) ||
-+ PAGE_ALIGN(addr + size) < (addr + size))
- return ERR_PTR(-EINVAL);
-
- if (!can_do_mlock())
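
The ib_umem_get fix replaces alignment-based overflow tests with a direct wrap check, (addr + size) < addr, plus a separate test that page-aligning the end address did not itself overflow. Concrete 32-bit numbers (purely illustrative) showing a wrap the direct test catches:

#include <stdio.h>
#include <stdint.h>

/* 4 KiB pages, 32-bit addresses to make the wrap easy to show. */
#define PAGE_ALIGN32(x) (((x) + 0xfffu) & ~(uint32_t)0xfffu)

int main(void)
{
	uint32_t addr = 0xfffff000u;   /* near the top of the space */
	uint32_t size = 0x2000u;
	uint32_t sum  = addr + size;   /* wraps around to 0x1000 */

	/* The patch's two conditions: a direct wrap test, plus a check
	 * that page-aligning the end address did not itself overflow. */
	int wraps       = sum < addr;
	int align_wraps = PAGE_ALIGN32(sum) < sum;

	printf("sum=%#x wraps=%d align_wraps=%d\n",
	       (unsigned)sum, wraps, align_wraps); /* sum=0x1000 wraps=1 */
	return 0;
}
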
-diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
-index ed2bd67..fbde33a 100644
---- a/drivers/infiniband/hw/mlx4/qp.c
-+++ b/drivers/infiniband/hw/mlx4/qp.c
-@@ -2605,8 +2605,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
-
- memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
-
-- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
-- wr->wr.ud.hlen);
-+ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
- *lso_seg_len = halign;
- return 0;
- }
-diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
-index 20e859a..76eb57b 100644
---- a/drivers/infiniband/ulp/iser/iser_initiator.c
-+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
-@@ -409,8 +409,8 @@ int iser_send_command(struct iscsi_conn *conn,
- if (scsi_prot_sg_count(sc)) {
- prot_buf->buf = scsi_prot_sglist(sc);
- prot_buf->size = scsi_prot_sg_count(sc);
-- prot_buf->data_len = data_buf->data_len >>
-- ilog2(sc->device->sector_size) * 8;
-+ prot_buf->data_len = (data_buf->data_len >>
-+ ilog2(sc->device->sector_size)) * 8;
- }
-
- if (hdr->flags & ISCSI_FLAG_CMD_READ) {
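
The iser change is a pure operator-precedence fix: >> binds looser than *, so the original shifted by ilog2(sector_size) * 8 instead of multiplying the shifted value by 8. A tiny demonstration with stand-in numbers (with real 512-byte sectors the buggy shift count, 9 * 8 = 72, would even exceed a 64-bit value):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t data_len = 1 << 20; /* 1 MiB transfer, illustrative */
	unsigned shift = 5, mult = 8;

	uint64_t buggy = data_len >> shift * mult;   /* >> (5 * 8) */
	uint64_t fixed = (data_len >> shift) * mult; /* (len >> 5) * 8 */

	printf("buggy=%llu fixed=%llu\n", /* 0 vs 262144 */
	       (unsigned long long)buggy, (unsigned long long)fixed);
	return 0;
}
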
-diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
-index 075b19c..147029a 100644
---- a/drivers/infiniband/ulp/isert/ib_isert.c
-+++ b/drivers/infiniband/ulp/isert/ib_isert.c
-@@ -222,7 +222,7 @@ fail:
- static void
- isert_free_rx_descriptors(struct isert_conn *isert_conn)
- {
-- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
-+ struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
- struct iser_rx_desc *rx_desc;
- int i;
-
-@@ -719,8 +719,8 @@ out:
- static void
- isert_connect_release(struct isert_conn *isert_conn)
- {
-- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct isert_device *device = isert_conn->conn_device;
-+ struct ib_device *ib_dev = device->ib_device;
-
- isert_dbg("conn %p\n", isert_conn);
-
-@@ -728,7 +728,8 @@ isert_connect_release(struct isert_conn *isert_conn)
- isert_conn_free_fastreg_pool(isert_conn);
-
- isert_free_rx_descriptors(isert_conn);
-- rdma_destroy_id(isert_conn->conn_cm_id);
-+ if (isert_conn->conn_cm_id)
-+ rdma_destroy_id(isert_conn->conn_cm_id);
-
- if (isert_conn->conn_qp) {
- struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
-@@ -878,12 +879,15 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
- return 0;
- }
-
--static void
-+static int
- isert_connect_error(struct rdma_cm_id *cma_id)
- {
- struct isert_conn *isert_conn = cma_id->qp->qp_context;
-
-+ isert_conn->conn_cm_id = NULL;
- isert_put_conn(isert_conn);
-+
-+ return -1;
- }
-
- static int
-@@ -912,7 +916,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
- case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
- case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
- case RDMA_CM_EVENT_CONNECT_ERROR:
-- isert_connect_error(cma_id);
-+ ret = isert_connect_error(cma_id);
- break;
- default:
- isert_err("Unhandled RDMA CMA event: %d\n", event->event);
-@@ -1861,11 +1865,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
- cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
- spin_unlock_bh(&cmd->istate_lock);
-
-- if (ret)
-+ if (ret) {
-+ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
- transport_send_check_condition_and_sense(se_cmd,
- se_cmd->pi_err, 0);
-- else
-+ } else {
- target_execute_cmd(se_cmd);
-+ }
- }
-
- static void
-diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
-index 27bcdbc..ea6cb64 100644
---- a/drivers/input/mouse/alps.c
-+++ b/drivers/input/mouse/alps.c
-@@ -1159,13 +1159,14 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
- bool report_buttons)
- {
- struct alps_data *priv = psmouse->private;
-- struct input_dev *dev;
-+ struct input_dev *dev, *dev2 = NULL;
-
- /* Figure out which device to use to report the bare packet */
- if (priv->proto_version == ALPS_PROTO_V2 &&
- (priv->flags & ALPS_DUALPOINT)) {
- /* On V2 devices the DualPoint Stick reports bare packets */
- dev = priv->dev2;
-+ dev2 = psmouse->dev;
- } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) {
- 		/* Register the dev3 mouse the first time we receive a bare PS/2 packet */
- if (!IS_ERR(priv->dev3))
-@@ -1177,7 +1178,7 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
- }
-
- if (report_buttons)
-- alps_report_buttons(dev, NULL,
-+ alps_report_buttons(dev, dev2,
- packet[0] & 1, packet[0] & 2, packet[0] & 4);
-
- input_report_rel(dev, REL_X,
-diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
-index 6e22682..991dc6b 100644
---- a/drivers/input/mouse/elantech.c
-+++ b/drivers/input/mouse/elantech.c
-@@ -893,6 +893,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
- }
-
- /*
-+ * This writes the reg_07 value again to the hardware at the end of every
-+ * set_rate call because the register loses its value. reg_07 allows setting
-+ * absolute mode on v4 hardware.
-+ */
-+static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
-+ unsigned int rate)
-+{
-+ struct elantech_data *etd = psmouse->private;
-+
-+ etd->original_set_rate(psmouse, rate);
-+ if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
-+ psmouse_err(psmouse, "restoring reg_07 failed\n");
-+}
-+
-+/*
- * Put the touchpad into absolute mode
- */
- static int elantech_set_absolute_mode(struct psmouse *psmouse)
-@@ -1094,6 +1109,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
- * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
- * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
- * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
-+ * Asus TP500LN 0x381f17 10, 14, 0e clickpad
-+ * Asus X750JN 0x381f17 10, 14, 0e clickpad
- * Asus UX31 0x361f00 20, 15, 0e clickpad
- * Asus UX32VD 0x361f02 00, 15, 0e clickpad
- * Avatar AVIU-145A2 0x361f00 ? clickpad
-@@ -1635,6 +1652,11 @@ int elantech_init(struct psmouse *psmouse)
- goto init_fail;
- }
-
-+ if (etd->fw_version == 0x381f17) {
-+ etd->original_set_rate = psmouse->set_rate;
-+ psmouse->set_rate = elantech_set_rate_restore_reg_07;
-+ }
-+
- if (elantech_set_input_params(psmouse)) {
- psmouse_err(psmouse, "failed to query touchpad range.\n");
- goto init_fail;
-diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
-index 6f3afec..f965d15 100644
---- a/drivers/input/mouse/elantech.h
-+++ b/drivers/input/mouse/elantech.h
-@@ -142,6 +142,7 @@ struct elantech_data {
- struct finger_pos mt[ETP_MAX_FINGERS];
- unsigned char parity[256];
- int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
-+ void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
- };
-
- #ifdef CONFIG_MOUSE_PS2_ELANTECH
-diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
-index 713a962..41473929 100644
---- a/drivers/md/dm-crypt.c
-+++ b/drivers/md/dm-crypt.c
-@@ -925,11 +925,10 @@ static int crypt_convert(struct crypt_config *cc,
-
- switch (r) {
- /* async */
-+ case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&ctx->restart);
- reinit_completion(&ctx->restart);
-- /* fall through*/
-- case -EINPROGRESS:
- ctx->req = NULL;
- ctx->cc_sector++;
- continue;
-@@ -1346,10 +1345,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
- struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
- struct crypt_config *cc = io->cc;
-
-- if (error == -EINPROGRESS) {
-- complete(&ctx->restart);
-+ if (error == -EINPROGRESS)
- return;
-- }
-
- if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
- error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
-@@ -1360,12 +1357,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
- crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
-
- if (!atomic_dec_and_test(&ctx->cc_pending))
-- return;
-+ goto done;
-
- if (bio_data_dir(io->base_bio) == READ)
- kcryptd_crypt_read_done(io);
- else
- kcryptd_crypt_write_io_submit(io, 1);
-+done:
-+ if (!completion_done(&ctx->restart))
-+ complete(&ctx->restart);
- }
-
- static void kcryptd_crypt(struct work_struct *work)
-diff --git a/drivers/md/md.c b/drivers/md/md.c
-index 717daad..e617878 100644
---- a/drivers/md/md.c
-+++ b/drivers/md/md.c
-@@ -249,6 +249,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
- const int rw = bio_data_dir(bio);
- struct mddev *mddev = q->queuedata;
- unsigned int sectors;
-+ int cpu;
-
- if (mddev == NULL || mddev->pers == NULL
- || !mddev->ready) {
-@@ -284,7 +285,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
- sectors = bio_sectors(bio);
- mddev->pers->make_request(mddev, bio);
-
-- generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
-+ cpu = part_stat_lock();
-+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
-+ part_stat_unlock();
-
- if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
- wake_up(&mddev->sb_wait);
-diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
-index 3ed9f42..3b5d7f7 100644
---- a/drivers/md/raid0.c
-+++ b/drivers/md/raid0.c
-@@ -313,7 +313,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,
-
- /*
- * remaps the bio to the target device. we separate two flows.
-- * power 2 flow and a general flow for the sake of perfromance
-+ * power 2 flow and a general flow for the sake of performance
- */
- static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
- sector_t sector, sector_t *sector_offset)
-@@ -524,6 +524,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
- split = bio;
- }
-
-+ sector = bio->bi_iter.bi_sector;
- zone = find_zone(mddev->private, &sector);
- tmp_dev = map_sector(mddev, zone, sector, &sector);
- split->bi_bdev = tmp_dev->bdev;
-diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
-index 77c78de..7020659 100644
---- a/drivers/media/rc/img-ir/img-ir-core.c
-+++ b/drivers/media/rc/img-ir/img-ir-core.c
-@@ -146,7 +146,7 @@ static int img_ir_remove(struct platform_device *pdev)
- {
- struct img_ir_priv *priv = platform_get_drvdata(pdev);
-
-- free_irq(priv->irq, img_ir_isr);
-+ free_irq(priv->irq, priv);
- img_ir_remove_hw(priv);
- img_ir_remove_raw(priv);
-
-diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
-index 65a326c..749ad56 100644
---- a/drivers/media/usb/stk1160/stk1160-v4l.c
-+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
-@@ -240,6 +240,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
- if (mutex_lock_interruptible(&dev->v4l_lock))
- return -ERESTARTSYS;
-
-+ /*
-+ * Once URBs are cancelled, the URB complete handler
-+ * won't be running. This is required to safely release the
-+ * current buffer (dev->isoc_ctl.buf).
-+ */
- stk1160_cancel_isoc(dev);
-
- /*
-@@ -620,8 +625,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
- stk1160_info("buffer [%p/%d] aborted\n",
- buf, buf->vb.v4l2_buf.index);
- }
-- /* It's important to clear current buffer */
-- dev->isoc_ctl.buf = NULL;
-+
-+ /* It's important to release the current buffer */
-+ if (dev->isoc_ctl.buf) {
-+ buf = dev->isoc_ctl.buf;
-+ dev->isoc_ctl.buf = NULL;
-+
-+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
-+ stk1160_info("buffer [%p/%d] aborted\n",
-+ buf, buf->vb.v4l2_buf.index);
-+ }
- spin_unlock_irqrestore(&dev->buf_lock, flags);
- }
-
-diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
-index fc145d2..922a750 100644
---- a/drivers/memstick/core/mspro_block.c
-+++ b/drivers/memstick/core/mspro_block.c
-@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
-
- if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
- if (msb->data_dir == READ) {
-- for (cnt = 0; cnt < msb->current_seg; cnt++)
-+ for (cnt = 0; cnt < msb->current_seg; cnt++) {
- t_len += msb->req_sg[cnt].length
- / msb->page_size;
-
-@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
- t_len += msb->current_page - 1;
-
- t_len *= msb->page_size;
-+ }
- }
- } else
- t_len = blk_rq_bytes(msb->block_req);
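
The mspro_block hunk fixes the classic indentation-versus-scope mismatch: without braces only the first statement belongs to the for loop, whatever the indentation suggests. In miniature:

#include <stdio.h>

int main(void)
{
	int lens[] = { 3, 4, 5 };
	int no_braces = 0, braces = 0;

	/* Indentation says both statements loop; the grammar says no. */
	for (int i = 0; i < 3; i++)
		no_braces += lens[i];
		no_braces += 1;          /* runs once, after the loop */

	for (int i = 0; i < 3; i++) {
		braces += lens[i];
		braces += 1;             /* runs on every iteration */
	}

	printf("no braces: %d, braces: %d\n", no_braces, braces); /* 13 vs 15 */
	return 0;
}
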
-diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
-index 2a87f69..1aed3b7 100644
---- a/drivers/mfd/mfd-core.c
-+++ b/drivers/mfd/mfd-core.c
-@@ -128,7 +128,7 @@ static int mfd_add_device(struct device *parent, int id,
- int platform_id;
- int r;
-
-- if (id < 0)
-+ if (id == PLATFORM_DEVID_AUTO)
- platform_id = id;
- else
- platform_id = id + cell->id;
-diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
-index e8a4218..459ed1b 100644
---- a/drivers/mmc/host/sunxi-mmc.c
-+++ b/drivers/mmc/host/sunxi-mmc.c
-@@ -930,7 +930,9 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
- return PTR_ERR(host->clk_sample);
- }
-
-- host->reset = devm_reset_control_get(&pdev->dev, "ahb");
-+ host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
-+ if (PTR_ERR(host->reset) == -EPROBE_DEFER)
-+ return PTR_ERR(host->reset);
-
- ret = clk_prepare_enable(host->clk_ahb);
- if (ret) {
-diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
-index a31c357..dba7e1c 100644
---- a/drivers/mmc/host/tmio_mmc_pio.c
-+++ b/drivers/mmc/host/tmio_mmc_pio.c
-@@ -1073,8 +1073,6 @@ EXPORT_SYMBOL(tmio_mmc_host_alloc);
- void tmio_mmc_host_free(struct tmio_mmc_host *host)
- {
- mmc_free_host(host->mmc);
--
-- host->mmc = NULL;
- }
- EXPORT_SYMBOL(tmio_mmc_host_free);
-
-diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
-index 9d2e16f..b5e1548 100644
---- a/drivers/mtd/ubi/attach.c
-+++ b/drivers/mtd/ubi/attach.c
-@@ -410,7 +410,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
- second_is_newer = !second_is_newer;
- } else {
- dbg_bld("PEB %d CRC is OK", pnum);
-- bitflips = !!err;
-+ bitflips |= !!err;
- }
- mutex_unlock(&ubi->buf_mutex);
-
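
The one-character ubi change, = to |=, matters because bitflips is meant to accumulate across several reads; plain assignment let a clean final check erase a flag set earlier. In miniature:

#include <stdio.h>

int main(void)
{
	int errs[] = { 1, 0 }; /* first pass sees bitflips, second is clean */
	int assign = 0, accum = 0;

	for (int i = 0; i < 2; i++) {
		assign = !!errs[i];  /* buggy: the last check wins */
		accum |= !!errs[i];  /* fixed: any check can latch the flag */
	}

	printf("assign=%d accum=%d\n", assign, accum); /* 0 vs 1 */
	return 0;
}
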
-diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
-index d647e50..d16fccf 100644
---- a/drivers/mtd/ubi/cdev.c
-+++ b/drivers/mtd/ubi/cdev.c
-@@ -455,7 +455,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
- /* Validate the request */
- err = -EINVAL;
- if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
-- req.bytes < 0 || req.lnum >= vol->usable_leb_size)
-+ req.bytes < 0 || req.bytes > vol->usable_leb_size)
- break;
-
- err = get_exclusive(desc);
-diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
-index 16e34b3..8c9a710 100644
---- a/drivers/mtd/ubi/eba.c
-+++ b/drivers/mtd/ubi/eba.c
-@@ -1419,7 +1419,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
- * during re-size.
- */
- ubi_move_aeb_to_list(av, aeb, &ai->erase);
-- vol->eba_tbl[aeb->lnum] = aeb->pnum;
-+ else
-+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
- }
- }
-
-diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
-index 8f7bde6..0bd92d8 100644
---- a/drivers/mtd/ubi/wl.c
-+++ b/drivers/mtd/ubi/wl.c
-@@ -1002,7 +1002,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
- int shutdown)
- {
- int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
-- int vol_id = -1, uninitialized_var(lnum);
-+ int vol_id = -1, lnum = -1;
- #ifdef CONFIG_MTD_UBI_FASTMAP
- int anchor = wrk->anchor;
- #endif
-diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
-index 81d4153..77bf133 100644
---- a/drivers/net/ethernet/cadence/macb.c
-+++ b/drivers/net/ethernet/cadence/macb.c
-@@ -2165,7 +2165,7 @@ static void macb_configure_caps(struct macb *bp)
- }
- }
-
-- if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
-+ if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) >= 0x2)
- bp->caps |= MACB_CAPS_MACB_IS_GEM;
-
- if (macb_is_gem(bp)) {
-diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
-index 7f997d3..a71c446 100644
---- a/drivers/net/ethernet/intel/e1000/e1000_main.c
-+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
-@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
- static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring,
- int *work_done, int work_to_do);
-+static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
-+ struct e1000_rx_ring *rx_ring,
-+ int cleaned_count)
-+{
-+}
- static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring,
- int cleaned_count);
-@@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
- msleep(1);
- /* e1000_down has a dependency on max_frame_size */
- hw->max_frame_size = max_frame;
-- if (netif_running(netdev))
-+ if (netif_running(netdev)) {
-+ /* prevent buffers from being reallocated */
-+ adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
- e1000_down(adapter);
-+ }
-
- 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
- 	 * means we reserve 2 more; this pushes us to allocate from the next
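
The new e1000_alloc_dummy_rx_buffers is an empty function swapped into the adapter's alloc_rx_buf hook so that nothing can refill the RX ring while e1000_down() tears it apart. The pattern, reduced to a plain function pointer (the struct and names are stand-ins):

#include <stdio.h>

struct adapter {
	void (*alloc_rx_buf)(struct adapter *a, int count);
};

static void alloc_rx_buffers(struct adapter *a, int count)
{
	(void)a;
	printf("allocating %d buffers\n", count);
}

/* Swapped in while the ring is torn down so no path can refill it. */
static void alloc_dummy_rx_buffers(struct adapter *a, int count)
{
	(void)a;
	(void)count;
}

int main(void)
{
	struct adapter a = { .alloc_rx_buf = alloc_rx_buffers };

	a.alloc_rx_buf(&a, 16);                  /* normal operation */
	a.alloc_rx_buf = alloc_dummy_rx_buffers;
	a.alloc_rx_buf(&a, 16);                  /* quiesced: no-op */
	a.alloc_rx_buf = alloc_rx_buffers;       /* restored once back up */
	return 0;
}
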
-diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
-index af829c5..7ace07d 100644
---- a/drivers/net/ethernet/marvell/pxa168_eth.c
-+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
-@@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
- np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- if (!np) {
- dev_err(&pdev->dev, "missing phy-handle\n");
-- return -EINVAL;
-+ err = -EINVAL;
-+ goto err_netdev;
- }
- of_property_read_u32(np, "reg", &pep->phy_addr);
- pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
-@@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
- pep->smi_bus = mdiobus_alloc();
- if (pep->smi_bus == NULL) {
- err = -ENOMEM;
-- goto err_base;
-+ goto err_netdev;
- }
- pep->smi_bus->priv = pep;
- pep->smi_bus->name = "pxa168_eth smi";
-@@ -1551,13 +1552,10 @@ err_mdiobus:
- mdiobus_unregister(pep->smi_bus);
- err_free_mdio:
- mdiobus_free(pep->smi_bus);
--err_base:
-- iounmap(pep->base);
- err_netdev:
- free_netdev(dev);
- err_clk:
-- clk_disable(clk);
-- clk_put(clk);
-+ clk_disable_unprepare(clk);
- return err;
- }
-
-@@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev)
- if (pep->phy)
- phy_disconnect(pep->phy);
- if (pep->clk) {
-- clk_disable(pep->clk);
-- clk_put(pep->clk);
-- pep->clk = NULL;
-+ clk_disable_unprepare(pep->clk);
- }
-
-- iounmap(pep->base);
-- pep->base = NULL;
- mdiobus_unregister(pep->smi_bus);
- mdiobus_free(pep->smi_bus);
- unregister_netdev(dev);
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
-index a7b58ba..3dccf01 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
-@@ -981,20 +981,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
- struct mlx4_en_priv *priv = netdev_priv(dev);
-
- /* check if requested function is supported by the device */
-- if ((hfunc == ETH_RSS_HASH_TOP &&
-- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
-- (hfunc == ETH_RSS_HASH_XOR &&
-- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
-- return -EINVAL;
-+ if (hfunc == ETH_RSS_HASH_TOP) {
-+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
-+ return -EINVAL;
-+ if (!(dev->features & NETIF_F_RXHASH))
-+ en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
-+ return 0;
-+ } else if (hfunc == ETH_RSS_HASH_XOR) {
-+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
-+ return -EINVAL;
-+ if (dev->features & NETIF_F_RXHASH)
-+ en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
-+ return 0;
-+ }
-
-- priv->rss_hash_fn = hfunc;
-- if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
-- en_warn(priv,
-- "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
-- if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
-- en_warn(priv,
-- "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
-- return 0;
-+ return -EINVAL;
- }
-
- static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
-@@ -1068,6 +1069,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
- priv->prof->rss_rings = rss_rings;
- if (key)
- memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
-+ if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-+ priv->rss_hash_fn = hfunc;
-
- if (port_up) {
- err = mlx4_en_start_port(dev);
-diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
-index af034db..9d15566 100644
---- a/drivers/net/ppp/ppp_generic.c
-+++ b/drivers/net/ppp/ppp_generic.c
-@@ -1716,6 +1716,7 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
- {
- /* note: a 0-length skb is used as an error indication */
- if (skb->len > 0) {
-+ skb_checksum_complete_unset(skb);
- #ifdef CONFIG_PPP_MULTILINK
- /* XXX do channel-level decompression here */
- if (PPP_PROTO(skb) == PPP_MP)
-diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
-index 90a714c..23806c2 100644
---- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
-+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
-@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
- {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
- {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
- {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
-+ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
- {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
- {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
- {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
-@@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
- {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
- {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
- {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
-+ {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
- {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
- {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
- {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
-diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
-index c93fae9..5fbd223 100644
---- a/drivers/net/wireless/ti/wl18xx/debugfs.c
-+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
-@@ -139,7 +139,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
- WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
- WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
-
--WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
-+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
-
- WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
- AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
-diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
-index 0f2cfb0..bf14676 100644
---- a/drivers/net/wireless/ti/wlcore/debugfs.h
-+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
-@@ -26,8 +26,8 @@
-
- #include "wlcore.h"
-
--int wl1271_format_buffer(char __user *userbuf, size_t count,
-- loff_t *ppos, char *fmt, ...);
-+__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
-+ loff_t *ppos, char *fmt, ...);
-
- int wl1271_debugfs_init(struct wl1271 *wl);
- void wl1271_debugfs_exit(struct wl1271 *wl);
-diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
-index eb88693..7b53a5c 100644
---- a/drivers/nfc/st21nfcb/i2c.c
-+++ b/drivers/nfc/st21nfcb/i2c.c
-@@ -109,7 +109,7 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
- return phy->ndlc->hard_fault;
-
- r = i2c_master_send(client, skb->data, skb->len);
-- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
-+ if (r < 0) { /* Retry, chip was in standby */
- usleep_range(1000, 4000);
- r = i2c_master_send(client, skb->data, skb->len);
- }
-@@ -148,7 +148,7 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
- struct i2c_client *client = phy->i2c_dev;
-
- r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
-- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
-+ if (r < 0) { /* Retry, chip was in standby */
- usleep_range(1000, 4000);
- r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
- }
-diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
-index 15c0fab..bceb30b 100644
---- a/drivers/platform/x86/compal-laptop.c
-+++ b/drivers/platform/x86/compal-laptop.c
-@@ -1026,9 +1026,9 @@ static int compal_probe(struct platform_device *pdev)
- if (err)
- return err;
-
-- hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
-- "compal", data,
-- compal_hwmon_groups);
-+ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
-+ "compal", data,
-+ compal_hwmon_groups);
- if (IS_ERR(hwmon_dev)) {
- err = PTR_ERR(hwmon_dev);
- goto remove;
-@@ -1036,7 +1036,9 @@ static int compal_probe(struct platform_device *pdev)
-
- /* Power supply */
- initialize_power_supply_data(data);
-- power_supply_register(&compal_device->dev, &data->psy);
-+ err = power_supply_register(&compal_device->dev, &data->psy);
-+ if (err < 0)
-+ goto remove;
-
- platform_set_drvdata(pdev, data);
-
-diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c
-index 9d69460..96b15e0 100644
---- a/drivers/power/ipaq_micro_battery.c
-+++ b/drivers/power/ipaq_micro_battery.c
-@@ -226,6 +226,7 @@ static struct power_supply micro_ac_power = {
- static int micro_batt_probe(struct platform_device *pdev)
- {
- struct micro_battery *mb;
-+ int ret;
-
- mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
- if (!mb)
-@@ -233,14 +234,30 @@ static int micro_batt_probe(struct platform_device *pdev)
-
- mb->micro = dev_get_drvdata(pdev->dev.parent);
- mb->wq = create_singlethread_workqueue("ipaq-battery-wq");
-+ if (!mb->wq)
-+ return -ENOMEM;
-+
- INIT_DELAYED_WORK(&mb->update, micro_battery_work);
- platform_set_drvdata(pdev, mb);
- queue_delayed_work(mb->wq, &mb->update, 1);
-- power_supply_register(&pdev->dev, &micro_batt_power);
-- power_supply_register(&pdev->dev, &micro_ac_power);
-+
-+ ret = power_supply_register(&pdev->dev, &micro_batt_power);
-+ if (ret < 0)
-+ goto batt_err;
-+
-+ ret = power_supply_register(&pdev->dev, &micro_ac_power);
-+ if (ret < 0)
-+ goto ac_err;
-
- dev_info(&pdev->dev, "iPAQ micro battery driver\n");
- return 0;
-+
-+ac_err:
-+ power_supply_unregister(&micro_ac_power);
-+batt_err:
-+ cancel_delayed_work_sync(&mb->update);
-+ destroy_workqueue(mb->wq);
-+ return ret;
- }
-
- static int micro_batt_remove(struct platform_device *pdev)
-@@ -251,6 +268,7 @@ static int micro_batt_remove(struct platform_device *pdev)
- power_supply_unregister(&micro_ac_power);
- power_supply_unregister(&micro_batt_power);
- cancel_delayed_work_sync(&mb->update);
-+ destroy_workqueue(mb->wq);
-
- return 0;
- }
-diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
-index 21fc233..176dab2 100644
---- a/drivers/power/lp8788-charger.c
-+++ b/drivers/power/lp8788-charger.c
-@@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev,
- pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
- pchg->battery.get_property = lp8788_battery_get_property;
-
-- if (power_supply_register(&pdev->dev, &pchg->battery))
-+ if (power_supply_register(&pdev->dev, &pchg->battery)) {
-+ power_supply_unregister(&pchg->charger);
- return -EPERM;
-+ }
-
- return 0;
- }
-diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
-index 7ef445a..cf90760 100644
---- a/drivers/power/twl4030_madc_battery.c
-+++ b/drivers/power/twl4030_madc_battery.c
-@@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
- {
- struct twl4030_madc_battery *twl4030_madc_bat;
- struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
-+ int ret = 0;
-
- twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
- if (!twl4030_madc_bat)
-@@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
-
- twl4030_madc_bat->pdata = pdata;
- platform_set_drvdata(pdev, twl4030_madc_bat);
-- power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
-+ ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
-+ if (ret < 0)
-+ kfree(twl4030_madc_bat);
-
-- return 0;
-+ return ret;
- }
-
- static int twl4030_madc_battery_remove(struct platform_device *pdev)
-diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
-index 675b5e7..5a0800d 100644
---- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
-+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
-@@ -1584,11 +1584,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
- fp_possible = io_info.fpOkForIo;
- }
-
-- /* Use smp_processor_id() for now until cmd->request->cpu is CPU
-+ /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
- id by default, not CPU group id, otherwise all MSI-X queues won't
- be utilized */
- cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
-- smp_processor_id() % instance->msix_vectors : 0;
-+ raw_smp_processor_id() % instance->msix_vectors : 0;
-
- if (fp_possible) {
- megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
-@@ -1693,7 +1693,10 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
- << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
- cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
- cmd->request_desc->SCSIIO.MSIxIndex =
-- instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
-+ instance->msix_vectors ?
-+ raw_smp_processor_id() %
-+ instance->msix_vectors :
-+ 0;
- os_timeout_value = scmd->request->timeout / HZ;
-
- if (instance->secure_jbod_support &&
-diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
-index 2d5ab6d..454536c 100644
---- a/drivers/scsi/mvsas/mv_sas.c
-+++ b/drivers/scsi/mvsas/mv_sas.c
-@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
- static int mvs_task_prep_ata(struct mvs_info *mvi,
- struct mvs_task_exec_info *tei)
- {
-- struct sas_ha_struct *sha = mvi->sas;
- struct sas_task *task = tei->task;
- struct domain_device *dev = task->dev;
- struct mvs_device *mvi_dev = dev->lldd_dev;
- struct mvs_cmd_hdr *hdr = tei->hdr;
- struct asd_sas_port *sas_port = dev->port;
-- struct sas_phy *sphy = dev->phy;
-- struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
- struct mvs_slot_info *slot;
- void *buf_prd;
- u32 tag = tei->tag, hdr_tag;
-@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
- slot->tx = mvi->tx_prod;
- del_q = TXQ_MODE_I | tag |
- (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
-- (MVS_PHY_ID << TXQ_PHY_SHIFT) |
-+ ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
- (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
- mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
-
-diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index 6b78476..3290a3e 100644
---- a/drivers/scsi/sd.c
-+++ b/drivers/scsi/sd.c
-@@ -3100,6 +3100,7 @@ static void scsi_disk_release(struct device *dev)
- ida_remove(&sd_index_ida, sdkp->index);
- spin_unlock(&sd_index_lock);
-
-+ blk_integrity_unregister(disk);
- disk->private_data = NULL;
- put_disk(disk);
- put_device(&sdkp->device->sdev_gendev);
-diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
-index 14c7d42..5c06d29 100644
---- a/drivers/scsi/sd_dif.c
-+++ b/drivers/scsi/sd_dif.c
-@@ -77,7 +77,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
-
- disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
-
-- if (!sdkp)
-+ if (!sdkp->ATO)
- return;
-
- if (type == SD_DIF_TYPE3_PROTECTION)
-diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
-index efc6e44..bf8c5c1 100644
---- a/drivers/scsi/storvsc_drv.c
-+++ b/drivers/scsi/storvsc_drv.c
-@@ -746,21 +746,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
- if (bounce_sgl[j].length == PAGE_SIZE) {
- /* full..move to next entry */
- sg_kunmap_atomic(bounce_addr);
-+ bounce_addr = 0;
- j++;
-+ }
-
-- /* if we need to use another bounce buffer */
-- if (srclen || i != orig_sgl_count - 1)
-- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
-+ /* if we need to use another bounce buffer */
-+ if (srclen && bounce_addr == 0)
-+ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
-
-- } else if (srclen == 0 && i == orig_sgl_count - 1) {
-- /* unmap the last bounce that is < PAGE_SIZE */
-- sg_kunmap_atomic(bounce_addr);
-- }
- }
-
- sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
- }
-
-+ if (bounce_addr)
-+ sg_kunmap_atomic(bounce_addr);
-+
- local_irq_restore(flags);
-
- return total_copied;
-diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
-index 6fea4af..aea3a67 100644
---- a/drivers/spi/spi-imx.c
-+++ b/drivers/spi/spi-imx.c
-@@ -370,8 +370,6 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
- if (spi_imx->dma_is_inited) {
- dma = readl(spi_imx->base + MX51_ECSPI_DMA);
-
-- spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
-- spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
- spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
- rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
- tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
-@@ -868,6 +866,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
- master->max_dma_len = MAX_SDMA_BD_BYTES;
- spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
- SPI_MASTER_MUST_TX;
-+ spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
-+ spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
- spi_imx->dma_is_inited = 1;
-
- return 0;
-diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
-index 4eb7a98..7bf5186 100644
---- a/drivers/spi/spidev.c
-+++ b/drivers/spi/spidev.c
-@@ -245,7 +245,10 @@ static int spidev_message(struct spidev_data *spidev,
- k_tmp->len = u_tmp->len;
-
- total += k_tmp->len;
-- if (total > bufsiz) {
-+ /* Check total length of transfers. Also check each
-+ * transfer length to avoid arithmetic overflow.
-+ */
-+ if (total > bufsiz || k_tmp->len > bufsiz) {
- status = -EMSGSIZE;
- goto done;
- }
-diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
-index 7bdb62b..f83e00c 100644
---- a/drivers/staging/android/sync.c
-+++ b/drivers/staging/android/sync.c
-@@ -114,7 +114,7 @@ void sync_timeline_signal(struct sync_timeline *obj)
- list_for_each_entry_safe(pt, next, &obj->active_list_head,
- active_list) {
- if (fence_is_signaled_locked(&pt->base))
-- list_del(&pt->active_list);
-+ list_del_init(&pt->active_list);
- }
-
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
-index 6ed35b6..04fc217 100644
---- a/drivers/staging/panel/panel.c
-+++ b/drivers/staging/panel/panel.c
-@@ -335,11 +335,11 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
- * LCD types
- */
- #define LCD_TYPE_NONE 0
--#define LCD_TYPE_OLD 1
--#define LCD_TYPE_KS0074 2
--#define LCD_TYPE_HANTRONIX 3
--#define LCD_TYPE_NEXCOM 4
--#define LCD_TYPE_CUSTOM 5
-+#define LCD_TYPE_CUSTOM 1
-+#define LCD_TYPE_OLD 2
-+#define LCD_TYPE_KS0074 3
-+#define LCD_TYPE_HANTRONIX 4
-+#define LCD_TYPE_NEXCOM 5
-
- /*
- * keypad types
-@@ -502,7 +502,7 @@ MODULE_PARM_DESC(keypad_type,
- static int lcd_type = NOT_SET;
- module_param(lcd_type, int, 0000);
- MODULE_PARM_DESC(lcd_type,
-- "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
-+ "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");
-
- static int lcd_height = NOT_SET;
- module_param(lcd_height, int, 0000);
-diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
-index 07ce3fd..fdf5c56 100644
---- a/drivers/staging/vt6655/rxtx.c
-+++ b/drivers/staging/vt6655/rxtx.c
-@@ -1308,10 +1308,18 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
- priv->hw->conf.chandef.chan->hw_value);
- }
-
-- if (current_rate > RATE_11M)
-- pkt_type = (u8)priv->byPacketType;
-- else
-+ if (current_rate > RATE_11M) {
-+ if (info->band == IEEE80211_BAND_5GHZ) {
-+ pkt_type = PK_TYPE_11A;
-+ } else {
-+ if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
-+ pkt_type = PK_TYPE_11GB;
-+ else
-+ pkt_type = PK_TYPE_11GA;
-+ }
-+ } else {
- pkt_type = PK_TYPE_11B;
-+ }
-
- /*Set fifo controls */
- if (pkt_type == PK_TYPE_11A)
-diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
-index 77d6425..5e35612 100644
---- a/drivers/target/iscsi/iscsi_target.c
-+++ b/drivers/target/iscsi/iscsi_target.c
-@@ -537,7 +537,7 @@ static struct iscsit_transport iscsi_target_transport = {
-
- static int __init iscsi_target_init_module(void)
- {
-- int ret = 0;
-+ int ret = 0, size;
-
- pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
-
-@@ -546,6 +546,7 @@ static int __init iscsi_target_init_module(void)
- pr_err("Unable to allocate memory for iscsit_global\n");
- return -1;
- }
-+ spin_lock_init(&iscsit_global->ts_bitmap_lock);
- mutex_init(&auth_id_lock);
- spin_lock_init(&sess_idr_lock);
- idr_init(&tiqn_idr);
-@@ -555,15 +556,11 @@ static int __init iscsi_target_init_module(void)
- if (ret < 0)
- goto out;
-
-- ret = iscsi_thread_set_init();
-- if (ret < 0)
-+ size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
-+ iscsit_global->ts_bitmap = vzalloc(size);
-+ if (!iscsit_global->ts_bitmap) {
-+ pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
- goto configfs_out;
--
-- if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
-- TARGET_THREAD_SET_COUNT) {
-- pr_err("iscsi_allocate_thread_sets() returned"
-- " unexpected value!\n");
-- goto ts_out1;
- }
-
- lio_qr_cache = kmem_cache_create("lio_qr_cache",
-@@ -572,7 +569,7 @@ static int __init iscsi_target_init_module(void)
- if (!lio_qr_cache) {
- pr_err("nable to kmem_cache_create() for"
- " lio_qr_cache\n");
-- goto ts_out2;
-+ goto bitmap_out;
- }
-
- lio_dr_cache = kmem_cache_create("lio_dr_cache",
-@@ -617,10 +614,8 @@ dr_out:
- kmem_cache_destroy(lio_dr_cache);
- qr_out:
- kmem_cache_destroy(lio_qr_cache);
--ts_out2:
-- iscsi_deallocate_thread_sets();
--ts_out1:
-- iscsi_thread_set_free();
-+bitmap_out:
-+ vfree(iscsit_global->ts_bitmap);
- configfs_out:
- iscsi_target_deregister_configfs();
- out:
-@@ -630,8 +625,6 @@ out:
-
- static void __exit iscsi_target_cleanup_module(void)
- {
-- iscsi_deallocate_thread_sets();
-- iscsi_thread_set_free();
- iscsit_release_discovery_tpg();
- iscsit_unregister_transport(&iscsi_target_transport);
- kmem_cache_destroy(lio_qr_cache);
-@@ -641,6 +634,7 @@ static void __exit iscsi_target_cleanup_module(void)
-
- iscsi_target_deregister_configfs();
-
-+ vfree(iscsit_global->ts_bitmap);
- kfree(iscsit_global);
- }
-
-@@ -3715,17 +3709,16 @@ static int iscsit_send_reject(
-
- void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
- {
-- struct iscsi_thread_set *ts = conn->thread_set;
- int ord, cpu;
- /*
-- * thread_id is assigned from iscsit_global->ts_bitmap from
-- * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
-+ * bitmap_id is assigned from iscsit_global->ts_bitmap from
-+ * within iscsit_start_kthreads()
- *
-- * Here we use thread_id to determine which CPU that this
-- * iSCSI connection's iscsi_thread_set will be scheduled to
-+ * Here we use bitmap_id to determine which CPU that this
-+ * iSCSI connection's RX/TX threads will be scheduled to
- * execute upon.
- */
-- ord = ts->thread_id % cpumask_weight(cpu_online_mask);
-+ ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
- for_each_online_cpu(cpu) {
- if (ord-- == 0) {
- cpumask_set_cpu(cpu, conn->conn_cpumask);
-@@ -3914,7 +3907,7 @@ check_rsp_state:
- switch (state) {
- case ISTATE_SEND_LOGOUTRSP:
- if (!iscsit_logout_post_handler(cmd, conn))
-- goto restart;
-+ return -ECONNRESET;
- /* fall through */
- case ISTATE_SEND_STATUS:
- case ISTATE_SEND_ASYNCMSG:
-@@ -3942,8 +3935,6 @@ check_rsp_state:
-
- err:
- return -1;
--restart:
-- return -EAGAIN;
- }
-
- static int iscsit_handle_response_queue(struct iscsi_conn *conn)
-@@ -3970,21 +3961,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
- int iscsi_target_tx_thread(void *arg)
- {
- int ret = 0;
-- struct iscsi_conn *conn;
-- struct iscsi_thread_set *ts = arg;
-+ struct iscsi_conn *conn = arg;
- /*
- * Allow ourselves to be interrupted by SIGINT so that a
- * connection recovery / failure event can be triggered externally.
- */
- allow_signal(SIGINT);
-
--restart:
-- conn = iscsi_tx_thread_pre_handler(ts);
-- if (!conn)
-- goto out;
--
-- ret = 0;
--
- while (!kthread_should_stop()) {
- /*
- * Ensure that both TX and RX per connection kthreads
-@@ -3993,11 +3976,9 @@ restart:
- iscsit_thread_check_cpumask(conn, current, 1);
-
- wait_event_interruptible(conn->queues_wq,
-- !iscsit_conn_all_queues_empty(conn) ||
-- ts->status == ISCSI_THREAD_SET_RESET);
-+ !iscsit_conn_all_queues_empty(conn));
-
-- if ((ts->status == ISCSI_THREAD_SET_RESET) ||
-- signal_pending(current))
-+ if (signal_pending(current))
- goto transport_err;
-
- get_immediate:
-@@ -4008,15 +3989,14 @@ get_immediate:
- ret = iscsit_handle_response_queue(conn);
- if (ret == 1)
- goto get_immediate;
-- else if (ret == -EAGAIN)
-- goto restart;
-+ else if (ret == -ECONNRESET)
-+ goto out;
- else if (ret < 0)
- goto transport_err;
- }
-
- transport_err:
- iscsit_take_action_for_connection_exit(conn);
-- goto restart;
- out:
- return 0;
- }
-@@ -4111,8 +4091,7 @@ int iscsi_target_rx_thread(void *arg)
- int ret;
- u8 buffer[ISCSI_HDR_LEN], opcode;
- u32 checksum = 0, digest = 0;
-- struct iscsi_conn *conn = NULL;
-- struct iscsi_thread_set *ts = arg;
-+ struct iscsi_conn *conn = arg;
- struct kvec iov;
- /*
- * Allow ourselves to be interrupted by SIGINT so that a
-@@ -4120,11 +4099,6 @@ int iscsi_target_rx_thread(void *arg)
- */
- allow_signal(SIGINT);
-
--restart:
-- conn = iscsi_rx_thread_pre_handler(ts);
-- if (!conn)
-- goto out;
--
- if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
- struct completion comp;
- int rc;
-@@ -4134,7 +4108,7 @@ restart:
- if (rc < 0)
- goto transport_err;
-
-- goto out;
-+ goto transport_err;
- }
-
- while (!kthread_should_stop()) {
-@@ -4210,8 +4184,6 @@ transport_err:
- if (!signal_pending(current))
- atomic_set(&conn->transport_failed, 1);
- iscsit_take_action_for_connection_exit(conn);
-- goto restart;
--out:
- return 0;
- }
-
-@@ -4273,7 +4245,24 @@ int iscsit_close_connection(
- if (conn->conn_transport->transport_type == ISCSI_TCP)
- complete(&conn->conn_logout_comp);
-
-- iscsi_release_thread_set(conn);
-+ if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
-+ if (conn->tx_thread &&
-+ cmpxchg(&conn->tx_thread_active, true, false)) {
-+ send_sig(SIGINT, conn->tx_thread, 1);
-+ kthread_stop(conn->tx_thread);
-+ }
-+ } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
-+ if (conn->rx_thread &&
-+ cmpxchg(&conn->rx_thread_active, true, false)) {
-+ send_sig(SIGINT, conn->rx_thread, 1);
-+ kthread_stop(conn->rx_thread);
-+ }
-+ }
-+
-+ spin_lock(&iscsit_global->ts_bitmap_lock);
-+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
-+ get_order(1));
-+ spin_unlock(&iscsit_global->ts_bitmap_lock);
-
- iscsit_stop_timers_for_cmds(conn);
- iscsit_stop_nopin_response_timer(conn);
-@@ -4551,15 +4540,13 @@ static void iscsit_logout_post_handler_closesession(
- struct iscsi_conn *conn)
- {
- struct iscsi_session *sess = conn->sess;
--
-- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
-- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
-+ int sleep = cmpxchg(&conn->tx_thread_active, true, false);
-
- atomic_set(&conn->conn_logout_remove, 0);
- complete(&conn->conn_logout_comp);
-
- iscsit_dec_conn_usage_count(conn);
-- iscsit_stop_session(sess, 1, 1);
-+ iscsit_stop_session(sess, sleep, sleep);
- iscsit_dec_session_usage_count(sess);
- target_put_session(sess->se_sess);
- }
-@@ -4567,13 +4554,12 @@ static void iscsit_logout_post_handler_closesession(
- static void iscsit_logout_post_handler_samecid(
- struct iscsi_conn *conn)
- {
-- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
-- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
-+ int sleep = cmpxchg(&conn->tx_thread_active, true, false);
-
- atomic_set(&conn->conn_logout_remove, 0);
- complete(&conn->conn_logout_comp);
-
-- iscsit_cause_connection_reinstatement(conn, 1);
-+ iscsit_cause_connection_reinstatement(conn, sleep);
- iscsit_dec_conn_usage_count(conn);
- }
-
-diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
-index bdd8731..e008ed2 100644
---- a/drivers/target/iscsi/iscsi_target_erl0.c
-+++ b/drivers/target/iscsi/iscsi_target_erl0.c
-@@ -860,7 +860,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
- }
- spin_unlock_bh(&conn->state_lock);
-
-- iscsi_thread_set_force_reinstatement(conn);
-+ if (conn->tx_thread && conn->tx_thread_active)
-+ send_sig(SIGINT, conn->tx_thread, 1);
-+ if (conn->rx_thread && conn->rx_thread_active)
-+ send_sig(SIGINT, conn->rx_thread, 1);
-
- sleep:
- wait_for_completion(&conn->conn_wait_rcfr_comp);
-@@ -885,10 +888,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
- return;
- }
-
-- if (iscsi_thread_set_force_reinstatement(conn) < 0) {
-- spin_unlock_bh(&conn->state_lock);
-- return;
-- }
-+ if (conn->tx_thread && conn->tx_thread_active)
-+ send_sig(SIGINT, conn->tx_thread, 1);
-+ if (conn->rx_thread && conn->rx_thread_active)
-+ send_sig(SIGINT, conn->rx_thread, 1);
-
- atomic_set(&conn->connection_reinstatement, 1);
- if (!sleep) {
-diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
-index 153fb66..345f073 100644
---- a/drivers/target/iscsi/iscsi_target_login.c
-+++ b/drivers/target/iscsi/iscsi_target_login.c
-@@ -699,6 +699,51 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
- iscsit_start_nopin_timer(conn);
- }
-
-+int iscsit_start_kthreads(struct iscsi_conn *conn)
-+{
-+ int ret = 0;
-+
-+ spin_lock(&iscsit_global->ts_bitmap_lock);
-+ conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
-+ ISCSIT_BITMAP_BITS, get_order(1));
-+ spin_unlock(&iscsit_global->ts_bitmap_lock);
-+
-+ if (conn->bitmap_id < 0) {
-+ pr_err("bitmap_find_free_region() failed for"
-+ " iscsit_start_kthreads()\n");
-+ return -ENOMEM;
-+ }
-+
-+ conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
-+ "%s", ISCSI_TX_THREAD_NAME);
-+ if (IS_ERR(conn->tx_thread)) {
-+ pr_err("Unable to start iscsi_target_tx_thread\n");
-+ ret = PTR_ERR(conn->tx_thread);
-+ goto out_bitmap;
-+ }
-+ conn->tx_thread_active = true;
-+
-+ conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
-+ "%s", ISCSI_RX_THREAD_NAME);
-+ if (IS_ERR(conn->rx_thread)) {
-+ pr_err("Unable to start iscsi_target_rx_thread\n");
-+ ret = PTR_ERR(conn->rx_thread);
-+ goto out_tx;
-+ }
-+ conn->rx_thread_active = true;
-+
-+ return 0;
-+out_tx:
-+ kthread_stop(conn->tx_thread);
-+ conn->tx_thread_active = false;
-+out_bitmap:
-+ spin_lock(&iscsit_global->ts_bitmap_lock);
-+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
-+ get_order(1));
-+ spin_unlock(&iscsit_global->ts_bitmap_lock);
-+ return ret;
-+}
-+
- int iscsi_post_login_handler(
- struct iscsi_np *np,
- struct iscsi_conn *conn,
-@@ -709,7 +754,7 @@ int iscsi_post_login_handler(
- struct se_session *se_sess = sess->se_sess;
- struct iscsi_portal_group *tpg = sess->tpg;
- struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
-- struct iscsi_thread_set *ts;
-+ int rc;
-
- iscsit_inc_conn_usage_count(conn);
-
-@@ -724,7 +769,6 @@ int iscsi_post_login_handler(
- /*
- * SCSI Initiator -> SCSI Target Port Mapping
- */
-- ts = iscsi_get_thread_set();
- if (!zero_tsih) {
- iscsi_set_session_parameters(sess->sess_ops,
- conn->param_list, 0);
-@@ -751,9 +795,11 @@ int iscsi_post_login_handler(
- sess->sess_ops->InitiatorName);
- spin_unlock_bh(&sess->conn_lock);
-
-- iscsi_post_login_start_timers(conn);
-+ rc = iscsit_start_kthreads(conn);
-+ if (rc)
-+ return rc;
-
-- iscsi_activate_thread_set(conn, ts);
-+ iscsi_post_login_start_timers(conn);
- /*
- * Determine CPU mask to ensure connection's RX and TX kthreads
- * are scheduled on the same CPU.
-@@ -810,8 +856,11 @@ int iscsi_post_login_handler(
- " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
- spin_unlock_bh(&se_tpg->session_lock);
-
-+ rc = iscsit_start_kthreads(conn);
-+ if (rc)
-+ return rc;
-+
- iscsi_post_login_start_timers(conn);
-- iscsi_activate_thread_set(conn, ts);
- /*
- * Determine CPU mask to ensure connection's RX and TX kthreads
- * are scheduled on the same CPU.
-diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
-index 44620fb..cbb0cc2 100644
---- a/drivers/target/target_core_file.c
-+++ b/drivers/target/target_core_file.c
-@@ -264,40 +264,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
- struct se_device *se_dev = cmd->se_dev;
- struct fd_dev *dev = FD_DEV(se_dev);
- struct file *prot_fd = dev->fd_prot_file;
-- struct scatterlist *sg;
- loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
- unsigned char *buf;
-- u32 prot_size, len, size;
-- int rc, ret = 1, i;
-+ u32 prot_size;
-+ int rc, ret = 1;
-
- prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
- se_dev->prot_length;
-
- if (!is_write) {
-- fd_prot->prot_buf = vzalloc(prot_size);
-+ fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
- if (!fd_prot->prot_buf) {
- pr_err("Unable to allocate fd_prot->prot_buf\n");
- return -ENOMEM;
- }
- buf = fd_prot->prot_buf;
-
-- fd_prot->prot_sg_nents = cmd->t_prot_nents;
-- fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
-- fd_prot->prot_sg_nents, GFP_KERNEL);
-+ fd_prot->prot_sg_nents = 1;
-+ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
-+ GFP_KERNEL);
- if (!fd_prot->prot_sg) {
- pr_err("Unable to allocate fd_prot->prot_sg\n");
-- vfree(fd_prot->prot_buf);
-+ kfree(fd_prot->prot_buf);
- return -ENOMEM;
- }
-- size = prot_size;
--
-- for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
--
-- len = min_t(u32, PAGE_SIZE, size);
-- sg_set_buf(sg, buf, len);
-- size -= len;
-- buf += len;
-- }
-+ sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
-+ sg_set_buf(fd_prot->prot_sg, buf, prot_size);
- }
-
- if (is_write) {
-@@ -318,7 +310,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
-
- if (is_write || ret < 0) {
- kfree(fd_prot->prot_sg);
-- vfree(fd_prot->prot_buf);
-+ kfree(fd_prot->prot_buf);
- }
-
- return ret;
-@@ -549,6 +541,56 @@ fd_execute_write_same(struct se_cmd *cmd)
- return 0;
- }
-
-+static int
-+fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
-+ void *buf, size_t bufsize)
-+{
-+ struct fd_dev *fd_dev = FD_DEV(se_dev);
-+ struct file *prot_fd = fd_dev->fd_prot_file;
-+ sector_t prot_length, prot;
-+ loff_t pos = lba * se_dev->prot_length;
-+
-+ if (!prot_fd) {
-+ pr_err("Unable to locate fd_dev->fd_prot_file\n");
-+ return -ENODEV;
-+ }
-+
-+ prot_length = nolb * se_dev->prot_length;
-+
-+ for (prot = 0; prot < prot_length;) {
-+ sector_t len = min_t(sector_t, bufsize, prot_length - prot);
-+ ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);
-+
-+ if (ret != len) {
-+ pr_err("vfs_write to prot file failed: %zd\n", ret);
-+ return ret < 0 ? ret : -ENODEV;
-+ }
-+ prot += ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int
-+fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
-+{
-+ void *buf;
-+ int rc;
-+
-+ buf = (void *)__get_free_page(GFP_KERNEL);
-+ if (!buf) {
-+ pr_err("Unable to allocate FILEIO prot buf\n");
-+ return -ENOMEM;
-+ }
-+ memset(buf, 0xff, PAGE_SIZE);
-+
-+ rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
-+
-+ free_page((unsigned long)buf);
-+
-+ return rc;
-+}
-+
- static sense_reason_t
- fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
- {
-@@ -556,6 +598,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
- struct inode *inode = file->f_mapping->host;
- int ret;
-
-+ if (cmd->se_dev->dev_attrib.pi_prot_type) {
-+ ret = fd_do_prot_unmap(cmd, lba, nolb);
-+ if (ret)
-+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-+ }
-+
- if (S_ISBLK(inode->i_mode)) {
- /* The backend is block device, use discard */
- struct block_device *bdev = inode->i_bdev;
-@@ -658,11 +706,11 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
- 0, fd_prot.prot_sg, 0);
- if (rc) {
- kfree(fd_prot.prot_sg);
-- vfree(fd_prot.prot_buf);
-+ kfree(fd_prot.prot_buf);
- return rc;
- }
- kfree(fd_prot.prot_sg);
-- vfree(fd_prot.prot_buf);
-+ kfree(fd_prot.prot_buf);
- }
- } else {
- memset(&fd_prot, 0, sizeof(struct fd_prot));
-@@ -678,7 +726,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
- 0, fd_prot.prot_sg, 0);
- if (rc) {
- kfree(fd_prot.prot_sg);
-- vfree(fd_prot.prot_buf);
-+ kfree(fd_prot.prot_buf);
- return rc;
- }
- }
-@@ -714,7 +762,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
-
- if (ret < 0) {
- kfree(fd_prot.prot_sg);
-- vfree(fd_prot.prot_buf);
-+ kfree(fd_prot.prot_buf);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
-
-@@ -878,48 +926,28 @@ static int fd_init_prot(struct se_device *dev)
-
- static int fd_format_prot(struct se_device *dev)
- {
-- struct fd_dev *fd_dev = FD_DEV(dev);
-- struct file *prot_fd = fd_dev->fd_prot_file;
-- sector_t prot_length, prot;
- unsigned char *buf;
-- loff_t pos = 0;
- int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
-- int rc, ret = 0, size, len;
-+ int ret;
-
- if (!dev->dev_attrib.pi_prot_type) {
- pr_err("Unable to format_prot while pi_prot_type == 0\n");
- return -ENODEV;
- }
-- if (!prot_fd) {
-- pr_err("Unable to locate fd_dev->fd_prot_file\n");
-- return -ENODEV;
-- }
-
- buf = vzalloc(unit_size);
- if (!buf) {
- pr_err("Unable to allocate FILEIO prot buf\n");
- return -ENOMEM;
- }
-- prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
-- size = prot_length;
-
- pr_debug("Using FILEIO prot_length: %llu\n",
-- (unsigned long long)prot_length);
-+ (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
-+ dev->prot_length);
-
- memset(buf, 0xff, unit_size);
-- for (prot = 0; prot < prot_length; prot += unit_size) {
-- len = min(unit_size, size);
-- rc = kernel_write(prot_fd, buf, len, pos);
-- if (rc != len) {
-- pr_err("vfs_write to prot file failed: %d\n", rc);
-- ret = -ENODEV;
-- goto out;
-- }
-- pos += len;
-- size -= len;
-- }
--
--out:
-+ ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
-+ buf, unit_size);
- vfree(buf);
- return ret;
- }
-diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
-index 3e72974..755bd9b3 100644
---- a/drivers/target/target_core_sbc.c
-+++ b/drivers/target/target_core_sbc.c
-@@ -312,7 +312,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
- return 0;
- }
-
--static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
-+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
- {
- unsigned char *buf, *addr;
- struct scatterlist *sg;
-@@ -376,7 +376,7 @@ sbc_execute_rw(struct se_cmd *cmd)
- cmd->data_direction);
- }
-
--static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
-+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
- {
- struct se_device *dev = cmd->se_dev;
-
-@@ -399,7 +399,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
- return TCM_NO_SENSE;
- }
-
--static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
-+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
- {
- struct se_device *dev = cmd->se_dev;
- struct scatterlist *write_sg = NULL, *sg;
-@@ -414,11 +414,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
-
- /*
- * Handle early failure in transport_generic_request_failure(),
-- * which will not have taken ->caw_mutex yet..
-+ * which will not have taken ->caw_sem yet..
- */
-- if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
-+ if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
- return TCM_NO_SENSE;
- /*
-+ * Handle special case for zero-length COMPARE_AND_WRITE
-+ */
-+ if (!cmd->data_length)
-+ goto out;
-+ /*
- * Immediately exit + release dev->caw_sem if command has already
- * been failed with a non-zero SCSI status.
- */
-diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index ac3cbab..f786de0 100644
---- a/drivers/target/target_core_transport.c
-+++ b/drivers/target/target_core_transport.c
-@@ -1615,11 +1615,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
- transport_complete_task_attr(cmd);
- /*
- * Handle special case for COMPARE_AND_WRITE failure, where the
-- * callback is expected to drop the per device ->caw_mutex.
-+ * callback is expected to drop the per device ->caw_sem.
- */
- if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
- cmd->transport_complete_callback)
-- cmd->transport_complete_callback(cmd);
-+ cmd->transport_complete_callback(cmd, false);
-
- switch (sense_reason) {
- case TCM_NON_EXISTENT_LUN:
-@@ -1975,8 +1975,12 @@ static void target_complete_ok_work(struct work_struct *work)
- if (cmd->transport_complete_callback) {
- sense_reason_t rc;
-
-- rc = cmd->transport_complete_callback(cmd);
-+ rc = cmd->transport_complete_callback(cmd, true);
- if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
-+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-+ !cmd->data_length)
-+ goto queue_rsp;
-+
- return;
- } else if (rc) {
- ret = transport_send_check_condition_and_sense(cmd,
-@@ -1990,6 +1994,7 @@ static void target_complete_ok_work(struct work_struct *work)
- }
- }
-
-+queue_rsp:
- switch (cmd->data_direction) {
- case DMA_FROM_DEVICE:
- spin_lock(&cmd->se_lun->lun_sep_lock);
-@@ -2094,6 +2099,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
- static inline void transport_free_pages(struct se_cmd *cmd)
- {
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
-+ /*
-+ * Release special case READ buffer payload required for
-+ * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
-+ */
-+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
-+ transport_free_sgl(cmd->t_bidi_data_sg,
-+ cmd->t_bidi_data_nents);
-+ cmd->t_bidi_data_sg = NULL;
-+ cmd->t_bidi_data_nents = 0;
-+ }
- transport_reset_sgl_orig(cmd);
- return;
- }
-@@ -2246,6 +2261,7 @@ sense_reason_t
- transport_generic_new_cmd(struct se_cmd *cmd)
- {
- int ret = 0;
-+ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
-
- /*
- * Determine is the TCM fabric module has already allocated physical
-@@ -2254,7 +2270,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
- */
- if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
- cmd->data_length) {
-- bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
-
- if ((cmd->se_cmd_flags & SCF_BIDI) ||
- (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
-@@ -2285,6 +2300,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
- cmd->data_length, zero_flag);
- if (ret < 0)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-+ } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-+ cmd->data_length) {
-+ /*
-+ * Special case for COMPARE_AND_WRITE with fabrics
-+ * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
-+ */
-+ u32 caw_length = cmd->t_task_nolb *
-+ cmd->se_dev->dev_attrib.block_size;
-+
-+ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
-+ &cmd->t_bidi_data_nents,
-+ caw_length, zero_flag);
-+ if (ret < 0)
-+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
- /*
- * If this command is not a write we can execute it right here,
-diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
-index deae122..d465ace 100644
---- a/drivers/tty/serial/8250/8250_core.c
-+++ b/drivers/tty/serial/8250/8250_core.c
-@@ -3444,7 +3444,8 @@ void serial8250_suspend_port(int line)
- port->type != PORT_8250) {
- unsigned char canary = 0xa5;
- serial_out(up, UART_SCR, canary);
-- up->canary = canary;
-+ if (serial_in(up, UART_SCR) == canary)
-+ up->canary = canary;
- }
-
- uart_suspend_port(&serial8250_reg, port);
-diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
-index 6ae5b85..7a80250 100644
---- a/drivers/tty/serial/8250/8250_dw.c
-+++ b/drivers/tty/serial/8250/8250_dw.c
-@@ -629,6 +629,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
- { "80860F0A", 0 },
- { "8086228A", 0 },
- { "APMC0D08", 0},
-+ { "AMD0020", 0 },
- { },
- };
- MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
-diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
-index 0eb29b1..2306191 100644
---- a/drivers/tty/serial/imx.c
-+++ b/drivers/tty/serial/imx.c
-@@ -818,7 +818,7 @@ static irqreturn_t imx_int(int irq, void *dev_id)
- if (sts2 & USR2_ORE) {
- dev_err(sport->port.dev, "Rx FIFO overrun\n");
- sport->port.icount.overrun++;
-- writel(sts2 | USR2_ORE, sport->port.membase + USR2);
-+ writel(USR2_ORE, sport->port.membase + USR2);
- }
-
- return IRQ_HANDLED;
-@@ -1181,10 +1181,12 @@ static int imx_startup(struct uart_port *port)
- imx_uart_dma_init(sport);
-
- spin_lock_irqsave(&sport->port.lock, flags);
-+
- /*
- * Finally, clear and enable interrupts
- */
- writel(USR1_RTSD, sport->port.membase + USR1);
-+ writel(USR2_ORE, sport->port.membase + USR2);
-
- if (sport->dma_is_inited && !sport->dma_is_enabled)
- imx_enable_dma(sport);
-@@ -1199,10 +1201,6 @@ static int imx_startup(struct uart_port *port)
-
- writel(temp, sport->port.membase + UCR1);
-
-- /* Clear any pending ORE flag before enabling interrupt */
-- temp = readl(sport->port.membase + USR2);
-- writel(temp | USR2_ORE, sport->port.membase + USR2);
--
- temp = readl(sport->port.membase + UCR4);
- temp |= UCR4_OREN;
- writel(temp, sport->port.membase + UCR4);
-diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
-index a051a7a..a81f9dd 100644
---- a/drivers/usb/class/cdc-wdm.c
-+++ b/drivers/usb/class/cdc-wdm.c
-@@ -245,7 +245,7 @@ static void wdm_int_callback(struct urb *urb)
- case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
- dev_dbg(&desc->intf->dev,
- "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
-- dr->wIndex, dr->wLength);
-+ le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
- break;
-
- case USB_CDC_NOTIFY_NETWORK_CONNECTION:
-@@ -262,7 +262,9 @@ static void wdm_int_callback(struct urb *urb)
- clear_bit(WDM_POLL_RUNNING, &desc->flags);
- dev_err(&desc->intf->dev,
- "unknown notification %d received: index %d len %d\n",
-- dr->bNotificationType, dr->wIndex, dr->wLength);
-+ dr->bNotificationType,
-+ le16_to_cpu(dr->wIndex),
-+ le16_to_cpu(dr->wLength));
- goto exit;
- }
-
-@@ -408,7 +410,7 @@ static ssize_t wdm_write
- USB_RECIP_INTERFACE);
- req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
- req->wValue = 0;
-- req->wIndex = desc->inum;
-+ req->wIndex = desc->inum; /* already converted */
- req->wLength = cpu_to_le16(count);
- set_bit(WDM_IN_USE, &desc->flags);
- desc->outbuf = buf;
-@@ -422,7 +424,7 @@ static ssize_t wdm_write
- rv = usb_translate_errors(rv);
- } else {
- dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
-- req->wIndex);
-+ le16_to_cpu(req->wIndex));
- }
- out:
- usb_autopm_put_interface(desc->intf);
-@@ -820,7 +822,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
- desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
- desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
- desc->irq->wValue = 0;
-- desc->irq->wIndex = desc->inum;
-+ desc->irq->wIndex = desc->inum; /* already converted */
- desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);
-
- usb_fill_control_urb(
-diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index d7c3d5a..3b71516 100644
---- a/drivers/usb/core/hub.c
-+++ b/drivers/usb/core/hub.c
-@@ -3406,10 +3406,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
- if (status) {
- dev_dbg(&port_dev->dev, "can't resume, status %d\n", status);
- } else {
-- /* drive resume for at least 20 msec */
-+ /* drive resume for USB_RESUME_TIMEOUT msec */
- dev_dbg(&udev->dev, "usb %sresume\n",
- (PMSG_IS_AUTO(msg) ? "auto-" : ""));
-- msleep(25);
-+ msleep(USB_RESUME_TIMEOUT);
-
- /* Virtual root hubs can trigger on GET_PORT_STATUS to
- * stop resume signaling. Then finish the resume
-diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
-index c78c874..758b7e0 100644
---- a/drivers/usb/dwc2/hcd.c
-+++ b/drivers/usb/dwc2/hcd.c
-@@ -1521,7 +1521,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
- dev_dbg(hsotg->dev,
- "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
- writel(0, hsotg->regs + PCGCTL);
-- usleep_range(20000, 40000);
-+ msleep(USB_RESUME_TIMEOUT);
-
- hprt0 = dwc2_read_hprt0(hsotg);
- hprt0 |= HPRT0_RES;
-diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
-index 9054598..6385c19 100644
---- a/drivers/usb/gadget/legacy/printer.c
-+++ b/drivers/usb/gadget/legacy/printer.c
-@@ -1031,6 +1031,15 @@ unknown:
- break;
- }
- /* host either stalls (value < 0) or reports success */
-+ if (value >= 0) {
-+ req->length = value;
-+ req->zero = value < wLength;
-+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
-+ if (value < 0) {
-+ ERROR(dev, "%s:%d Error!\n", __func__, __LINE__);
-+ req->status = 0;
-+ }
-+ }
- return value;
- }
-
-diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
-index 85e56d1..f4d88df 100644
---- a/drivers/usb/host/ehci-hcd.c
-+++ b/drivers/usb/host/ehci-hcd.c
-@@ -792,12 +792,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
- ehci->reset_done[i] == 0))
- continue;
-
-- /* start 20 msec resume signaling from this port,
-- * and make hub_wq collect PORT_STAT_C_SUSPEND to
-- * stop that signaling. Use 5 ms extra for safety,
-- * like usb_port_resume() does.
-+ /* start USB_RESUME_TIMEOUT msec resume signaling from
-+ * this port, and make hub_wq collect
-+ * PORT_STAT_C_SUSPEND to stop that signaling.
- */
-- ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
-+ ehci->reset_done[i] = jiffies +
-+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
- set_bit(i, &ehci->resuming_ports);
- ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
- usb_hcd_start_port_resume(&hcd->self, i);
-diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
-index 87cf86f..7354d01 100644
---- a/drivers/usb/host/ehci-hub.c
-+++ b/drivers/usb/host/ehci-hub.c
-@@ -471,10 +471,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
- ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
- }
-
-- /* msleep for 20ms only if code is trying to resume port */
-+ /*
-+ * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume
-+ * port
-+ */
- if (resume_needed) {
- spin_unlock_irq(&ehci->lock);
-- msleep(20);
-+ msleep(USB_RESUME_TIMEOUT);
- spin_lock_irq(&ehci->lock);
- if (ehci->shutdown)
- goto shutdown;
-@@ -942,7 +945,7 @@ int ehci_hub_control(
- temp &= ~PORT_WAKE_BITS;
- ehci_writel(ehci, temp | PORT_RESUME, status_reg);
- ehci->reset_done[wIndex] = jiffies
-- + msecs_to_jiffies(20);
-+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
- set_bit(wIndex, &ehci->resuming_ports);
- usb_hcd_start_port_resume(&hcd->self, wIndex);
- break;
-diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
-index 475b21f..7a6681f 100644
---- a/drivers/usb/host/fotg210-hcd.c
-+++ b/drivers/usb/host/fotg210-hcd.c
-@@ -1595,7 +1595,7 @@ static int fotg210_hub_control(
- /* resume signaling for 20 msec */
- fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
- fotg210->reset_done[wIndex] = jiffies
-- + msecs_to_jiffies(20);
-+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
- break;
- case USB_PORT_FEAT_C_SUSPEND:
- clear_bit(wIndex, &fotg210->port_c_suspend);
-diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
-index a83eefe..ba77e2e 100644
---- a/drivers/usb/host/fusbh200-hcd.c
-+++ b/drivers/usb/host/fusbh200-hcd.c
-@@ -1550,10 +1550,9 @@ static int fusbh200_hub_control (
- if ((temp & PORT_PE) == 0)
- goto error;
-
-- /* resume signaling for 20 msec */
- fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
- fusbh200->reset_done[wIndex] = jiffies
-- + msecs_to_jiffies(20);
-+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
- break;
- case USB_PORT_FEAT_C_SUSPEND:
- clear_bit(wIndex, &fusbh200->port_c_suspend);
-diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
-index 113d0cc..9ef5644 100644
---- a/drivers/usb/host/isp116x-hcd.c
-+++ b/drivers/usb/host/isp116x-hcd.c
-@@ -1490,7 +1490,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd)
- spin_unlock_irq(&isp116x->lock);
-
- hcd->state = HC_STATE_RESUMING;
-- msleep(20);
-+ msleep(USB_RESUME_TIMEOUT);
-
- /* Go operational */
- spin_lock_irq(&isp116x->lock);
-diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
-index ef7efb2..28a2866 100644
---- a/drivers/usb/host/oxu210hp-hcd.c
-+++ b/drivers/usb/host/oxu210hp-hcd.c
-@@ -2500,11 +2500,12 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
- || oxu->reset_done[i] != 0)
- continue;
-
-- /* start 20 msec resume signaling from this port,
-- * and make hub_wq collect PORT_STAT_C_SUSPEND to
-+ /* start USB_RESUME_TIMEOUT resume signaling from this
-+ * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
- * stop that signaling.
- */
-- oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
-+ oxu->reset_done[i] = jiffies +
-+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
- oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
- mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
- }
-diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
-index bdc82fe..54a4170 100644
---- a/drivers/usb/host/r8a66597-hcd.c
-+++ b/drivers/usb/host/r8a66597-hcd.c
-@@ -2301,7 +2301,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
- rh->port &= ~USB_PORT_STAT_SUSPEND;
- rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
- r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
-- msleep(50);
-+ msleep(USB_RESUME_TIMEOUT);
- r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
- }
-
-diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
-index 4f4ba1e..9118cd8 100644
---- a/drivers/usb/host/sl811-hcd.c
-+++ b/drivers/usb/host/sl811-hcd.c
-@@ -1259,7 +1259,7 @@ sl811h_hub_control(
- sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
-
- mod_timer(&sl811->timer, jiffies
-- + msecs_to_jiffies(20));
-+ + msecs_to_jiffies(USB_RESUME_TIMEOUT));
- break;
- case USB_PORT_FEAT_POWER:
- port_power(sl811, 0);
-diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
-index 19ba5ea..7b3d1af 100644
---- a/drivers/usb/host/uhci-hub.c
-+++ b/drivers/usb/host/uhci-hub.c
-@@ -166,7 +166,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
- /* Port received a wakeup request */
- set_bit(port, &uhci->resuming_ports);
- uhci->ports_timeout = jiffies +
-- msecs_to_jiffies(25);
-+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
- usb_hcd_start_port_resume(
- &uhci_to_hcd(uhci)->self, port);
-
-@@ -338,7 +338,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
- uhci_finish_suspend(uhci, port, port_addr);
-
- /* USB v2.0 7.1.7.5 */
-- uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
-+ uhci->ports_timeout = jiffies +
-+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
- break;
- case USB_PORT_FEAT_POWER:
- /* UHCI has no power switching */
-diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
-index 73485fa..eeedde8 100644
---- a/drivers/usb/host/xhci-ring.c
-+++ b/drivers/usb/host/xhci-ring.c
-@@ -1574,7 +1574,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
- } else {
- xhci_dbg(xhci, "resume HS port %d\n", port_id);
- bus_state->resume_done[faked_port_index] = jiffies +
-- msecs_to_jiffies(20);
-+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
- set_bit(faked_port_index, &bus_state->resuming_ports);
- mod_timer(&hcd->rh_timer,
- bus_state->resume_done[faked_port_index]);
-diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
-index 3cb98b1..7911b6b 100644
---- a/drivers/usb/isp1760/isp1760-hcd.c
-+++ b/drivers/usb/isp1760/isp1760-hcd.c
-@@ -1869,7 +1869,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
- reg_write32(hcd->regs, HC_PORTSC1,
- temp | PORT_RESUME);
- priv->reset_done = jiffies +
-- msecs_to_jiffies(20);
-+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
- }
- break;
- case USB_PORT_FEAT_C_SUSPEND:
-diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
-index 067920f..ec0ee3b 100644
---- a/drivers/usb/musb/musb_core.c
-+++ b/drivers/usb/musb/musb_core.c
-@@ -99,6 +99,7 @@
- #include <linux/platform_device.h>
- #include <linux/io.h>
- #include <linux/dma-mapping.h>
-+#include <linux/usb.h>
-
- #include "musb_core.h"
-
-@@ -562,7 +563,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
- (USB_PORT_STAT_C_SUSPEND << 16)
- | MUSB_PORT_STAT_RESUME;
- musb->rh_timer = jiffies
-- + msecs_to_jiffies(20);
-+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
- musb->need_finish_resume = 1;
-
- musb->xceiv->otg->state = OTG_STATE_A_HOST;
-@@ -1597,16 +1598,30 @@ irqreturn_t musb_interrupt(struct musb *musb)
- is_host_active(musb) ? "host" : "peripheral",
- musb->int_usb, musb->int_tx, musb->int_rx);
-
-- /* the core can interrupt us for multiple reasons; docs have
-- * a generic interrupt flowchart to follow
-+ /**
-+ * According to Mentor Graphics' documentation, flowchart on page 98,
-+ * IRQ should be handled as follows:
-+ *
-+ * . Resume IRQ
-+ * . Session Request IRQ
-+ * . VBUS Error IRQ
-+ * . Suspend IRQ
-+ * . Connect IRQ
-+ * . Disconnect IRQ
-+ * . Reset/Babble IRQ
-+ * . SOF IRQ (we're not using this one)
-+ * . Endpoint 0 IRQ
-+ * . TX Endpoints
-+ * . RX Endpoints
-+ *
-+ * We will be following that flowchart in order to avoid any problems
-+ * that might arise with internal Finite State Machine.
- */
-+
- if (musb->int_usb)
- retval |= musb_stage0_irq(musb, musb->int_usb,
- devctl);
-
-- /* "stage 1" is handling endpoint irqs */
--
-- /* handle endpoint 0 first */
- if (musb->int_tx & 1) {
- if (is_host_active(musb))
- retval |= musb_h_ep0_irq(musb);
-@@ -1614,37 +1629,31 @@ irqreturn_t musb_interrupt(struct musb *musb)
- retval |= musb_g_ep0_irq(musb);
- }
-
-- /* RX on endpoints 1-15 */
-- reg = musb->int_rx >> 1;
-+ reg = musb->int_tx >> 1;
- ep_num = 1;
- while (reg) {
- if (reg & 1) {
-- /* musb_ep_select(musb->mregs, ep_num); */
-- /* REVISIT just retval = ep->rx_irq(...) */
- retval = IRQ_HANDLED;
- if (is_host_active(musb))
-- musb_host_rx(musb, ep_num);
-+ musb_host_tx(musb, ep_num);
- else
-- musb_g_rx(musb, ep_num);
-+ musb_g_tx(musb, ep_num);
- }
--
- reg >>= 1;
- ep_num++;
- }
-
-- /* TX on endpoints 1-15 */
-- reg = musb->int_tx >> 1;
-+ reg = musb->int_rx >> 1;
- ep_num = 1;
- while (reg) {
- if (reg & 1) {
-- /* musb_ep_select(musb->mregs, ep_num); */
-- /* REVISIT just retval |= ep->tx_irq(...) */
- retval = IRQ_HANDLED;
- if (is_host_active(musb))
-- musb_host_tx(musb, ep_num);
-+ musb_host_rx(musb, ep_num);
- else
-- musb_g_tx(musb, ep_num);
-+ musb_g_rx(musb, ep_num);
- }
-+
- reg >>= 1;
- ep_num++;
- }
-@@ -2463,7 +2472,7 @@ static int musb_resume(struct device *dev)
- if (musb->need_finish_resume) {
- musb->need_finish_resume = 0;
- schedule_delayed_work(&musb->finish_resume_work,
-- msecs_to_jiffies(20));
-+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
- }
-
- /*
-@@ -2506,7 +2515,7 @@ static int musb_runtime_resume(struct device *dev)
- if (musb->need_finish_resume) {
- musb->need_finish_resume = 0;
- schedule_delayed_work(&musb->finish_resume_work,
-- msecs_to_jiffies(20));
-+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
- }
-
- return 0;
-diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
-index 294e159..5428ed1 100644
---- a/drivers/usb/musb/musb_virthub.c
-+++ b/drivers/usb/musb/musb_virthub.c
-@@ -136,7 +136,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
- /* later, GetPortStatus will stop RESUME signaling */
- musb->port1_status |= MUSB_PORT_STAT_RESUME;
- schedule_delayed_work(&musb->finish_resume_work,
-- msecs_to_jiffies(20));
-+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
- }
- }
-
-diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
-index 2f9735b..d1cd6b5 100644
---- a/drivers/usb/phy/phy.c
-+++ b/drivers/usb/phy/phy.c
-@@ -81,7 +81,9 @@ static void devm_usb_phy_release(struct device *dev, void *res)
-
- static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
- {
-- return res == match_data;
-+ struct usb_phy **phy = res;
-+
-+ return *phy == match_data;
- }
-
- /**
-diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 995986b..d925f55 100644
---- a/fs/binfmt_elf.c
-+++ b/fs/binfmt_elf.c
-@@ -862,6 +862,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
- i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
- int elf_prot = 0, elf_flags;
- unsigned long k, vaddr;
-+ unsigned long total_size = 0;
-
- if (elf_ppnt->p_type != PT_LOAD)
- continue;
-@@ -924,10 +925,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
- #else
- load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
- #endif
-+ total_size = total_mapping_size(elf_phdata,
-+ loc->elf_ex.e_phnum);
-+ if (!total_size) {
-+ error = -EINVAL;
-+ goto out_free_dentry;
-+ }
- }
-
- error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-- elf_prot, elf_flags, 0);
-+ elf_prot, elf_flags, total_size);
- if (BAD_ADDR(error)) {
- retval = IS_ERR((void *)error) ?
- PTR_ERR((void*)error) : -EINVAL;
-diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
-index 8b353ad..0a795c9 100644
---- a/fs/btrfs/extent-tree.c
-+++ b/fs/btrfs/extent-tree.c
-@@ -6956,12 +6956,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
- return -ENOSPC;
- }
-
-- if (btrfs_test_opt(root, DISCARD))
-- ret = btrfs_discard_extent(root, start, len, NULL);
--
- if (pin)
- pin_down_extent(root, cache, start, len, 1);
- else {
-+ if (btrfs_test_opt(root, DISCARD))
-+ ret = btrfs_discard_extent(root, start, len, NULL);
- btrfs_add_free_space(cache, start, len);
- btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
- }
-diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
-index 74609b9..f23d4be 100644
---- a/fs/btrfs/ioctl.c
-+++ b/fs/btrfs/ioctl.c
-@@ -2897,6 +2897,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
- if (src == dst)
- return -EINVAL;
-
-+ if (len == 0)
-+ return 0;
-+
- btrfs_double_lock(src, loff, dst, dst_loff, len);
-
- ret = extent_same_check_offsets(src, loff, len);
-@@ -3626,6 +3629,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
- if (off + len == src->i_size)
- len = ALIGN(src->i_size, bs) - off;
-
-+ if (len == 0) {
-+ ret = 0;
-+ goto out_unlock;
-+ }
-+
- /* verify the end result is block aligned */
- if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
- !IS_ALIGNED(destoff, bs))
-diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
-index 883b936..45ea704 100644
---- a/fs/btrfs/xattr.c
-+++ b/fs/btrfs/xattr.c
-@@ -364,22 +364,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
- /*
- * Check if the attribute is in a supported namespace.
- *
-- * This applied after the check for the synthetic attributes in the system
-+ * This is applied after the check for the synthetic attributes in the system
- * namespace.
- */
--static bool btrfs_is_valid_xattr(const char *name)
-+static int btrfs_is_valid_xattr(const char *name)
- {
-- return !strncmp(name, XATTR_SECURITY_PREFIX,
-- XATTR_SECURITY_PREFIX_LEN) ||
-- !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
-- !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
-- !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) ||
-- !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN);
-+ int len = strlen(name);
-+ int prefixlen = 0;
-+
-+ if (!strncmp(name, XATTR_SECURITY_PREFIX,
-+ XATTR_SECURITY_PREFIX_LEN))
-+ prefixlen = XATTR_SECURITY_PREFIX_LEN;
-+ else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
-+ prefixlen = XATTR_SYSTEM_PREFIX_LEN;
-+ else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
-+ prefixlen = XATTR_TRUSTED_PREFIX_LEN;
-+ else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
-+ prefixlen = XATTR_USER_PREFIX_LEN;
-+ else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
-+ prefixlen = XATTR_BTRFS_PREFIX_LEN;
-+ else
-+ return -EOPNOTSUPP;
-+
-+ /*
-+	 * The name cannot consist of just a prefix
-+ */
-+ if (len <= prefixlen)
-+ return -EINVAL;
-+
-+ return 0;
- }
-
- ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size)
- {
-+ int ret;
-+
- /*
- * If this is a request for a synthetic attribute in the system.*
- * namespace use the generic infrastructure to resolve a handler
-@@ -388,8 +408,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
- if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
- return generic_getxattr(dentry, name, buffer, size);
-
-- if (!btrfs_is_valid_xattr(name))
-- return -EOPNOTSUPP;
-+ ret = btrfs_is_valid_xattr(name);
-+ if (ret)
-+ return ret;
- return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
- }
-
-@@ -397,6 +418,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
- size_t size, int flags)
- {
- struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
-+ int ret;
-
- /*
- * The permission on security.* and system.* is not checked
-@@ -413,8 +435,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
- if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
- return generic_setxattr(dentry, name, value, size, flags);
-
-- if (!btrfs_is_valid_xattr(name))
-- return -EOPNOTSUPP;
-+ ret = btrfs_is_valid_xattr(name);
-+ if (ret)
-+ return ret;
-
- if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
- return btrfs_set_prop(dentry->d_inode, name,
-@@ -430,6 +453,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
- int btrfs_removexattr(struct dentry *dentry, const char *name)
- {
- struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
-+ int ret;
-
- /*
- * The permission on security.* and system.* is not checked
-@@ -446,8 +470,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
- if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
- return generic_removexattr(dentry, name);
-
-- if (!btrfs_is_valid_xattr(name))
-- return -EOPNOTSUPP;
-+ ret = btrfs_is_valid_xattr(name);
-+ if (ret)
-+ return ret;
-
- if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
- return btrfs_set_prop(dentry->d_inode, name,
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index 28fe71a..aae7011 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -1865,7 +1865,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
- struct inode *inode)
- {
- struct inode *dir = dentry->d_parent->d_inode;
-- struct buffer_head *bh;
-+ struct buffer_head *bh = NULL;
- struct ext4_dir_entry_2 *de;
- struct ext4_dir_entry_tail *t;
- struct super_block *sb;
-@@ -1889,14 +1889,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
- return retval;
- if (retval == 1) {
- retval = 0;
-- return retval;
-+ goto out;
- }
- }
-
- if (is_dx(dir)) {
- retval = ext4_dx_add_entry(handle, dentry, inode);
- if (!retval || (retval != ERR_BAD_DX_DIR))
-- return retval;
-+ goto out;
- ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
- dx_fallback++;
- ext4_mark_inode_dirty(handle, dir);
-@@ -1908,14 +1908,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
- return PTR_ERR(bh);
-
- retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
-- if (retval != -ENOSPC) {
-- brelse(bh);
-- return retval;
-- }
-+ if (retval != -ENOSPC)
-+ goto out;
-
- if (blocks == 1 && !dx_fallback &&
-- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
-- return make_indexed_dir(handle, dentry, inode, bh);
-+ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
-+ retval = make_indexed_dir(handle, dentry, inode, bh);
-+ bh = NULL; /* make_indexed_dir releases bh */
-+ goto out;
-+ }
- brelse(bh);
- }
- bh = ext4_append(handle, dir, &block);
-@@ -1931,6 +1932,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
- }
-
- retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
-+out:
- brelse(bh);
- if (retval == 0)
- ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
-diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
-index 665ef5a..a563ddb 100644
---- a/fs/lockd/svcsubs.c
-+++ b/fs/lockd/svcsubs.c
-@@ -31,7 +31,7 @@
- static struct hlist_head nlm_files[FILE_NRHASH];
- static DEFINE_MUTEX(nlm_file_mutex);
-
--#ifdef NFSD_DEBUG
-+#ifdef CONFIG_SUNRPC_DEBUG
- static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
- {
- u32 *fhp = (u32*)f->data;
-diff --git a/fs/namei.c b/fs/namei.c
-index c83145a..caa38a2 100644
---- a/fs/namei.c
-+++ b/fs/namei.c
-@@ -1591,7 +1591,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
-
- if (should_follow_link(path->dentry, follow)) {
- if (nd->flags & LOOKUP_RCU) {
-- if (unlikely(unlazy_walk(nd, path->dentry))) {
-+ if (unlikely(nd->path.mnt != path->mnt ||
-+ unlazy_walk(nd, path->dentry))) {
- err = -ECHILD;
- goto out_err;
- }
-@@ -3047,7 +3048,8 @@ finish_lookup:
-
- if (should_follow_link(path->dentry, !symlink_ok)) {
- if (nd->flags & LOOKUP_RCU) {
-- if (unlikely(unlazy_walk(nd, path->dentry))) {
-+ if (unlikely(nd->path.mnt != path->mnt ||
-+ unlazy_walk(nd, path->dentry))) {
- error = -ECHILD;
- goto out;
- }
-diff --git a/fs/namespace.c b/fs/namespace.c
-index 82ef140..4622ee3 100644
---- a/fs/namespace.c
-+++ b/fs/namespace.c
-@@ -632,14 +632,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
- */
- struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
- {
-- struct mount *p, *res;
-- res = p = __lookup_mnt(mnt, dentry);
-+ struct mount *p, *res = NULL;
-+ p = __lookup_mnt(mnt, dentry);
- if (!p)
- goto out;
-+ if (!(p->mnt.mnt_flags & MNT_UMOUNT))
-+ res = p;
- hlist_for_each_entry_continue(p, mnt_hash) {
- if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
- break;
-- res = p;
-+ if (!(p->mnt.mnt_flags & MNT_UMOUNT))
-+ res = p;
- }
- out:
- return res;
-@@ -795,10 +798,8 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
- /*
- * vfsmount lock must be held for write
- */
--static void detach_mnt(struct mount *mnt, struct path *old_path)
-+static void unhash_mnt(struct mount *mnt)
- {
-- old_path->dentry = mnt->mnt_mountpoint;
-- old_path->mnt = &mnt->mnt_parent->mnt;
- mnt->mnt_parent = mnt;
- mnt->mnt_mountpoint = mnt->mnt.mnt_root;
- list_del_init(&mnt->mnt_child);
-@@ -811,6 +812,26 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
- /*
- * vfsmount lock must be held for write
- */
-+static void detach_mnt(struct mount *mnt, struct path *old_path)
-+{
-+ old_path->dentry = mnt->mnt_mountpoint;
-+ old_path->mnt = &mnt->mnt_parent->mnt;
-+ unhash_mnt(mnt);
-+}
-+
-+/*
-+ * vfsmount lock must be held for write
-+ */
-+static void umount_mnt(struct mount *mnt)
-+{
-+ /* old mountpoint will be dropped when we can do that */
-+ mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
-+ unhash_mnt(mnt);
-+}
-+
-+/*
-+ * vfsmount lock must be held for write
-+ */
- void mnt_set_mountpoint(struct mount *mnt,
- struct mountpoint *mp,
- struct mount *child_mnt)
-@@ -1078,6 +1099,13 @@ static void mntput_no_expire(struct mount *mnt)
- rcu_read_unlock();
-
- list_del(&mnt->mnt_instance);
-+
-+ if (unlikely(!list_empty(&mnt->mnt_mounts))) {
-+ struct mount *p, *tmp;
-+ list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
-+ umount_mnt(p);
-+ }
-+ }
- unlock_mount_hash();
-
- if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
-@@ -1319,49 +1347,63 @@ static inline void namespace_lock(void)
- down_write(&namespace_sem);
- }
-
-+enum umount_tree_flags {
-+ UMOUNT_SYNC = 1,
-+ UMOUNT_PROPAGATE = 2,
-+ UMOUNT_CONNECTED = 4,
-+};
- /*
- * mount_lock must be held
- * namespace_sem must be held for write
-- * how = 0 => just this tree, don't propagate
-- * how = 1 => propagate; we know that nobody else has reference to any victims
-- * how = 2 => lazy umount
- */
--void umount_tree(struct mount *mnt, int how)
-+static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
- {
-- HLIST_HEAD(tmp_list);
-+ LIST_HEAD(tmp_list);
- struct mount *p;
-
-+ if (how & UMOUNT_PROPAGATE)
-+ propagate_mount_unlock(mnt);
-+
-+ /* Gather the mounts to umount */
- for (p = mnt; p; p = next_mnt(p, mnt)) {
-- hlist_del_init_rcu(&p->mnt_hash);
-- hlist_add_head(&p->mnt_hash, &tmp_list);
-+ p->mnt.mnt_flags |= MNT_UMOUNT;
-+ list_move(&p->mnt_list, &tmp_list);
- }
-
-- hlist_for_each_entry(p, &tmp_list, mnt_hash)
-+ /* Hide the mounts from mnt_mounts */
-+ list_for_each_entry(p, &tmp_list, mnt_list) {
- list_del_init(&p->mnt_child);
-+ }
-
-- if (how)
-+	/* Add propagated mounts to the tmp_list */
-+ if (how & UMOUNT_PROPAGATE)
- propagate_umount(&tmp_list);
-
-- while (!hlist_empty(&tmp_list)) {
-- p = hlist_entry(tmp_list.first, struct mount, mnt_hash);
-- hlist_del_init_rcu(&p->mnt_hash);
-+ while (!list_empty(&tmp_list)) {
-+ bool disconnect;
-+ p = list_first_entry(&tmp_list, struct mount, mnt_list);
- list_del_init(&p->mnt_expire);
- list_del_init(&p->mnt_list);
- __touch_mnt_namespace(p->mnt_ns);
- p->mnt_ns = NULL;
-- if (how < 2)
-+ if (how & UMOUNT_SYNC)
- p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
-
-- pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted);
-+ disconnect = !(((how & UMOUNT_CONNECTED) &&
-+ mnt_has_parent(p) &&
-+ (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
-+ IS_MNT_LOCKED_AND_LAZY(p));
-+
-+ pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
-+ disconnect ? &unmounted : NULL);
- if (mnt_has_parent(p)) {
-- hlist_del_init(&p->mnt_mp_list);
-- put_mountpoint(p->mnt_mp);
- mnt_add_count(p->mnt_parent, -1);
-- /* old mountpoint will be dropped when we can do that */
-- p->mnt_ex_mountpoint = p->mnt_mountpoint;
-- p->mnt_mountpoint = p->mnt.mnt_root;
-- p->mnt_parent = p;
-- p->mnt_mp = NULL;
-+ if (!disconnect) {
-+ /* Don't forget about p */
-+ list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
-+ } else {
-+ umount_mnt(p);
-+ }
- }
- change_mnt_propagation(p, MS_PRIVATE);
- }
-@@ -1447,14 +1489,14 @@ static int do_umount(struct mount *mnt, int flags)
-
- if (flags & MNT_DETACH) {
- if (!list_empty(&mnt->mnt_list))
-- umount_tree(mnt, 2);
-+ umount_tree(mnt, UMOUNT_PROPAGATE);
- retval = 0;
- } else {
- shrink_submounts(mnt);
- retval = -EBUSY;
- if (!propagate_mount_busy(mnt, 2)) {
- if (!list_empty(&mnt->mnt_list))
-- umount_tree(mnt, 1);
-+ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
- retval = 0;
- }
- }
-@@ -1480,13 +1522,20 @@ void __detach_mounts(struct dentry *dentry)
-
- namespace_lock();
- mp = lookup_mountpoint(dentry);
-- if (!mp)
-+ if (IS_ERR_OR_NULL(mp))
- goto out_unlock;
-
- lock_mount_hash();
- while (!hlist_empty(&mp->m_list)) {
- mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
-- umount_tree(mnt, 2);
-+ if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
-+ struct mount *p, *tmp;
-+ list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
-+ hlist_add_head(&p->mnt_umount.s_list, &unmounted);
-+ umount_mnt(p);
-+ }
-+ }
-+ else umount_tree(mnt, UMOUNT_CONNECTED);
- }
- unlock_mount_hash();
- put_mountpoint(mp);
-@@ -1648,7 +1697,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
- out:
- if (res) {
- lock_mount_hash();
-- umount_tree(res, 0);
-+ umount_tree(res, UMOUNT_SYNC);
- unlock_mount_hash();
- }
- return q;
-@@ -1672,7 +1721,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
- {
- namespace_lock();
- lock_mount_hash();
-- umount_tree(real_mount(mnt), 0);
-+ umount_tree(real_mount(mnt), UMOUNT_SYNC);
- unlock_mount_hash();
- namespace_unlock();
- }
-@@ -1855,7 +1904,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
- out_cleanup_ids:
- while (!hlist_empty(&tree_list)) {
- child = hlist_entry(tree_list.first, struct mount, mnt_hash);
-- umount_tree(child, 0);
-+ umount_tree(child, UMOUNT_SYNC);
- }
- unlock_mount_hash();
- cleanup_group_ids(source_mnt, NULL);
-@@ -2035,7 +2084,7 @@ static int do_loopback(struct path *path, const char *old_name,
- err = graft_tree(mnt, parent, mp);
- if (err) {
- lock_mount_hash();
-- umount_tree(mnt, 0);
-+ umount_tree(mnt, UMOUNT_SYNC);
- unlock_mount_hash();
- }
- out2:
-@@ -2406,7 +2455,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
- while (!list_empty(&graveyard)) {
- mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
- touch_mnt_namespace(mnt->mnt_ns);
-- umount_tree(mnt, 1);
-+ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
- }
- unlock_mount_hash();
- namespace_unlock();
-@@ -2477,7 +2526,7 @@ static void shrink_submounts(struct mount *mnt)
- m = list_first_entry(&graveyard, struct mount,
- mnt_expire);
- touch_mnt_namespace(m->mnt_ns);
-- umount_tree(m, 1);
-+ umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
- }
- }
- }
-diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
-index 351be920..8d129bb 100644
---- a/fs/nfs/callback.c
-+++ b/fs/nfs/callback.c
-@@ -128,7 +128,7 @@ nfs41_callback_svc(void *vrqstp)
- if (try_to_freeze())
- continue;
-
-- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
-+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
- spin_lock_bh(&serv->sv_cb_lock);
- if (!list_empty(&serv->sv_cb_list)) {
- req = list_first_entry(&serv->sv_cb_list,
-@@ -142,10 +142,10 @@ nfs41_callback_svc(void *vrqstp)
- error);
- } else {
- spin_unlock_bh(&serv->sv_cb_lock);
-- /* schedule_timeout to game the hung task watchdog */
-- schedule_timeout(60 * HZ);
-+ schedule();
- finish_wait(&serv->sv_cb_waitq, &wq);
- }
-+ flush_signals(current);
- }
- return 0;
- }
-diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
-index e907c8c..ab21ef1 100644
---- a/fs/nfs/direct.c
-+++ b/fs/nfs/direct.c
-@@ -129,22 +129,25 @@ nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
- int i;
- ssize_t count;
-
-- WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count);
--
-- count = dreq->mirrors[hdr->pgio_mirror_idx].count;
-- if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
-- count = hdr->io_start + hdr->good_bytes - dreq->io_start;
-- dreq->mirrors[hdr->pgio_mirror_idx].count = count;
-- }
--
-- /* update the dreq->count by finding the minimum agreed count from all
-- * mirrors */
-- count = dreq->mirrors[0].count;
-+ if (dreq->mirror_count == 1) {
-+ dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
-+ dreq->count += hdr->good_bytes;
-+ } else {
-+ /* mirrored writes */
-+ count = dreq->mirrors[hdr->pgio_mirror_idx].count;
-+ if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
-+ count = hdr->io_start + hdr->good_bytes - dreq->io_start;
-+ dreq->mirrors[hdr->pgio_mirror_idx].count = count;
-+ }
-+ /* update the dreq->count by finding the minimum agreed count from all
-+ * mirrors */
-+ count = dreq->mirrors[0].count;
-
-- for (i = 1; i < dreq->mirror_count; i++)
-- count = min(count, dreq->mirrors[i].count);
-+ for (i = 1; i < dreq->mirror_count; i++)
-+ count = min(count, dreq->mirrors[i].count);
-
-- dreq->count = count;
-+ dreq->count = count;
-+ }
- }
-
- /*
-diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
-index 5c399ec..d494ea2 100644
---- a/fs/nfs/nfs4xdr.c
-+++ b/fs/nfs/nfs4xdr.c
-@@ -7365,6 +7365,11 @@ nfs4_stat_to_errno(int stat)
- .p_name = #proc, \
- }
-
-+#define STUB(proc) \
-+[NFSPROC4_CLNT_##proc] = { \
-+ .p_name = #proc, \
-+}
-+
- struct rpc_procinfo nfs4_procedures[] = {
- PROC(READ, enc_read, dec_read),
- PROC(WRITE, enc_write, dec_write),
-@@ -7417,6 +7422,7 @@ struct rpc_procinfo nfs4_procedures[] = {
- PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
- PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
- PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
-+ STUB(GETDEVICELIST),
- PROC(BIND_CONN_TO_SESSION,
- enc_bind_conn_to_session, dec_bind_conn_to_session),
- PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
-diff --git a/fs/nfs/read.c b/fs/nfs/read.c
-index 568ecf0..848d8b1 100644
---- a/fs/nfs/read.c
-+++ b/fs/nfs/read.c
-@@ -284,7 +284,7 @@ int nfs_readpage(struct file *file, struct page *page)
- dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
- page, PAGE_CACHE_SIZE, page_file_index(page));
- nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
-- nfs_inc_stats(inode, NFSIOS_READPAGES);
-+ nfs_add_stats(inode, NFSIOS_READPAGES, 1);
-
- /*
- * Try to flush any pending writes to the file..
-diff --git a/fs/nfs/write.c b/fs/nfs/write.c
-index 849ed78..41b3f1096 100644
---- a/fs/nfs/write.c
-+++ b/fs/nfs/write.c
-@@ -580,7 +580,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
- int ret;
-
- nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
-- nfs_inc_stats(inode, NFSIOS_WRITEPAGES);
-+ nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
-
- nfs_pageio_cond_complete(pgio, page_file_index(page));
- ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
-diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
-index 92b9d97..5416968 100644
---- a/fs/nfsd/nfs4proc.c
-+++ b/fs/nfsd/nfs4proc.c
-@@ -1030,6 +1030,8 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
- return status;
- }
-+ if (!file)
-+ return nfserr_bad_stateid;
-
- status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
- fallocate->falloc_offset,
-@@ -1069,6 +1071,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
- return status;
- }
-+ if (!file)
-+ return nfserr_bad_stateid;
-
- switch (seek->seek_whence) {
- case NFS4_CONTENT_DATA:
-diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
-index 8ba1d88..ee1cccd 100644
---- a/fs/nfsd/nfs4state.c
-+++ b/fs/nfsd/nfs4state.c
-@@ -1139,7 +1139,7 @@ hash_sessionid(struct nfs4_sessionid *sessionid)
- return sid->sequence % SESSION_HASH_SIZE;
- }
-
--#ifdef NFSD_DEBUG
-+#ifdef CONFIG_SUNRPC_DEBUG
- static inline void
- dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
- {
-diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
-index 5fb7e78..5b33ce1 100644
---- a/fs/nfsd/nfs4xdr.c
-+++ b/fs/nfsd/nfs4xdr.c
-@@ -3422,6 +3422,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
- unsigned long maxcount;
- struct xdr_stream *xdr = &resp->xdr;
- struct file *file = read->rd_filp;
-+ struct svc_fh *fhp = read->rd_fhp;
- int starting_len = xdr->buf->len;
- struct raparms *ra;
- __be32 *p;
-@@ -3445,12 +3446,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
- maxcount = min_t(unsigned long, maxcount, (xdr->buf->buflen - xdr->buf->len));
- maxcount = min_t(unsigned long, maxcount, read->rd_length);
-
-- if (!read->rd_filp) {
-+ if (read->rd_filp)
-+ err = nfsd_permission(resp->rqstp, fhp->fh_export,
-+ fhp->fh_dentry,
-+ NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE);
-+ else
- err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp,
- &file, &ra);
-- if (err)
-- goto err_truncate;
-- }
-+ if (err)
-+ goto err_truncate;
-
- if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
- err = nfsd4_encode_splice_read(resp, read, file, maxcount);
-diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
-index aa47d75..9690cb4 100644
---- a/fs/nfsd/nfsctl.c
-+++ b/fs/nfsd/nfsctl.c
-@@ -1250,15 +1250,15 @@ static int __init init_nfsd(void)
- int retval;
- printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
-
-- retval = register_cld_notifier();
-- if (retval)
-- return retval;
- retval = register_pernet_subsys(&nfsd_net_ops);
- if (retval < 0)
-- goto out_unregister_notifier;
-- retval = nfsd4_init_slabs();
-+ return retval;
-+ retval = register_cld_notifier();
- if (retval)
- goto out_unregister_pernet;
-+ retval = nfsd4_init_slabs();
-+ if (retval)
-+ goto out_unregister_notifier;
- retval = nfsd4_init_pnfs();
- if (retval)
- goto out_free_slabs;
-@@ -1290,10 +1290,10 @@ out_exit_pnfs:
- nfsd4_exit_pnfs();
- out_free_slabs:
- nfsd4_free_slabs();
--out_unregister_pernet:
-- unregister_pernet_subsys(&nfsd_net_ops);
- out_unregister_notifier:
- unregister_cld_notifier();
-+out_unregister_pernet:
-+ unregister_pernet_subsys(&nfsd_net_ops);
- return retval;
- }
-
-@@ -1308,8 +1308,8 @@ static void __exit exit_nfsd(void)
- nfsd4_exit_pnfs();
- nfsd_fault_inject_cleanup();
- unregister_filesystem(&nfsd_fs_type);
-- unregister_pernet_subsys(&nfsd_net_ops);
- unregister_cld_notifier();
-+ unregister_pernet_subsys(&nfsd_net_ops);
- }
-
- MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
-diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
-index 565c4da..cf98052 100644
---- a/fs/nfsd/nfsd.h
-+++ b/fs/nfsd/nfsd.h
-@@ -24,7 +24,7 @@
- #include "export.h"
-
- #undef ifdebug
--#ifdef NFSD_DEBUG
-+#ifdef CONFIG_SUNRPC_DEBUG
- # define ifdebug(flag) if (nfsd_debug & NFSDDBG_##flag)
- #else
- # define ifdebug(flag) if (0)
-diff --git a/fs/open.c b/fs/open.c
-index 33f9cbf..44a3be1 100644
---- a/fs/open.c
-+++ b/fs/open.c
-@@ -570,6 +570,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
- uid = make_kuid(current_user_ns(), user);
- gid = make_kgid(current_user_ns(), group);
-
-+retry_deleg:
- newattrs.ia_valid = ATTR_CTIME;
- if (user != (uid_t) -1) {
- if (!uid_valid(uid))
-@@ -586,7 +587,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
- if (!S_ISDIR(inode->i_mode))
- newattrs.ia_valid |=
- ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
--retry_deleg:
- mutex_lock(&inode->i_mutex);
- error = security_path_chown(path, uid, gid);
- if (!error)
-diff --git a/fs/pnode.c b/fs/pnode.c
-index 260ac8f..6367e1e 100644
---- a/fs/pnode.c
-+++ b/fs/pnode.c
-@@ -362,6 +362,46 @@ int propagate_mount_busy(struct mount *mnt, int refcnt)
- }
-
- /*
-+ * Clear MNT_LOCKED when it can be shown to be safe.
-+ *
-+ * mount_lock lock must be held for write
-+ */
-+void propagate_mount_unlock(struct mount *mnt)
-+{
-+ struct mount *parent = mnt->mnt_parent;
-+ struct mount *m, *child;
-+
-+ BUG_ON(parent == mnt);
-+
-+ for (m = propagation_next(parent, parent); m;
-+ m = propagation_next(m, parent)) {
-+ child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
-+ if (child)
-+ child->mnt.mnt_flags &= ~MNT_LOCKED;
-+ }
-+}
-+
-+/*
-+ * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
-+ */
-+static void mark_umount_candidates(struct mount *mnt)
-+{
-+ struct mount *parent = mnt->mnt_parent;
-+ struct mount *m;
-+
-+ BUG_ON(parent == mnt);
-+
-+ for (m = propagation_next(parent, parent); m;
-+ m = propagation_next(m, parent)) {
-+ struct mount *child = __lookup_mnt_last(&m->mnt,
-+ mnt->mnt_mountpoint);
-+ if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
-+ SET_MNT_MARK(child);
-+ }
-+ }
-+}
-+
-+/*
- * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
- * parent propagates to.
- */
-@@ -378,13 +418,16 @@ static void __propagate_umount(struct mount *mnt)
- struct mount *child = __lookup_mnt_last(&m->mnt,
- mnt->mnt_mountpoint);
- /*
-- * umount the child only if the child has no
-- * other children
-+ * umount the child only if the child has no children
-+ * and the child is marked safe to unmount.
- */
-- if (child && list_empty(&child->mnt_mounts)) {
-+ if (!child || !IS_MNT_MARKED(child))
-+ continue;
-+ CLEAR_MNT_MARK(child);
-+ if (list_empty(&child->mnt_mounts)) {
- list_del_init(&child->mnt_child);
-- hlist_del_init_rcu(&child->mnt_hash);
-- hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
-+ child->mnt.mnt_flags |= MNT_UMOUNT;
-+ list_move_tail(&child->mnt_list, &mnt->mnt_list);
- }
- }
- }
-@@ -396,11 +439,14 @@ static void __propagate_umount(struct mount *mnt)
- *
- * vfsmount lock must be held for write
- */
--int propagate_umount(struct hlist_head *list)
-+int propagate_umount(struct list_head *list)
- {
- struct mount *mnt;
-
-- hlist_for_each_entry(mnt, list, mnt_hash)
-+ list_for_each_entry_reverse(mnt, list, mnt_list)
-+ mark_umount_candidates(mnt);
-+
-+ list_for_each_entry(mnt, list, mnt_list)
- __propagate_umount(mnt);
- return 0;
- }
-diff --git a/fs/pnode.h b/fs/pnode.h
-index 4a24635..7114ce6 100644
---- a/fs/pnode.h
-+++ b/fs/pnode.h
-@@ -19,6 +19,9 @@
- #define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
- #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
- #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
-+#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
-+#define IS_MNT_LOCKED_AND_LAZY(m) \
-+ (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
-
- #define CL_EXPIRE 0x01
- #define CL_SLAVE 0x02
-@@ -40,14 +43,14 @@ static inline void set_mnt_shared(struct mount *mnt)
- void change_mnt_propagation(struct mount *, int);
- int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
- struct hlist_head *);
--int propagate_umount(struct hlist_head *);
-+int propagate_umount(struct list_head *);
- int propagate_mount_busy(struct mount *, int);
-+void propagate_mount_unlock(struct mount *);
- void mnt_release_group_id(struct mount *);
- int get_dominating_id(struct mount *mnt, const struct path *root);
- unsigned int mnt_get_count(struct mount *mnt);
- void mnt_set_mountpoint(struct mount *, struct mountpoint *,
- struct mount *);
--void umount_tree(struct mount *, int);
- struct mount *copy_tree(struct mount *, struct dentry *, int);
- bool is_path_reachable(struct mount *, struct dentry *,
- const struct path *root);
-diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
-index b034f10..0d58525 100644
---- a/include/acpi/actypes.h
-+++ b/include/acpi/actypes.h
-@@ -199,9 +199,29 @@ typedef int s32;
- typedef s32 acpi_native_int;
-
- typedef u32 acpi_size;
-+
-+#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
-+
-+/*
-+ * OSPMs can define this to shrink the size of the structures for 32-bit
-+ * non-PAE environments. The ASL compiler may always define this to generate
-+ * 32-bit OSPM compliant tables.
-+ */
- typedef u32 acpi_io_address;
- typedef u32 acpi_physical_address;
-
-+#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
-+
-+/*
-+ * It is reported that, after some calculations, the physical addresses can
-+ * wrap over the 32-bit boundary on a 32-bit PAE environment.
-+ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
-+ */
-+typedef u64 acpi_io_address;
-+typedef u64 acpi_physical_address;
-+
-+#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
-+
- #define ACPI_MAX_PTR ACPI_UINT32_MAX
- #define ACPI_SIZE_MAX ACPI_UINT32_MAX
-
-@@ -736,10 +756,6 @@ typedef u32 acpi_event_status;
- #define ACPI_GPE_ENABLE 0
- #define ACPI_GPE_DISABLE 1
- #define ACPI_GPE_CONDITIONAL_ENABLE 2
--#define ACPI_GPE_SAVE_MASK 4
--
--#define ACPI_GPE_ENABLE_SAVE (ACPI_GPE_ENABLE | ACPI_GPE_SAVE_MASK)
--#define ACPI_GPE_DISABLE_SAVE (ACPI_GPE_DISABLE | ACPI_GPE_SAVE_MASK)
-
- /*
- * GPE info flags - Per GPE
-diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
-index ad74dc5..ecdf940 100644
---- a/include/acpi/platform/acenv.h
-+++ b/include/acpi/platform/acenv.h
-@@ -76,6 +76,7 @@
- #define ACPI_LARGE_NAMESPACE_NODE
- #define ACPI_DATA_TABLE_DISASSEMBLY
- #define ACPI_SINGLE_THREADED
-+#define ACPI_32BIT_PHYSICAL_ADDRESS
- #endif
-
- /* acpi_exec configuration. Multithreaded with full AML debugger */
-diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
-index ae2eb17..a215609 100644
---- a/include/dt-bindings/clock/tegra124-car-common.h
-+++ b/include/dt-bindings/clock/tegra124-car-common.h
-@@ -297,7 +297,7 @@
- #define TEGRA124_CLK_PLL_C4 270
- #define TEGRA124_CLK_PLL_DP 271
- #define TEGRA124_CLK_PLL_E_MUX 272
--#define TEGRA124_CLK_PLLD_DSI 273
-+#define TEGRA124_CLK_PLL_D_DSI_OUT 273
- /* 274 */
- /* 275 */
- /* 276 */
-diff --git a/include/linux/bpf.h b/include/linux/bpf.h
-index bbfceb7..33b52fb 100644
---- a/include/linux/bpf.h
-+++ b/include/linux/bpf.h
-@@ -48,7 +48,7 @@ struct bpf_map *bpf_map_get(struct fd f);
-
- /* function argument constraints */
- enum bpf_arg_type {
-- ARG_ANYTHING = 0, /* any argument is ok */
-+ ARG_DONTCARE = 0, /* unused argument in helper function */
-
- /* the following constraints used to prototype
- * bpf_map_lookup/update/delete_elem() functions
-@@ -62,6 +62,8 @@ enum bpf_arg_type {
- */
- ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
- ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
-+
-+ ARG_ANYTHING, /* any (initialized) argument is ok */
- };
-
- /* type of values returned from helper functions */
-diff --git a/include/linux/mount.h b/include/linux/mount.h
-index c2c561d..564beee 100644
---- a/include/linux/mount.h
-+++ b/include/linux/mount.h
-@@ -61,6 +61,7 @@ struct mnt_namespace;
- #define MNT_DOOMED 0x1000000
- #define MNT_SYNC_UMOUNT 0x2000000
- #define MNT_MARKED 0x4000000
-+#define MNT_UMOUNT 0x8000000
-
- struct vfsmount {
- struct dentry *mnt_root; /* root of the mounted tree */
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index a419b65..51348f7 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -176,6 +176,14 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
- extern void calc_global_load(unsigned long ticks);
- extern void update_cpu_load_nohz(void);
-
-+/* Notifier for when a task gets migrated to a new CPU */
-+struct task_migration_notifier {
-+ struct task_struct *task;
-+ int from_cpu;
-+ int to_cpu;
-+};
-+extern void register_task_migration_notifier(struct notifier_block *n);
-+
- extern unsigned long get_parent_ip(unsigned long addr);
-
- extern void dump_cpu_task(int cpu);
-diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index f54d665..bdccc4b 100644
---- a/include/linux/skbuff.h
-+++ b/include/linux/skbuff.h
-@@ -769,6 +769,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
-
- struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
- int node);
-+struct sk_buff *__build_skb(void *data, unsigned int frag_size);
- struct sk_buff *build_skb(void *data, unsigned int frag_size);
- static inline struct sk_buff *alloc_skb(unsigned int size,
- gfp_t priority)
-@@ -3013,6 +3014,18 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
- */
- #define CHECKSUM_BREAK 76
-
-+/* Unset checksum-complete
-+ *
-+ * Unsetting checksum-complete can be done when the packet is being
-+ * modified (uncompressed, for instance) and the checksum-complete value
-+ * is invalidated.
-+ */
-+static inline void skb_checksum_complete_unset(struct sk_buff *skb)
-+{
-+ if (skb->ip_summed == CHECKSUM_COMPLETE)
-+ skb->ip_summed = CHECKSUM_NONE;
-+}
-+
- /* Validate (init) checksum based on checksum complete.
- *
- * Return values:
-diff --git a/include/linux/usb.h b/include/linux/usb.h
-index 7ee1b5c..447fe29 100644
---- a/include/linux/usb.h
-+++ b/include/linux/usb.h
-@@ -205,6 +205,32 @@ void usb_put_intf(struct usb_interface *intf);
- #define USB_MAXINTERFACES 32
- #define USB_MAXIADS (USB_MAXINTERFACES/2)
-
-+/*
-+ * USB Resume Timer: Every Host controller driver should drive the resume
-+ * signalling on the bus for the amount of time defined by this macro.
-+ *
-+ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
-+ *
-+ * Note that the USB Specification states we should drive resume for *at least*
-+ * 20 ms, but it doesn't give an upper bound. This creates two possible
-+ * situations which we want to avoid:
-+ *
-+ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
-+ * us to fail USB Electrical Tests, thus failing Certification
-+ *
-+ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
-+ * and while we can argue that's against the USB Specification, we don't have
-+ * control over which devices a certification laboratory will be using for
-+ * certification. If CertLab uses a device which was tested against Windows and
-+ * that happens to have relaxed resume signalling rules, we might fall into
-+ * situations where we fail interoperability and electrical tests.
-+ *
-+ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
-+ * should cope with both LPJ calibration errors and devices not following every
-+ * detail of the USB Specification.
-+ */
-+#define USB_RESUME_TIMEOUT 40 /* ms */
-+
- /**
- * struct usb_interface_cache - long-term representation of a device interface
- * @num_altsetting: number of altsettings defined.
-diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
-index d3583d3..dd0f3ab 100644
---- a/include/target/iscsi/iscsi_target_core.h
-+++ b/include/target/iscsi/iscsi_target_core.h
-@@ -602,6 +602,11 @@ struct iscsi_conn {
- struct iscsi_session *sess;
- /* Pointer to thread_set in use for this conn's threads */
- struct iscsi_thread_set *thread_set;
-+ int bitmap_id;
-+ int rx_thread_active;
-+ struct task_struct *rx_thread;
-+ int tx_thread_active;
-+ struct task_struct *tx_thread;
- /* list_head for session connection list */
- struct list_head conn_list;
- } ____cacheline_aligned;
-@@ -871,10 +876,12 @@ struct iscsit_global {
- /* Unique identifier used for the authentication daemon */
- u32 auth_id;
- u32 inactive_ts;
-+#define ISCSIT_BITMAP_BITS 262144
- /* Thread Set bitmap count */
- int ts_bitmap_count;
- /* Thread Set bitmap pointer */
- unsigned long *ts_bitmap;
-+ spinlock_t ts_bitmap_lock;
- /* Used for iSCSI discovery session authentication */
- struct iscsi_node_acl discovery_acl;
- struct iscsi_portal_group *discovery_tpg;
-diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
-index 672150b..985ca4c 100644
---- a/include/target/target_core_base.h
-+++ b/include/target/target_core_base.h
-@@ -524,7 +524,7 @@ struct se_cmd {
- sense_reason_t (*execute_cmd)(struct se_cmd *);
- sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
- u32, enum dma_data_direction);
-- sense_reason_t (*transport_complete_callback)(struct se_cmd *);
-+ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
-
- unsigned char *t_task_cdb;
- unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
-diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h
-index 0bf130a..28ec6c9 100644
---- a/include/uapi/linux/nfsd/debug.h
-+++ b/include/uapi/linux/nfsd/debug.h
-@@ -12,14 +12,6 @@
- #include <linux/sunrpc/debug.h>
-
- /*
-- * Enable debugging for nfsd.
-- * Requires RPC_DEBUG.
-- */
--#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
--# define NFSD_DEBUG 1
--#endif
--
--/*
- * knfsd debug flags
- */
- #define NFSDDBG_SOCK 0x0001
-diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
-index a20e4a3..847a0a2 100644
---- a/include/video/samsung_fimd.h
-+++ b/include/video/samsung_fimd.h
-@@ -436,6 +436,12 @@
- #define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
- #define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
-
-+/* Display port clock control */
-+#define DP_MIE_CLKCON 0x27c
-+#define DP_MIE_CLK_DISABLE 0x0
-+#define DP_MIE_CLK_DP_ENABLE 0x2
-+#define DP_MIE_CLK_MIE_ENABLE 0x3
-+
- /* Notes on per-window bpp settings
- *
- * Value Win0 Win1 Win2 Win3 Win 4
-diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
-index 36508e6..5d8ea3d 100644
---- a/kernel/bpf/verifier.c
-+++ b/kernel/bpf/verifier.c
-@@ -755,7 +755,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
- enum bpf_reg_type expected_type;
- int err = 0;
-
-- if (arg_type == ARG_ANYTHING)
-+ if (arg_type == ARG_DONTCARE)
- return 0;
-
- if (reg->type == NOT_INIT) {
-@@ -763,6 +763,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
- return -EACCES;
- }
-
-+ if (arg_type == ARG_ANYTHING)
-+ return 0;
-+
- if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
- arg_type == ARG_PTR_TO_MAP_VALUE) {
- expected_type = PTR_TO_STACK;
-diff --git a/kernel/ptrace.c b/kernel/ptrace.c
-index 227fec3..9a34bd8 100644
---- a/kernel/ptrace.c
-+++ b/kernel/ptrace.c
-@@ -697,6 +697,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
- static int ptrace_resume(struct task_struct *child, long request,
- unsigned long data)
- {
-+ bool need_siglock;
-+
- if (!valid_signal(data))
- return -EIO;
-
-@@ -724,8 +726,26 @@ static int ptrace_resume(struct task_struct *child, long request,
- user_disable_single_step(child);
- }
-
-+ /*
-+ * Change ->exit_code and ->state under siglock to avoid the race
-+ * with wait_task_stopped() in between; a non-zero ->exit_code will
-+ * wrongly look like another report from the tracee.
-+ *
-+ * Note that we need siglock even if ->exit_code == data and/or this
-+ * status was not reported yet, the new status must not be cleared by
-+ * wait_task_stopped() after resume.
-+ *
-+ * If data == 0 we do not care if wait_task_stopped() reports the old
-+ * status and clears the code too; this can't race with the tracee, it
-+ * takes siglock after resume.
-+ */
-+ need_siglock = data && !thread_group_empty(current);
-+ if (need_siglock)
-+ spin_lock_irq(&child->sighand->siglock);
- child->exit_code = data;
- wake_up_state(child, __TASK_TRACED);
-+ if (need_siglock)
-+ spin_unlock_irq(&child->sighand->siglock);
-
- return 0;
- }
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 62671f5..3d5f6f6 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -996,6 +996,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
- rq_clock_skip_update(rq, true);
- }
-
-+static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-+
-+void register_task_migration_notifier(struct notifier_block *n)
-+{
-+ atomic_notifier_chain_register(&task_migration_notifier, n);
-+}
-+
- #ifdef CONFIG_SMP
- void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
- {
-@@ -1026,10 +1033,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
- trace_sched_migrate_task(p, new_cpu);
-
- if (task_cpu(p) != new_cpu) {
-+ struct task_migration_notifier tmn;
-+
- if (p->sched_class->migrate_task_rq)
- p->sched_class->migrate_task_rq(p, new_cpu);
- p->se.nr_migrations++;
- perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
-+
-+ tmn.task = p;
-+ tmn.from_cpu = task_cpu(p);
-+ tmn.to_cpu = new_cpu;
-+
-+ atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
- }
-
- __set_task_cpu(p, new_cpu);
-diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index 3fa8fa6..f670cbb 100644
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -514,7 +514,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
- unsigned long flags;
- struct rq *rq;
-
-- rq = task_rq_lock(current, &flags);
-+ rq = task_rq_lock(p, &flags);
-
- /*
- * We need to take care of several possible races here:
-@@ -569,7 +569,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
- push_dl_task(rq);
- #endif
- unlock:
-- task_rq_unlock(rq, current, &flags);
-+ task_rq_unlock(rq, p, &flags);
-
- return HRTIMER_NORESTART;
- }
-diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
-index 5040d44..922048a 100644
---- a/kernel/trace/ring_buffer.c
-+++ b/kernel/trace/ring_buffer.c
-@@ -2679,7 +2679,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
-
- static __always_inline int trace_recursive_lock(void)
- {
-- unsigned int val = this_cpu_read(current_context);
-+ unsigned int val = __this_cpu_read(current_context);
- int bit;
-
- if (in_interrupt()) {
-@@ -2696,18 +2696,17 @@ static __always_inline int trace_recursive_lock(void)
- return 1;
-
- val |= (1 << bit);
-- this_cpu_write(current_context, val);
-+ __this_cpu_write(current_context, val);
-
- return 0;
- }
-
- static __always_inline void trace_recursive_unlock(void)
- {
-- unsigned int val = this_cpu_read(current_context);
-+ unsigned int val = __this_cpu_read(current_context);
-
-- val--;
-- val &= this_cpu_read(current_context);
-- this_cpu_write(current_context, val);
-+ val &= val & (val - 1);
-+ __this_cpu_write(current_context, val);
- }
-
- #else
-diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index db54dda..a9c10a3 100644
---- a/kernel/trace/trace_events.c
-+++ b/kernel/trace/trace_events.c
-@@ -565,6 +565,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
- static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
- {
- char *event = NULL, *sub = NULL, *match;
-+ int ret;
-
- /*
- * The buf format can be <subsystem>:<event-name>
-@@ -590,7 +591,13 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
- event = NULL;
- }
-
-- return __ftrace_set_clr_event(tr, match, sub, event, set);
-+ ret = __ftrace_set_clr_event(tr, match, sub, event, set);
-+
-+ /* Put back the colon to allow this to be called again */
-+ if (buf)
-+ *(buf - 1) = ':';
-+
-+ return ret;
- }
-
- /**
-diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
-index 2d25ad1..b6fce36 100644
---- a/kernel/trace/trace_functions_graph.c
-+++ b/kernel/trace/trace_functions_graph.c
-@@ -1309,15 +1309,19 @@ void graph_trace_open(struct trace_iterator *iter)
- {
- /* pid and depth on the last trace processed */
- struct fgraph_data *data;
-+ gfp_t gfpflags;
- int cpu;
-
- iter->private = NULL;
-
-- data = kzalloc(sizeof(*data), GFP_KERNEL);
-+ /* We can be called in atomic context via ftrace_dump() */
-+ gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
-+
-+ data = kzalloc(sizeof(*data), gfpflags);
- if (!data)
- goto out_err;
-
-- data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
-+ data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
- if (!data->cpu_data)
- goto out_err_free;
-
-diff --git a/lib/string.c b/lib/string.c
-index ce81aae..a579201 100644
---- a/lib/string.c
-+++ b/lib/string.c
-@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
- void memzero_explicit(void *s, size_t count)
- {
- memset(s, 0, count);
-- OPTIMIZER_HIDE_VAR(s);
-+ barrier();
- }
- EXPORT_SYMBOL(memzero_explicit);
-
-diff --git a/mm/huge_memory.c b/mm/huge_memory.c
-index 6817b03..956d4db 100644
---- a/mm/huge_memory.c
-+++ b/mm/huge_memory.c
-@@ -2316,8 +2316,14 @@ static struct page
- struct vm_area_struct *vma, unsigned long address,
- int node)
- {
-+ gfp_t flags;
-+
- VM_BUG_ON_PAGE(*hpage, *hpage);
-
-+ /* Only allocate from the target node */
-+ flags = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
-+ __GFP_THISNODE;
-+
- /*
- * Before allocating the hugepage, release the mmap_sem read lock.
- * The allocation can take potentially a long time if it involves
-@@ -2326,8 +2332,7 @@ static struct page
- */
- up_read(&mm->mmap_sem);
-
-- *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
-- khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
-+ *hpage = alloc_pages_exact_node(node, flags, HPAGE_PMD_ORDER);
- if (unlikely(!*hpage)) {
- count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
- *hpage = ERR_PTR(-ENOMEM);
-diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index c41b2a0..caad3c5 100644
---- a/mm/hugetlb.c
-+++ b/mm/hugetlb.c
-@@ -3735,8 +3735,7 @@ retry:
- if (!pmd_huge(*pmd))
- goto out;
- if (pmd_present(*pmd)) {
-- page = pte_page(*(pte_t *)pmd) +
-- ((address & ~PMD_MASK) >> PAGE_SHIFT);
-+ page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
- if (flags & FOLL_GET)
- get_page(page);
- } else {
-diff --git a/mm/mempolicy.c b/mm/mempolicy.c
-index 4721046..de5dc5e 100644
---- a/mm/mempolicy.c
-+++ b/mm/mempolicy.c
-@@ -1985,7 +1985,8 @@ retry_cpuset:
- nmask = policy_nodemask(gfp, pol);
- if (!nmask || node_isset(node, *nmask)) {
- mpol_cond_put(pol);
-- page = alloc_pages_exact_node(node, gfp, order);
-+ page = alloc_pages_exact_node(node,
-+ gfp | __GFP_THISNODE, order);
- goto out;
- }
- }
-diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
-index 0ee453f..f371cbf 100644
---- a/net/bridge/br_netfilter.c
-+++ b/net/bridge/br_netfilter.c
-@@ -651,6 +651,13 @@ static int br_nf_forward_finish(struct sk_buff *skb)
- struct net_device *in;
-
- if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
-+ int frag_max_size;
-+
-+ if (skb->protocol == htons(ETH_P_IP)) {
-+ frag_max_size = IPCB(skb)->frag_max_size;
-+ BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
-+ }
-+
- in = nf_bridge->physindev;
- if (nf_bridge->mask & BRNF_PKT_TYPE) {
- skb->pkt_type = PACKET_OTHERHOST;
-@@ -710,8 +717,14 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
- nf_bridge->mask |= BRNF_PKT_TYPE;
- }
-
-- if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
-- return NF_DROP;
-+ if (pf == NFPROTO_IPV4) {
-+ int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size;
-+
-+ if (br_parse_ip_options(skb))
-+ return NF_DROP;
-+
-+ IPCB(skb)->frag_max_size = frag_max;
-+ }
-
- /* The physdev module checks on this */
- nf_bridge->mask |= BRNF_BRIDGED;
-diff --git a/net/core/dev.c b/net/core/dev.c
-index 45109b7..22a53ac 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -3041,7 +3041,7 @@ static struct rps_dev_flow *
- set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
- struct rps_dev_flow *rflow, u16 next_cpu)
- {
-- if (next_cpu != RPS_NO_CPU) {
-+ if (next_cpu < nr_cpu_ids) {
- #ifdef CONFIG_RFS_ACCEL
- struct netdev_rx_queue *rxqueue;
- struct rps_dev_flow_table *flow_table;
-@@ -3146,7 +3146,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
- * If the desired CPU (where last recvmsg was done) is
- * different from current CPU (one in the rx-queue flow
- * table entry), switch if one of the following holds:
-- * - Current CPU is unset (equal to RPS_NO_CPU).
-+ * - Current CPU is unset (>= nr_cpu_ids).
- * - Current CPU is offline.
- * - The current CPU's queue tail has advanced beyond the
- * last packet that was enqueued using this table entry.
-@@ -3154,14 +3154,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
- * have been dequeued, thus preserving in order delivery.
- */
- if (unlikely(tcpu != next_cpu) &&
-- (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
-+ (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
- ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
- rflow->last_qtail)) >= 0)) {
- tcpu = next_cpu;
- rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
- }
-
-- if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
-+ if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
- *rflowp = rflow;
- cpu = tcpu;
- goto done;
-@@ -3202,14 +3202,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
- struct rps_dev_flow_table *flow_table;
- struct rps_dev_flow *rflow;
- bool expire = true;
-- int cpu;
-+ unsigned int cpu;
-
- rcu_read_lock();
- flow_table = rcu_dereference(rxqueue->rps_flow_table);
- if (flow_table && flow_id <= flow_table->mask) {
- rflow = &flow_table->flows[flow_id];
- cpu = ACCESS_ONCE(rflow->cpu);
-- if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
-+ if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
- ((int)(per_cpu(softnet_data, cpu).input_queue_head -
- rflow->last_qtail) <
- (int)(10 * flow_table->mask)))
-diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 98d45fe..e9f9a15 100644
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -280,13 +280,14 @@ nodata:
- EXPORT_SYMBOL(__alloc_skb);
-
- /**
-- * build_skb - build a network buffer
-+ * __build_skb - build a network buffer
- * @data: data buffer provided by caller
-- * @frag_size: size of fragment, or 0 if head was kmalloced
-+ * @frag_size: size of data, or 0 if head was kmalloced
- *
- * Allocate a new &sk_buff. Caller provides space holding head and
- * skb_shared_info. @data must have been allocated by kmalloc() only if
-- * @frag_size is 0, otherwise data should come from the page allocator.
-+ * @frag_size is 0, otherwise data should come from the page allocator
-+ * or vmalloc().
- * The return is the new skb buffer.
- * On a failure the return is %NULL, and @data is not freed.
- * Notes :
-@@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb);
- * before giving packet to stack.
- * RX rings only contains data buffers, not full skbs.
- */
--struct sk_buff *build_skb(void *data, unsigned int frag_size)
-+struct sk_buff *__build_skb(void *data, unsigned int frag_size)
- {
- struct skb_shared_info *shinfo;
- struct sk_buff *skb;
-@@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
-
- memset(skb, 0, offsetof(struct sk_buff, tail));
- skb->truesize = SKB_TRUESIZE(size);
-- skb->head_frag = frag_size != 0;
- atomic_set(&skb->users, 1);
- skb->head = data;
- skb->data = data;
-@@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
-
- return skb;
- }
-+
-+/* build_skb() is a wrapper over __build_skb() that specifically
-+ * takes care of skb->head and skb->pfmemalloc.
-+ * This means that if @frag_size is not zero, then @data must be backed
-+ * by a page fragment, not kmalloc() or vmalloc()
-+ */
-+struct sk_buff *build_skb(void *data, unsigned int frag_size)
-+{
-+ struct sk_buff *skb = __build_skb(data, frag_size);
-+
-+ if (skb && frag_size) {
-+ skb->head_frag = 1;
-+ if (virt_to_head_page(data)->pfmemalloc)
-+ skb->pfmemalloc = 1;
-+ }
-+ return skb;
-+}
- EXPORT_SYMBOL(build_skb);
-
- struct netdev_alloc_cache {
-@@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
- gfp_t gfp = gfp_mask;
-
- if (order) {
-- gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
-+ gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
-+ __GFP_NOMEMALLOC;
- page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
- nc->frag.size = PAGE_SIZE << (page ? order : 0);
- }
-diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
-index d9bc28a..53bd53f 100644
---- a/net/ipv4/ip_forward.c
-+++ b/net/ipv4/ip_forward.c
-@@ -82,6 +82,9 @@ int ip_forward(struct sk_buff *skb)
- if (skb->pkt_type != PACKET_HOST)
- goto drop;
-
-+ if (unlikely(skb->sk))
-+ goto drop;
-+
- if (skb_warn_if_lro(skb))
- goto drop;
-
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index d520492..9d48dc4 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -2751,39 +2751,65 @@ begin_fwd:
- }
- }
-
--/* Send a fin. The caller locks the socket for us. This cannot be
-- * allowed to fail queueing a FIN frame under any circumstances.
-+/* We allow FIN packets to exceed memory limits to expedite
-+ * connection tear down and (memory) recovery.
-+ * Otherwise tcp_send_fin() could be tempted to either delay the FIN
-+ * or even be forced to close the flow without any FIN.
-+ */
-+static void sk_forced_wmem_schedule(struct sock *sk, int size)
-+{
-+ int amt, status;
-+
-+ if (size <= sk->sk_forward_alloc)
-+ return;
-+ amt = sk_mem_pages(size);
-+ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-+ sk_memory_allocated_add(sk, amt, &status);
-+}
-+
-+/* Send a FIN. The caller locks the socket for us.
-+ * We should try to send a FIN packet really hard, but eventually give up.
- */
- void tcp_send_fin(struct sock *sk)
- {
-+ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
- struct tcp_sock *tp = tcp_sk(sk);
-- struct sk_buff *skb = tcp_write_queue_tail(sk);
-- int mss_now;
-
-- /* Optimization, tack on the FIN if we have a queue of
-- * unsent frames. But be careful about outgoing SACKS
-- * and IP options.
-+	/* Optimization: tack on the FIN if we have one skb in the write queue
-+	 * and this skb was not yet sent, or we are under memory pressure.
-+	 * Note: in the latter case, the FIN packet will be sent after a timeout,
-+	 * as the TCP stack thinks it has already been transmitted.
- */
-- mss_now = tcp_current_mss(sk);
--
-- if (tcp_send_head(sk) != NULL) {
-- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
-- TCP_SKB_CB(skb)->end_seq++;
-+ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
-+coalesce:
-+ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
-+ TCP_SKB_CB(tskb)->end_seq++;
- tp->write_seq++;
-+ if (!tcp_send_head(sk)) {
-+ /* This means tskb was already sent.
-+ * Pretend we included the FIN on previous transmit.
-+ * We need to set tp->snd_nxt to the value it would have
-+ * if FIN had been sent. This is because retransmit path
-+ * does not change tp->snd_nxt.
-+ */
-+ tp->snd_nxt++;
-+ return;
-+ }
- } else {
-- /* Socket is locked, keep trying until memory is available. */
-- for (;;) {
-- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
-- if (skb)
-- break;
-- yield();
-+ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
-+ if (unlikely(!skb)) {
-+ if (tskb)
-+ goto coalesce;
-+ return;
- }
-+ skb_reserve(skb, MAX_TCP_HEADER);
-+ sk_forced_wmem_schedule(sk, skb->truesize);
- /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
- tcp_init_nondata_skb(skb, tp->write_seq,
- TCPHDR_ACK | TCPHDR_FIN);
- tcp_queue_skb(sk, skb);
- }
-- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
-+ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
- }
-
- /* We get here when a process closes a file descriptor (either due to
-diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
-index 142f66a..0ca013d 100644
---- a/net/mac80211/mlme.c
-+++ b/net/mac80211/mlme.c
-@@ -2260,7 +2260,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
- else
- ssid_len = ssid[1];
-
-- ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL,
-+ ieee80211_send_probe_req(sdata, sdata->vif.addr, dst,
- ssid + 2, ssid_len, NULL,
- 0, (u32) -1, true, 0,
- ifmgd->associated->channel, false);
-diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index 05919bf..d1d7a81 100644
---- a/net/netlink/af_netlink.c
-+++ b/net/netlink/af_netlink.c
-@@ -1616,13 +1616,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
- if (data == NULL)
- return NULL;
-
-- skb = build_skb(data, size);
-+ skb = __build_skb(data, size);
- if (skb == NULL)
- vfree(data);
-- else {
-- skb->head_frag = 0;
-+ else
- skb->destructor = netlink_skb_destructor;
-- }
-
- return skb;
- }
-diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
-index 2ca9f2e..53745f4 100644
---- a/sound/pci/emu10k1/emuproc.c
-+++ b/sound/pci/emu10k1/emuproc.c
-@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
- struct snd_emu10k1 *emu = entry->private_data;
- u32 value;
- u32 value2;
-- unsigned long flags;
- u32 rate;
-
- if (emu->card_capabilities->emu_model) {
-- spin_lock_irqsave(&emu->emu_lock, flags);
- snd_emu1010_fpga_read(emu, 0x38, &value);
-- spin_unlock_irqrestore(&emu->emu_lock, flags);
- if ((value & 0x1) == 0) {
-- spin_lock_irqsave(&emu->emu_lock, flags);
- snd_emu1010_fpga_read(emu, 0x2a, &value);
- snd_emu1010_fpga_read(emu, 0x2b, &value2);
-- spin_unlock_irqrestore(&emu->emu_lock, flags);
- rate = 0x1770000 / (((value << 5) | value2)+1);
- snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
- } else {
- snd_iprintf(buffer, "ADAT Unlocked\n");
- }
-- spin_lock_irqsave(&emu->emu_lock, flags);
- snd_emu1010_fpga_read(emu, 0x20, &value);
-- spin_unlock_irqrestore(&emu->emu_lock, flags);
- if ((value & 0x4) == 0) {
-- spin_lock_irqsave(&emu->emu_lock, flags);
- snd_emu1010_fpga_read(emu, 0x28, &value);
- snd_emu1010_fpga_read(emu, 0x29, &value2);
-- spin_unlock_irqrestore(&emu->emu_lock, flags);
- rate = 0x1770000 / (((value << 5) | value2)+1);
- snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
- } else {
-@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
- {
- struct snd_emu10k1 *emu = entry->private_data;
- u32 value;
-- unsigned long flags;
- int i;
- snd_iprintf(buffer, "EMU1010 Registers:\n\n");
-
- for(i = 0; i < 0x40; i+=1) {
-- spin_lock_irqsave(&emu->emu_lock, flags);
- snd_emu1010_fpga_read(emu, i, &value);
-- spin_unlock_irqrestore(&emu->emu_lock, flags);
- snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
- }
- }
-diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
-index f9d12c0..2fd490b 100644
---- a/sound/pci/hda/patch_realtek.c
-+++ b/sound/pci/hda/patch_realtek.c
-@@ -5047,12 +5047,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
-+ SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
- SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
- SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
- SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
-+ SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
-@@ -5142,6 +5144,16 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
- {0x1b, 0x411111f0}, \
- {0x1e, 0x411111f0}
-
-+#define ALC256_STANDARD_PINS \
-+ {0x12, 0x90a60140}, \
-+ {0x14, 0x90170110}, \
-+ {0x19, 0x411111f0}, \
-+ {0x1a, 0x411111f0}, \
-+ {0x1b, 0x411111f0}, \
-+ {0x1d, 0x40700001}, \
-+ {0x1e, 0x411111f0}, \
-+ {0x21, 0x02211020}
-+
- #define ALC282_STANDARD_PINS \
- {0x14, 0x90170110}, \
- {0x18, 0x411111f0}, \
-@@ -5235,15 +5247,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
- {0x1d, 0x40700001},
- {0x21, 0x02211050}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-- {0x12, 0x90a60140},
-- {0x13, 0x40000000},
-- {0x14, 0x90170110},
-- {0x19, 0x411111f0},
-- {0x1a, 0x411111f0},
-- {0x1b, 0x411111f0},
-- {0x1d, 0x40700001},
-- {0x1e, 0x411111f0},
-- {0x21, 0x02211020}),
-+ ALC256_STANDARD_PINS,
-+ {0x13, 0x40000000}),
-+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-+ ALC256_STANDARD_PINS,
-+ {0x13, 0x411111f0}),
- SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
- {0x12, 0x90a60130},
- {0x13, 0x40000000},
-@@ -5563,6 +5571,8 @@ static int patch_alc269(struct hda_codec *codec)
- break;
- case 0x10ec0256:
- spec->codec_variant = ALC269_TYPE_ALC256;
-+ spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
-+ alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
- break;
- }
-
-@@ -5576,8 +5586,8 @@ static int patch_alc269(struct hda_codec *codec)
- if (err < 0)
- goto error;
-
-- if (!spec->gen.no_analog && spec->gen.beep_nid)
-- set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
-+ if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid)
-+ set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT);
-
- codec->patch_ops = alc_patch_ops;
- #ifdef CONFIG_PM
-diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
-index 7d3a6ac..e770ee6 100644
---- a/sound/soc/codecs/cs4271.c
-+++ b/sound/soc/codecs/cs4271.c
-@@ -561,10 +561,10 @@ static int cs4271_codec_probe(struct snd_soc_codec *codec)
- if (gpio_is_valid(cs4271->gpio_nreset)) {
- /* Reset codec */
- gpio_direction_output(cs4271->gpio_nreset, 0);
-- udelay(1);
-+ mdelay(1);
- gpio_set_value(cs4271->gpio_nreset, 1);
- /* Give the codec time to wake up */
-- udelay(1);
-+ mdelay(1);
- }
-
- ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
-diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
-index 474cae8..8c09e3f 100644
---- a/sound/soc/codecs/pcm512x.c
-+++ b/sound/soc/codecs/pcm512x.c
-@@ -304,9 +304,9 @@ static const struct soc_enum pcm512x_veds =
- static const struct snd_kcontrol_new pcm512x_controls[] = {
- SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2,
- PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv),
--SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
-+SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
- PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv),
--SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
-+SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
- PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv),
- SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
- PCM512x_RQMR_SHIFT, 1, 1),
-@@ -576,8 +576,8 @@ static int pcm512x_find_pll_coeff(struct snd_soc_dai *dai,
-
- /* pllin_rate / P (or here, den) cannot be greater than 20 MHz */
- if (pllin_rate / den > 20000000 && num < 8) {
-- num *= 20000000 / (pllin_rate / den);
-- den *= 20000000 / (pllin_rate / den);
-+ num *= DIV_ROUND_UP(pllin_rate / den, 20000000);
-+ den *= DIV_ROUND_UP(pllin_rate / den, 20000000);
- }
- dev_dbg(dev, "num / den = %lu / %lu\n", num, den);
-
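
The pcm512x hunk above fixes an inverted integer division: once pllin_rate / den exceeds 20 MHz, the old factor 20000000 / (pllin_rate / den) truncates to zero and wipes out num and den, whereas the fix scales both by the ceiling of (pllin_rate / den) / 20 MHz. A standalone sketch using the kernel's DIV_ROUND_UP definition, with illustrative numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long pllin_rate = 25000000, den = 1, num = 4;

        unsigned long bad  = 20000000 / (pllin_rate / den);            /* 0 */
        unsigned long good = DIV_ROUND_UP(pllin_rate / den, 20000000); /* 2 */

        printf("old scale = %lu, new scale = %lu, num becomes %lu\n",
               bad, good, num * good);
        return 0;
    }
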
-diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
-index 31bb480..9e71c76 100644
---- a/sound/soc/codecs/wm8741.c
-+++ b/sound/soc/codecs/wm8741.c
-@@ -123,7 +123,7 @@ static struct {
- };
-
- static const unsigned int rates_11289[] = {
-- 44100, 88235,
-+ 44100, 88200,
- };
-
- static const struct snd_pcm_hw_constraint_list constraints_11289 = {
-@@ -150,7 +150,7 @@ static const struct snd_pcm_hw_constraint_list constraints_16384 = {
- };
-
- static const unsigned int rates_16934[] = {
-- 44100, 88235,
-+ 44100, 88200,
- };
-
- static const struct snd_pcm_hw_constraint_list constraints_16934 = {
-@@ -168,7 +168,7 @@ static const struct snd_pcm_hw_constraint_list constraints_18432 = {
- };
-
- static const unsigned int rates_22579[] = {
-- 44100, 88235, 1764000
-+ 44100, 88200, 176400
- };
-
- static const struct snd_pcm_hw_constraint_list constraints_22579 = {
-@@ -186,7 +186,7 @@ static const struct snd_pcm_hw_constraint_list constraints_24576 = {
- };
-
- static const unsigned int rates_36864[] = {
-- 48000, 96000, 19200
-+ 48000, 96000, 192000
- };
-
- static const struct snd_pcm_hw_constraint_list constraints_36864 = {
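
The wm8741 hunks above correct mistyped entries: each constraint list holds the sample rates reachable from one master clock, so the 11.2896 and 22.5792 MHz clocks yield 88200 and 176400 (not 88235 and 1764000), and 36.864 MHz yields 192000 (not 19200). The arithmetic, checked in a few lines:

    #include <stdio.h>

    int main(void)
    {
        /* master clock / divider = sample rate */
        printf("%d\n", 11289600 / 128);   /* 88200  */
        printf("%d\n", 22579200 / 128);   /* 176400 */
        printf("%d\n", 36864000 / 192);   /* 192000 */
        return 0;
    }
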
-diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
-index b6bb594..8c2b9be 100644
---- a/sound/soc/davinci/davinci-evm.c
-+++ b/sound/soc/davinci/davinci-evm.c
-@@ -425,18 +425,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
- return ret;
- }
-
--static int davinci_evm_remove(struct platform_device *pdev)
--{
-- struct snd_soc_card *card = platform_get_drvdata(pdev);
--
-- snd_soc_unregister_card(card);
--
-- return 0;
--}
--
- static struct platform_driver davinci_evm_driver = {
- .probe = davinci_evm_probe,
-- .remove = davinci_evm_remove,
- .driver = {
- .name = "davinci_evm",
- .pm = &snd_soc_pm_ops,
-diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
-index 9a28365..32631a8 100644
---- a/sound/usb/quirks.c
-+++ b/sound/usb/quirks.c
-@@ -1115,6 +1115,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
- {
- /* devices which do not support reading the sample rate. */
- switch (chip->usb_id) {
-+ case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
- case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
- case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
- return true;
-diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
-index dcc6652..deb3569 100644
---- a/tools/lib/traceevent/kbuffer-parse.c
-+++ b/tools/lib/traceevent/kbuffer-parse.c
-@@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
- switch (type_len) {
- case KBUFFER_TYPE_PADDING:
- *length = read_4(kbuf, data);
-- data += *length;
- break;
-
- case KBUFFER_TYPE_TIME_EXTEND:
-diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
-index cc22408..0884d31 100644
---- a/tools/perf/config/Makefile
-+++ b/tools/perf/config/Makefile
-@@ -651,7 +651,7 @@ ifeq (${IS_64_BIT}, 1)
- NO_PERF_READ_VDSO32 := 1
- endif
- endif
-- ifneq (${IS_X86_64}, 1)
-+ ifneq ($(ARCH), x86)
- NO_PERF_READ_VDSOX32 := 1
- endif
- ifndef NO_PERF_READ_VDSOX32
-@@ -699,7 +699,7 @@ sysconfdir = $(prefix)/etc
- ETC_PERFCONFIG = etc/perfconfig
- endif
- ifndef lib
--ifeq ($(IS_X86_64),1)
-+ifeq ($(ARCH)$(IS_64_BIT), x861)
- lib = lib64
- else
- lib = lib
-diff --git a/tools/perf/tests/make b/tools/perf/tests/make
-index 75709d2..bff8532 100644
---- a/tools/perf/tests/make
-+++ b/tools/perf/tests/make
-@@ -5,7 +5,7 @@ include config/Makefile.arch
-
- # FIXME looks like x86 is the only arch running tests ;-)
- # we need some IS_(32/64) flag to make this generic
--ifeq ($(IS_X86_64),1)
-+ifeq ($(ARCH)$(IS_64_BIT), x861)
- lib = lib64
- else
- lib = lib
-diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
-index 6da965b..85b5238 100644
---- a/tools/perf/util/cloexec.c
-+++ b/tools/perf/util/cloexec.c
-@@ -7,6 +7,12 @@
-
- static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
-
-+int __weak sched_getcpu(void)
-+{
-+ errno = ENOSYS;
-+ return -1;
-+}
-+
- static int perf_flag_probe(void)
- {
- /* use 'safest' configuration as used in perf_evsel__fallback() */
-diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h
-index 94a5a7d..68888c2 100644
---- a/tools/perf/util/cloexec.h
-+++ b/tools/perf/util/cloexec.h
-@@ -3,4 +3,10 @@
-
- unsigned long perf_event_open_cloexec_flag(void);
-
-+#ifdef __GLIBC_PREREQ
-+#if !__GLIBC_PREREQ(2, 6)
-+extern int sched_getcpu(void) __THROW;
-+#endif
-+#endif
-+
- #endif /* __PERF_CLOEXEC_H */
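
The two perf hunks above pair a weak fallback definition of sched_getcpu() with a declaration guarded by __GLIBC_PREREQ: an old glibc without the symbol links the ENOSYS stub, while a newer glibc's strong definition silently wins. A minimal sketch of the weak-symbol mechanism for GCC/Clang, using a hypothetical function name (my_getcpu is not a real API):

    #include <errno.h>
    #include <stdio.h>

    /* Weak fallback: only used when no strong definition of
     * my_getcpu() is linked in, like the sched_getcpu() stub above. */
    int __attribute__((weak)) my_getcpu(void)
    {
        errno = ENOSYS;
        return -1;
    }

    int main(void)
    {
        printf("my_getcpu() = %d\n", my_getcpu());
        return 0;
    }
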
-diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
-index 33b7a2a..9bdf007 100644
---- a/tools/perf/util/symbol-elf.c
-+++ b/tools/perf/util/symbol-elf.c
-@@ -74,6 +74,10 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
- return GELF_ST_TYPE(sym->st_info);
- }
-
-+#ifndef STT_GNU_IFUNC
-+#define STT_GNU_IFUNC 10
-+#endif
-+
- static inline int elf_sym__is_function(const GElf_Sym *sym)
- {
- return (elf_sym__type(sym) == STT_FUNC ||
-diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
-index d1b3a36..4039854 100644
---- a/tools/power/x86/turbostat/Makefile
-+++ b/tools/power/x86/turbostat/Makefile
-@@ -1,8 +1,12 @@
- CC = $(CROSS_COMPILE)gcc
--BUILD_OUTPUT := $(PWD)
-+BUILD_OUTPUT := $(CURDIR)
- PREFIX := /usr
- DESTDIR :=
-
-+ifeq ("$(origin O)", "command line")
-+ BUILD_OUTPUT := $(O)
-+endif
-+
- turbostat : turbostat.c
- CFLAGS += -Wall
- CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
-diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
-index c9f60f5..e5abe7c 100644
---- a/virt/kvm/arm/vgic.c
-+++ b/virt/kvm/arm/vgic.c
-@@ -1371,6 +1371,9 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
- goto out;
- }
-
-+ if (irq_num >= kvm->arch.vgic.nr_irqs)
-+ return -EINVAL;
-+
- vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
- if (vcpu_id >= 0) {
- /* kick the specified vcpu */
-diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index cc6a25d..f8f3f5f 100644
---- a/virt/kvm/kvm_main.c
-+++ b/virt/kvm/kvm_main.c
-@@ -1653,8 +1653,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- ghc->generation = slots->generation;
- ghc->len = len;
- ghc->memslot = gfn_to_memslot(kvm, start_gfn);
-- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
-- if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
-+ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
-+ if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
- ghc->hva += offset;
- } else {
- /*
diff --git a/1002_linux-4.0.3.patch b/1002_linux-4.0.3.patch
deleted file mode 100644
index d137bf26..00000000
--- a/1002_linux-4.0.3.patch
+++ /dev/null
@@ -1,2827 +0,0 @@
-diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index bfcb1a62a7b4..4d68ec841304 100644
---- a/Documentation/kernel-parameters.txt
-+++ b/Documentation/kernel-parameters.txt
-@@ -3746,6 +3746,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
- READ_CAPACITY_16 command);
- f = NO_REPORT_OPCODES (don't use report opcodes
- command, uas only);
-+ g = MAX_SECTORS_240 (don't transfer more than
-+ 240 sectors at a time, uas only);
- h = CAPACITY_HEURISTICS (decrease the
- reported device capacity by one
- sector if the number is odd);
-diff --git a/Makefile b/Makefile
-index 0649a6011a76..dc9f43a019d6 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 0
--SUBLEVEL = 2
-+SUBLEVEL = 3
- EXTRAVERSION =
- NAME = Hurr durr I'ma sheep
-
-diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
-index ef7d112f5ce0..b0bd4e5fd5cf 100644
---- a/arch/arm64/mm/dma-mapping.c
-+++ b/arch/arm64/mm/dma-mapping.c
-@@ -67,8 +67,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
-
- *ret_page = phys_to_page(phys);
- ptr = (void *)val;
-- if (flags & __GFP_ZERO)
-- memset(ptr, 0, size);
-+ memset(ptr, 0, size);
- }
-
- return ptr;
-@@ -105,7 +104,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
- struct page *page;
- void *addr;
-
-- size = PAGE_ALIGN(size);
- page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size));
- if (!page)
-@@ -113,8 +111,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
-
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
- addr = page_address(page);
-- if (flags & __GFP_ZERO)
-- memset(addr, 0, size);
-+ memset(addr, 0, size);
- return addr;
- } else {
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-@@ -195,6 +192,8 @@ static void __dma_free(struct device *dev, size_t size,
- {
- void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
-
-+ size = PAGE_ALIGN(size);
-+
- if (!is_device_dma_coherent(dev)) {
- if (__free_from_pool(vaddr, size))
- return;
-diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
-index c7a16904cd03..1a313c468d65 100644
---- a/arch/mips/Kconfig
-+++ b/arch/mips/Kconfig
-@@ -2072,7 +2072,7 @@ config MIPSR2_TO_R6_EMULATOR
- help
- Choose this option if you want to run non-R6 MIPS userland code.
- Even if you say 'Y' here, the emulator will still be disabled by
-- default. You can enable it using the 'mipsr2emul' kernel option.
-+ default. You can enable it using the 'mipsr2emu' kernel option.
- The only reason this is a build-time option is to save ~14K from the
- final kernel image.
- comment "MIPS R2-to-R6 emulator is only available for UP kernels"
-@@ -2142,7 +2142,7 @@ config MIPS_CMP
-
- config MIPS_CPS
- bool "MIPS Coherent Processing System support"
-- depends on SYS_SUPPORTS_MIPS_CPS
-+ depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
- select MIPS_CM
- select MIPS_CPC
- select MIPS_CPS_PM if HOTPLUG_CPU
-diff --git a/arch/mips/Makefile b/arch/mips/Makefile
-index 8f57fc72d62c..1b4dab1e6ab8 100644
---- a/arch/mips/Makefile
-+++ b/arch/mips/Makefile
-@@ -197,11 +197,17 @@ endif
- # Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
- # Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
- # been fixed properly.
--mips-cflags := "$(cflags-y)"
--cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
--cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,$(mips-cflags),-mmicromips)
-+mips-cflags := $(cflags-y)
-+ifeq ($(CONFIG_CPU_HAS_SMARTMIPS),y)
-+smartmips-ase := $(call cc-option-yn,$(mips-cflags) -msmartmips)
-+cflags-$(smartmips-ase) += -msmartmips -Wa,--no-warn
-+endif
-+ifeq ($(CONFIG_CPU_MICROMIPS),y)
-+micromips-ase := $(call cc-option-yn,$(mips-cflags) -mmicromips)
-+cflags-$(micromips-ase) += -mmicromips
-+endif
- ifeq ($(CONFIG_CPU_HAS_MSA),y)
--toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
-+toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(comma)-mmsa)
- cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
- endif
-
-diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
-index b3ae068ca4fa..3fd369d74444 100644
---- a/arch/mips/bcm47xx/board.c
-+++ b/arch/mips/bcm47xx/board.c
-@@ -247,8 +247,8 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
- }
-
- if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0 &&
-- bcm47xx_nvram_getenv("boardtype", buf2, sizeof(buf2)) >= 0) {
-- for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) {
-+ bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0) {
-+ for (e2 = bcm47xx_board_list_hw_version_num; e2->value1; e2++) {
- if (!strstarts(buf1, e2->value1) &&
- !strcmp(buf2, e2->value2))
- return &e2->board;
-diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
-index e1f27d653f60..7019e2967009 100644
---- a/arch/mips/bcm63xx/prom.c
-+++ b/arch/mips/bcm63xx/prom.c
-@@ -17,7 +17,6 @@
- #include <bcm63xx_cpu.h>
- #include <bcm63xx_io.h>
- #include <bcm63xx_regs.h>
--#include <bcm63xx_gpio.h>
-
- void __init prom_init(void)
- {
-@@ -53,9 +52,6 @@ void __init prom_init(void)
- reg &= ~mask;
- bcm_perf_writel(reg, PERF_CKCTL_REG);
-
-- /* register gpiochip */
-- bcm63xx_gpio_init();
--
- /* do low level board init */
- board_prom_init();
-
-diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
-index 6660c7ddf87b..240fb4ffa55c 100644
---- a/arch/mips/bcm63xx/setup.c
-+++ b/arch/mips/bcm63xx/setup.c
-@@ -20,6 +20,7 @@
- #include <bcm63xx_cpu.h>
- #include <bcm63xx_regs.h>
- #include <bcm63xx_io.h>
-+#include <bcm63xx_gpio.h>
-
- void bcm63xx_machine_halt(void)
- {
-@@ -160,6 +161,9 @@ void __init plat_mem_setup(void)
-
- int __init bcm63xx_register_devices(void)
- {
-+ /* register gpiochip */
-+ bcm63xx_gpio_init();
-+
- return board_register_devices();
- }
-
-diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
-index 7d8987818ccf..d8960d46417b 100644
---- a/arch/mips/cavium-octeon/dma-octeon.c
-+++ b/arch/mips/cavium-octeon/dma-octeon.c
-@@ -306,7 +306,7 @@ void __init plat_swiotlb_setup(void)
- swiotlbsize = 64 * (1<<20);
- }
- #endif
--#ifdef CONFIG_USB_OCTEON_OHCI
-+#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
- /* OCTEON II ohci is only 32-bit. */
- if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
- swiotlbsize = 64 * (1<<20);
-diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
-index a42110e7edbc..a7f40820e567 100644
---- a/arch/mips/cavium-octeon/setup.c
-+++ b/arch/mips/cavium-octeon/setup.c
-@@ -413,7 +413,10 @@ static void octeon_restart(char *command)
-
- mb();
- while (1)
-- cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
-+ if (OCTEON_IS_OCTEON3())
-+ cvmx_write_csr(CVMX_RST_SOFT_RST, 1);
-+ else
-+ cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
- }
-
-
-diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
-index e08381a37f8b..723229f4cf27 100644
---- a/arch/mips/include/asm/cacheflush.h
-+++ b/arch/mips/include/asm/cacheflush.h
-@@ -29,6 +29,20 @@
- * - flush_icache_all() flush the entire instruction cache
- * - flush_data_cache_page() flushes a page from the data cache
- */
-+
-+ /*
-+ * This flag is used to indicate that the page pointed to by a pte
-+ * is dirty and requires cleaning before returning it to the user.
-+ */
-+#define PG_dcache_dirty PG_arch_1
-+
-+#define Page_dcache_dirty(page) \
-+ test_bit(PG_dcache_dirty, &(page)->flags)
-+#define SetPageDcacheDirty(page) \
-+ set_bit(PG_dcache_dirty, &(page)->flags)
-+#define ClearPageDcacheDirty(page) \
-+ clear_bit(PG_dcache_dirty, &(page)->flags)
-+
- extern void (*flush_cache_all)(void);
- extern void (*__flush_cache_all)(void);
- extern void (*flush_cache_mm)(struct mm_struct *mm);
-@@ -37,13 +51,15 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma,
- unsigned long start, unsigned long end);
- extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
- extern void __flush_dcache_page(struct page *page);
-+extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
- #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
- static inline void flush_dcache_page(struct page *page)
- {
-- if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
-+ if (cpu_has_dc_aliases)
- __flush_dcache_page(page);
--
-+ else if (!cpu_has_ic_fills_f_dc)
-+ SetPageDcacheDirty(page);
- }
-
- #define flush_dcache_mmap_lock(mapping) do { } while (0)
-@@ -61,6 +77,11 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
- static inline void flush_icache_page(struct vm_area_struct *vma,
- struct page *page)
- {
-+ if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
-+ Page_dcache_dirty(page)) {
-+ __flush_icache_page(vma, page);
-+ ClearPageDcacheDirty(page);
-+ }
- }
-
- extern void (*flush_icache_range)(unsigned long start, unsigned long end);
-@@ -95,19 +116,6 @@ extern void (*flush_icache_all)(void);
- extern void (*local_flush_data_cache_page)(void * addr);
- extern void (*flush_data_cache_page)(unsigned long addr);
-
--/*
-- * This flag is used to indicate that the page pointed to by a pte
-- * is dirty and requires cleaning before returning it to the user.
-- */
--#define PG_dcache_dirty PG_arch_1
--
--#define Page_dcache_dirty(page) \
-- test_bit(PG_dcache_dirty, &(page)->flags)
--#define SetPageDcacheDirty(page) \
-- set_bit(PG_dcache_dirty, &(page)->flags)
--#define ClearPageDcacheDirty(page) \
-- clear_bit(PG_dcache_dirty, &(page)->flags)
--
- /* Run kernel code uncached, useful for cache probing functions. */
- unsigned long run_uncached(void *func);
-
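
The cacheflush.h hunk above mostly hoists the PG_dcache_dirty machinery so that flush_dcache_page() can defer work by marking the page and flush_icache_page() can flush and clear the mark later. A userspace sketch of the flag pattern, with plain bit operations standing in for the kernel's atomic test_bit/set_bit/clear_bit:

    #include <stdio.h>

    #define PG_dcache_dirty 1   /* a bit number, like PG_arch_1 */

    struct page { unsigned long flags; };

    #define Page_dcache_dirty(p)    (!!((p)->flags & (1UL << PG_dcache_dirty)))
    #define SetPageDcacheDirty(p)   ((p)->flags |=  (1UL << PG_dcache_dirty))
    #define ClearPageDcacheDirty(p) ((p)->flags &= ~(1UL << PG_dcache_dirty))

    int main(void)
    {
        struct page pg = { 0 };

        SetPageDcacheDirty(&pg);        /* flush_dcache_page() defers */
        if (Page_dcache_dirty(&pg))     /* flush_icache_page() checks */
            ClearPageDcacheDirty(&pg);  /* ... flushes, then clears   */
        printf("dirty = %d\n", Page_dcache_dirty(&pg));
        return 0;
    }
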
-diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
-index 0d8208de9a3f..345fd7f80730 100644
---- a/arch/mips/include/asm/cpu-features.h
-+++ b/arch/mips/include/asm/cpu-features.h
-@@ -235,8 +235,39 @@
- /* MIPSR2 and MIPSR6 have a lot of similarities */
- #define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6)
-
-+/*
-+ * cpu_has_mips_r2_exec_hazard - return if IHB is required on current processor
-+ *
-+ * Returns non-zero value if the current processor implementation requires
-+ * an IHB instruction to deal with an instruction hazard as per MIPS R2
-+ * architecture specification, zero otherwise.
-+ */
- #ifndef cpu_has_mips_r2_exec_hazard
--#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
-+#define cpu_has_mips_r2_exec_hazard \
-+({ \
-+ int __res; \
-+ \
-+ switch (current_cpu_type()) { \
-+ case CPU_M14KC: \
-+ case CPU_74K: \
-+ case CPU_1074K: \
-+ case CPU_PROAPTIV: \
-+ case CPU_P5600: \
-+ case CPU_M5150: \
-+ case CPU_QEMU_GENERIC: \
-+ case CPU_CAVIUM_OCTEON: \
-+ case CPU_CAVIUM_OCTEON_PLUS: \
-+ case CPU_CAVIUM_OCTEON2: \
-+ case CPU_CAVIUM_OCTEON3: \
-+ __res = 0; \
-+ break; \
-+ \
-+ default: \
-+ __res = 1; \
-+ } \
-+ \
-+ __res; \
-+})
- #endif
-
- /*
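
The rewritten cpu_has_mips_r2_exec_hazard above is a GCC statement expression: the ({ ... }) block may contain full statements such as a switch, and it evaluates to its last expression (__res). A compilable sketch of the idiom, with illustrative CPU ids:

    #include <stdio.h>

    /* ({ ... }) evaluates to its final expression, so a macro can
     * wrap a whole switch statement. Ids here are made up. */
    #define needs_ihb(cpu)          \
    ({                              \
        int __res;                  \
        switch (cpu) {              \
        case 74:                    \
        case 1074:                  \
            __res = 0;              \
            break;                  \
        default:                    \
            __res = 1;              \
        }                           \
        __res;                      \
    })

    int main(void)
    {
        printf("%d %d\n", needs_ihb(74), needs_ihb(5600));
        return 0;
    }
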
-diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
-index 535f196ffe02..694925a26924 100644
---- a/arch/mips/include/asm/elf.h
-+++ b/arch/mips/include/asm/elf.h
-@@ -294,6 +294,9 @@ do { \
- if (personality(current->personality) != PER_LINUX) \
- set_personality(PER_LINUX); \
- \
-+ clear_thread_flag(TIF_HYBRID_FPREGS); \
-+ set_thread_flag(TIF_32BIT_FPREGS); \
-+ \
- mips_set_personality_fp(state); \
- \
- current->thread.abi = &mips_abi; \
-@@ -319,6 +322,8 @@ do { \
- do { \
- set_thread_flag(TIF_32BIT_REGS); \
- set_thread_flag(TIF_32BIT_ADDR); \
-+ clear_thread_flag(TIF_HYBRID_FPREGS); \
-+ set_thread_flag(TIF_32BIT_FPREGS); \
- \
- mips_set_personality_fp(state); \
- \
-diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
-index fa1f3cfbae8d..d68e685cde60 100644
---- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
-+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
-@@ -50,7 +50,6 @@
- #define cpu_has_mips32r2 0
- #define cpu_has_mips64r1 0
- #define cpu_has_mips64r2 1
--#define cpu_has_mips_r2_exec_hazard 0
- #define cpu_has_dsp 0
- #define cpu_has_dsp2 0
- #define cpu_has_mipsmt 0
-diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
-index 33db1c806b01..774bb45834cb 100644
---- a/arch/mips/include/asm/octeon/cvmx.h
-+++ b/arch/mips/include/asm/octeon/cvmx.h
-@@ -436,14 +436,6 @@ static inline uint64_t cvmx_get_cycle_global(void)
-
- /***************************************************************************/
-
--static inline void cvmx_reset_octeon(void)
--{
-- union cvmx_ciu_soft_rst ciu_soft_rst;
-- ciu_soft_rst.u64 = 0;
-- ciu_soft_rst.s.soft_rst = 1;
-- cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
--}
--
- /* Return the number of cores available in the chip */
- static inline uint32_t cvmx_octeon_num_cores(void)
- {
-diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
-index 64ba56a02843..1884609741a8 100644
---- a/arch/mips/include/asm/octeon/pci-octeon.h
-+++ b/arch/mips/include/asm/octeon/pci-octeon.h
-@@ -11,9 +11,6 @@
-
- #include <linux/pci.h>
-
--/* Some PCI cards require delays when accessing config space. */
--#define PCI_CONFIG_SPACE_DELAY 10000
--
- /*
- * The physical memory base mapped by BAR1. 256MB at the end of the
- * first 4GB.
-diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
-index bef782c4a44b..f8f809fd6c6d 100644
---- a/arch/mips/include/asm/pgtable.h
-+++ b/arch/mips/include/asm/pgtable.h
-@@ -127,10 +127,6 @@ do { \
- } \
- } while(0)
-
--
--extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
-- pte_t pteval);
--
- #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
-
- #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
-@@ -154,6 +150,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
- }
- }
- }
-+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
-
- static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- {
-@@ -192,6 +189,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
- }
- #endif
- }
-+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
-
- static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- {
-@@ -407,12 +405,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-
- extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
- pte_t pte);
-+extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
-+ pte_t pte);
-
- static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
- {
- pte_t pte = *ptep;
- __update_tlb(vma, address, pte);
-+ __update_cache(vma, address, pte);
- }
-
- static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
-diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
-index 1b22d2da88a1..38902bf97adc 100644
---- a/arch/mips/include/asm/r4kcache.h
-+++ b/arch/mips/include/asm/r4kcache.h
-@@ -12,6 +12,8 @@
- #ifndef _ASM_R4KCACHE_H
- #define _ASM_R4KCACHE_H
-
-+#include <linux/stringify.h>
-+
- #include <asm/asm.h>
- #include <asm/cacheops.h>
- #include <asm/compiler.h>
-@@ -344,7 +346,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
- " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
- " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
- " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
-- " addiu $1, $0, 0x100 \n" \
-+ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x010($1)\n" \
- " cache %1, 0x020($1); cache %1, 0x030($1)\n" \
- " cache %1, 0x040($1); cache %1, 0x050($1)\n" \
-@@ -368,17 +370,17 @@ static inline void invalidate_tcache_page(unsigned long addr)
- " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
- " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
- " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-+ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
- " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
- " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
-- " addiu $1, $1, 0x100\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
- " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
- " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
-- " addiu $1, $1, 0x100\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \
- " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
- " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
-@@ -396,25 +398,25 @@ static inline void invalidate_tcache_page(unsigned long addr)
- " .set noat\n" \
- " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
- " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-+ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
-- " addiu $1, %0, 0x100\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
-- " addiu $1, %0, 0x100\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
-- " addiu $1, %0, 0x100\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
-- " addiu $1, %0, 0x100\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
-- " addiu $1, %0, 0x100\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
-- " addiu $1, %0, 0x100\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " .set pop\n" \
-@@ -429,39 +431,38 @@ static inline void invalidate_tcache_page(unsigned long addr)
- " .set mips64r6\n" \
- " .set noat\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-- " addiu $1, %0, 0x100\n" \
-+ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
-+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
-+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " .set pop\n" \
- : \
- : "r" (base), \
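
The r4kcache.h conversion above leans on the kernel's two-level __stringify() from <linux/stringify.h>: the extra indirection lets LONG_ADDIU expand to the proper opcode before '#' stringizes it, and string-literal concatenation splices the result into the asm template. A standalone demonstration (mapping LONG_ADDIU to daddiu is an assumption for a 64-bit build):

    #include <stdio.h>

    #define __stringify_1(x) #x
    #define __stringify(x)   __stringify_1(x)

    #define LONG_ADDIU daddiu   /* assumed 64-bit spelling */

    int main(void)
    {
        puts(__stringify(LONG_ADDIU) " $1, $1, 0x100"); /* daddiu ...   */
        puts(__stringify_1(LONG_ADDIU));                /* "LONG_ADDIU" */
        return 0;
    }
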
-diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
-index b4548690ade9..1fca2e0793dc 100644
---- a/arch/mips/include/asm/spinlock.h
-+++ b/arch/mips/include/asm/spinlock.h
-@@ -263,7 +263,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
- if (R10000_LLSC_WAR) {
- __asm__ __volatile__(
- "1: ll %1, %2 # arch_read_unlock \n"
-- " addiu %1, 1 \n"
-+ " addiu %1, -1 \n"
- " sc %1, %0 \n"
- " beqzl %1, 1b \n"
- : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
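
The spinlock.h fix above is a one-character bug with real consequences: arch_read_unlock() must decrement the reader count inside the ll/sc retry loop, but "addiu %1, 1" incremented it, so the lock could never drain. Roughly the same protocol in portable C11 atomics, as a deliberately simplified sketch that ignores the writer path:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int readers;

    static void arch_read_lock(void)   { atomic_fetch_add(&readers, 1); }
    static void arch_read_unlock(void) { atomic_fetch_sub(&readers, 1); }

    int main(void)
    {
        arch_read_lock();
        arch_read_lock();
        arch_read_unlock();
        printf("readers = %d\n", atomic_load(&readers)); /* 1 */
        return 0;
    }
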
-diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
-index af41ba6db960..7791840cf22c 100644
---- a/arch/mips/kernel/entry.S
-+++ b/arch/mips/kernel/entry.S
-@@ -10,6 +10,7 @@
-
- #include <asm/asm.h>
- #include <asm/asmmacro.h>
-+#include <asm/compiler.h>
- #include <asm/regdef.h>
- #include <asm/mipsregs.h>
- #include <asm/stackframe.h>
-@@ -185,7 +186,7 @@ syscall_exit_work:
- * For C code use the inline version named instruction_hazard().
- */
- LEAF(mips_ihb)
-- .set mips32r2
-+ .set MIPS_ISA_LEVEL_RAW
- jr.hb ra
- nop
- END(mips_ihb)
-diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
-index bed7590e475f..d5589bedd0a4 100644
---- a/arch/mips/kernel/smp-cps.c
-+++ b/arch/mips/kernel/smp-cps.c
-@@ -88,6 +88,12 @@ static void __init cps_smp_setup(void)
-
- /* Make core 0 coherent with everything */
- write_gcr_cl_coherence(0xff);
-+
-+#ifdef CONFIG_MIPS_MT_FPAFF
-+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
-+ if (cpu_has_fpu)
-+ cpu_set(0, mt_fpu_cpumask);
-+#endif /* CONFIG_MIPS_MT_FPAFF */
- }
-
- static void __init cps_prepare_cpus(unsigned int max_cpus)
-diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
-index 7e3ea7766822..77d96db8253c 100644
---- a/arch/mips/mm/cache.c
-+++ b/arch/mips/mm/cache.c
-@@ -119,36 +119,37 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
-
- EXPORT_SYMBOL(__flush_anon_page);
-
--static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
-+void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
-+{
-+ unsigned long addr;
-+
-+ if (PageHighMem(page))
-+ return;
-+
-+ addr = (unsigned long) page_address(page);
-+ flush_data_cache_page(addr);
-+}
-+EXPORT_SYMBOL_GPL(__flush_icache_page);
-+
-+void __update_cache(struct vm_area_struct *vma, unsigned long address,
-+ pte_t pte)
- {
- struct page *page;
-- unsigned long pfn = pte_pfn(pteval);
-+ unsigned long pfn, addr;
-+ int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
-
-+ pfn = pte_pfn(pte);
- if (unlikely(!pfn_valid(pfn)))
- return;
--
- page = pfn_to_page(pfn);
- if (page_mapping(page) && Page_dcache_dirty(page)) {
-- unsigned long page_addr = (unsigned long) page_address(page);
--
-- if (!cpu_has_ic_fills_f_dc ||
-- pages_do_alias(page_addr, address & PAGE_MASK))
-- flush_data_cache_page(page_addr);
-+ addr = (unsigned long) page_address(page);
-+ if (exec || pages_do_alias(addr, address & PAGE_MASK))
-+ flush_data_cache_page(addr);
- ClearPageDcacheDirty(page);
- }
- }
-
--void set_pte_at(struct mm_struct *mm, unsigned long addr,
-- pte_t *ptep, pte_t pteval)
--{
-- if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
-- if (pte_present(pteval))
-- mips_flush_dcache_from_pte(pteval, addr);
-- }
--
-- set_pte(ptep, pteval);
--}
--
- unsigned long _page_cachable_default;
- EXPORT_SYMBOL(_page_cachable_default);
-
-diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
-index d75ff73a2012..a79fd0af0224 100644
---- a/arch/mips/mm/tlbex.c
-+++ b/arch/mips/mm/tlbex.c
-@@ -501,26 +501,9 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
- case tlb_indexed: tlbw = uasm_i_tlbwi; break;
- }
-
-- if (cpu_has_mips_r2_exec_hazard) {
-- /*
-- * The architecture spec says an ehb is required here,
-- * but a number of cores do not have the hazard and
-- * using an ehb causes an expensive pipeline stall.
-- */
-- switch (current_cpu_type()) {
-- case CPU_M14KC:
-- case CPU_74K:
-- case CPU_1074K:
-- case CPU_PROAPTIV:
-- case CPU_P5600:
-- case CPU_M5150:
-- case CPU_QEMU_GENERIC:
-- break;
--
-- default:
-+ if (cpu_has_mips_r2_r6) {
-+ if (cpu_has_mips_r2_exec_hazard)
- uasm_i_ehb(p);
-- break;
-- }
- tlbw(p);
- return;
- }
-diff --git a/arch/mips/netlogic/xlp/ahci-init-xlp2.c b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
-index c83dbf3689e2..7b066a44e679 100644
---- a/arch/mips/netlogic/xlp/ahci-init-xlp2.c
-+++ b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
-@@ -203,6 +203,7 @@ static u8 read_phy_reg(u64 regbase, u32 addr, u32 physel)
- static void config_sata_phy(u64 regbase)
- {
- u32 port, i, reg;
-+ u8 val;
-
- for (port = 0; port < 2; port++) {
- for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
-@@ -210,6 +211,18 @@ static void config_sata_phy(u64 regbase)
-
- for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
- write_phy_reg(regbase, reg, port, sata_phy_config2[i]);
-+
-+ /* Fix for PHY link up failures at lower temperatures */
-+ write_phy_reg(regbase, 0x800F, port, 0x1f);
-+
-+ val = read_phy_reg(regbase, 0x0029, port);
-+ write_phy_reg(regbase, 0x0029, port, val | (0x7 << 1));
-+
-+ val = read_phy_reg(regbase, 0x0056, port);
-+ write_phy_reg(regbase, 0x0056, port, val & ~(1 << 3));
-+
-+ val = read_phy_reg(regbase, 0x0018, port);
-+ write_phy_reg(regbase, 0x0018, port, val & ~(0x7 << 0));
- }
- }
-
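
The SATA PHY workaround above is the standard read-modify-write pattern: read the register, OR in bits to set or AND with the complement to clear, and write it back so unrelated bits survive. The bit arithmetic in isolation, with an illustrative starting value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t val = 0x40;       /* pretend read_phy_reg() returned this */

        val |=  (0x7 << 1);       /* set bits 3:1,   as for reg 0x0029 */
        val &= ~(1 << 3);         /* clear bit 3,    as for reg 0x0056 */
        val &= ~(0x7 << 0);       /* clear bits 2:0, as for reg 0x0018 */
        printf("0x%02x\n", val);  /* 0x40 */
        return 0;
    }
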
-diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
-index 300591c6278d..2eda01e6e08f 100644
---- a/arch/mips/pci/Makefile
-+++ b/arch/mips/pci/Makefile
-@@ -43,7 +43,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o
- obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o
- obj-$(CONFIG_LANTIQ) += fixup-lantiq.o
- obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
--obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o
-+obj-$(CONFIG_SOC_RT288X) += pci-rt2880.o
- obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o
- obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
- obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
-diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
-index a04af55d89f1..c258cd406fbb 100644
---- a/arch/mips/pci/pci-octeon.c
-+++ b/arch/mips/pci/pci-octeon.c
-@@ -214,6 +214,8 @@ const char *octeon_get_pci_interrupts(void)
- return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
- case CVMX_BOARD_TYPE_BBGW_REF:
- return "AABCD";
-+ case CVMX_BOARD_TYPE_CUST_DSR1000N:
-+ return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
- case CVMX_BOARD_TYPE_THUNDER:
- case CVMX_BOARD_TYPE_EBH3000:
- default:
-@@ -271,9 +273,6 @@ static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
- pci_addr.s.func = devfn & 0x7;
- pci_addr.s.reg = reg;
-
--#if PCI_CONFIG_SPACE_DELAY
-- udelay(PCI_CONFIG_SPACE_DELAY);
--#endif
- switch (size) {
- case 4:
- *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
-@@ -308,9 +307,6 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
- pci_addr.s.func = devfn & 0x7;
- pci_addr.s.reg = reg;
-
--#if PCI_CONFIG_SPACE_DELAY
-- udelay(PCI_CONFIG_SPACE_DELAY);
--#endif
- switch (size) {
- case 4:
- cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
-diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
-index 1bb0b2bf8d6e..99f3db4f0a9b 100644
---- a/arch/mips/pci/pcie-octeon.c
-+++ b/arch/mips/pci/pcie-octeon.c
-@@ -1762,14 +1762,6 @@ static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus,
- default:
- return PCIBIOS_FUNC_NOT_SUPPORTED;
- }
--#if PCI_CONFIG_SPACE_DELAY
-- /*
-- * Delay on writes so that devices have time to come up. Some
-- * bridges need this to allow time for the secondary busses to
-- * work
-- */
-- udelay(PCI_CONFIG_SPACE_DELAY);
--#endif
- return PCIBIOS_SUCCESSFUL;
- }
-
-diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
-index b1c52ca580f9..e9bc8c96174e 100644
---- a/arch/mips/ralink/Kconfig
-+++ b/arch/mips/ralink/Kconfig
-@@ -7,6 +7,11 @@ config CLKEVT_RT3352
- select CLKSRC_OF
- select CLKSRC_MMIO
-
-+config RALINK_ILL_ACC
-+ bool
-+ depends on SOC_RT305X
-+ default y
-+
- choice
- prompt "Ralink SoC selection"
- default SOC_RT305X
-diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
-index a7a3edd28beb..f23179e84128 100644
---- a/drivers/acpi/sbs.c
-+++ b/drivers/acpi/sbs.c
-@@ -670,7 +670,7 @@ static int acpi_sbs_add(struct acpi_device *device)
- if (!sbs_manager_broken) {
- result = acpi_manager_get_info(sbs);
- if (!result) {
-- sbs->manager_present = 0;
-+ sbs->manager_present = 1;
- for (id = 0; id < MAX_SBS_BAT; ++id)
- if ((sbs->batteries_supported & (1 << id)))
- acpi_battery_add(sbs, id);
-diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
-index b40af3203089..b67066d0d9a6 100644
---- a/drivers/block/rbd.c
-+++ b/drivers/block/rbd.c
-@@ -2264,6 +2264,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
- result, xferred);
- if (!img_request->result)
- img_request->result = result;
-+ /*
-+ * Need to end I/O on the entire obj_request worth of
-+ * bytes in case of error.
-+ */
-+ xferred = obj_request->length;
- }
-
- /* Image object requests don't own their page array */
-diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
-index 9bd56116fd5a..1afc0b419da2 100644
---- a/drivers/gpu/drm/radeon/atombios_crtc.c
-+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
-@@ -580,6 +580,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
- else
- radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
-
-+ /* if there is no audio, set MINM_OVER_MAXP */
-+ if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
-+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
- if (rdev->family < CHIP_RV770)
- radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
- /* use frac fb div on APUs */
-diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
-index c39c1d0d9d4e..f20eb32406d1 100644
---- a/drivers/gpu/drm/radeon/atombios_encoders.c
-+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
-@@ -1729,17 +1729,15 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- int encoder_mode = atombios_get_encoder_mode(encoder);
-
- DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
- radeon_encoder->encoder_id, mode, radeon_encoder->devices,
- radeon_encoder->active_device);
-
-- if (connector && (radeon_audio != 0) &&
-+ if ((radeon_audio != 0) &&
- ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
-- (ENCODER_MODE_IS_DP(encoder_mode) &&
-- drm_detect_monitor_audio(radeon_connector_edid(connector)))))
-+ ENCODER_MODE_IS_DP(encoder_mode)))
- radeon_audio_dpms(encoder, mode);
-
- switch (radeon_encoder->encoder_id) {
-diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
-index 3adc2afe32aa..68fd9fc677e3 100644
---- a/drivers/gpu/drm/radeon/dce6_afmt.c
-+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
-@@ -295,28 +295,3 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
- WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
- }
- }
--
--void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
--{
-- struct drm_device *dev = encoder->dev;
-- struct radeon_device *rdev = dev->dev_private;
-- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
--
-- if (!dig || !dig->afmt)
-- return;
--
-- if (enable) {
-- WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
-- EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
-- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
-- EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
-- EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
-- EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
-- EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
-- } else {
-- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
-- }
--
-- dig->afmt->enabled = enable;
--}
-diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
-index c18d4ecbd95d..0926739c9fa7 100644
---- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
-+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
-@@ -219,13 +219,9 @@ void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
- WREG32(AFMT_AVI_INFO3 + offset,
- frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
-
-- WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
-- HDMI_AVI_INFO_SEND | /* enable AVI info frames */
-- HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
--
- WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
-- HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
-- ~HDMI_AVI_INFO_LINE_MASK);
-+ HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
-+ ~HDMI_AVI_INFO_LINE_MASK);
- }
-
- void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
-@@ -370,9 +366,13 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
- WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
- AFMT_AUDIO_CHANNEL_ENABLE(0xff));
-
-+ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
-+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
-+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
-+
- /* allow 60958 channel status and send audio packets fields to be updated */
-- WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
-- AFMT_AUDIO_SAMPLE_SEND | AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
-+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
-+ AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
- }
-
-
-@@ -398,17 +398,26 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
- return;
-
- if (enable) {
-- WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset,
-- HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
--
-- WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
-- HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
-- HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
-+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
-- WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
-- HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
-- HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
-+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-+ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
-+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
-+ HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
-+ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
-+ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
-+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
-+ AFMT_AUDIO_SAMPLE_SEND);
-+ } else {
-+ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
-+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
-+ HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
-+ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
-+ ~AFMT_AUDIO_SAMPLE_SEND);
-+ }
- } else {
-+ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
-+ ~AFMT_AUDIO_SAMPLE_SEND);
- WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
- }
-
-@@ -424,20 +433,24 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
- if (!dig || !dig->afmt)
- return;
-
-- if (enable) {
-+ if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector;
- uint32_t val;
-
-+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
-+ AFMT_AUDIO_SAMPLE_SEND);
-+
- WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
- EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
-
-- if (radeon_connector->con_priv) {
-+ if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) {
- dig_connector = radeon_connector->con_priv;
- val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
- val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
-@@ -457,6 +470,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
- EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
- } else {
- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
-+ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
-+ ~AFMT_AUDIO_SAMPLE_SEND);
- }
-
- dig->afmt->enabled = enable;
-diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
-index dd6606b8e23c..e85894ade95c 100644
---- a/drivers/gpu/drm/radeon/r600_hdmi.c
-+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
-@@ -228,12 +228,13 @@ void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
- WREG32(HDMI0_AVI_INFO3 + offset,
- frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
-
-+ WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
-+ HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
-+
- WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
-- HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
-- HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
-+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
-+ HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
-
-- WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
-- HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
- }
-
- /*
-diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
-index b21ef69a34ac..b7d33a13db9f 100644
---- a/drivers/gpu/drm/radeon/radeon_audio.c
-+++ b/drivers/gpu/drm/radeon/radeon_audio.c
-@@ -102,7 +102,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
- void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
- void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
- void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
--void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
-
- static const u32 pin_offsets[7] =
- {
-@@ -240,7 +239,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
- .set_avi_packet = evergreen_set_avi_packet,
- .set_audio_packet = dce4_set_audio_packet,
- .mode_set = radeon_audio_dp_mode_set,
-- .dpms = dce6_dp_enable,
-+ .dpms = evergreen_dp_enable,
- };
-
- static void radeon_audio_interface_init(struct radeon_device *rdev)
-@@ -461,30 +460,33 @@ void radeon_audio_detect(struct drm_connector *connector,
- if (!connector || !connector->encoder)
- return;
-
-+ if (!radeon_encoder_is_digital(connector->encoder))
-+ return;
-+
- rdev = connector->encoder->dev->dev_private;
- radeon_encoder = to_radeon_encoder(connector->encoder);
- dig = radeon_encoder->enc_priv;
-
-- if (status == connector_status_connected) {
-- struct radeon_connector *radeon_connector;
-- int sink_type;
--
-- if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-- radeon_encoder->audio = NULL;
-- return;
-- }
-+ if (!dig->afmt)
-+ return;
-
-- radeon_connector = to_radeon_connector(connector);
-- sink_type = radeon_dp_getsinktype(radeon_connector);
-+ if (status == connector_status_connected) {
-+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
-- sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
-+ radeon_dp_getsinktype(radeon_connector) ==
-+ CONNECTOR_OBJECT_ID_DISPLAYPORT)
- radeon_encoder->audio = rdev->audio.dp_funcs;
- else
- radeon_encoder->audio = rdev->audio.hdmi_funcs;
-
- dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
-- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
-+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-+ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
-+ } else {
-+ radeon_audio_enable(rdev, dig->afmt->pin, 0);
-+ dig->afmt->pin = NULL;
-+ }
- } else {
- radeon_audio_enable(rdev, dig->afmt->pin, 0);
- dig->afmt->pin = NULL;
-diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
-index 27def67cb6be..27973e3faf0e 100644
---- a/drivers/gpu/drm/radeon/radeon_connectors.c
-+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
-@@ -1333,8 +1333,10 @@ out:
- /* updated in get modes as well since we need to know if it's analog or digital */
- radeon_connector_update_scratch_regs(connector, ret);
-
-- if (radeon_audio != 0)
-+ if (radeon_audio != 0) {
-+ radeon_connector_get_edid(connector);
- radeon_audio_detect(connector, ret);
-+ }
-
- exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
-@@ -1659,8 +1661,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
-
- radeon_connector_update_scratch_regs(connector, ret);
-
-- if (radeon_audio != 0)
-+ if (radeon_audio != 0) {
-+ radeon_connector_get_edid(connector);
- radeon_audio_detect(connector, ret);
-+ }
-
- out:
- pm_runtime_mark_last_busy(connector->dev->dev);
-diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
-index 4d0f96cc3da4..ab39b85e0f76 100644
---- a/drivers/gpu/drm/radeon/radeon_cs.c
-+++ b/drivers/gpu/drm/radeon/radeon_cs.c
-@@ -88,7 +88,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
- p->dma_reloc_idx = 0;
- /* FIXME: we assume that each relocs use 4 dwords */
- p->nrelocs = chunk->length_dw / 4;
-- p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
-+ p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
- if (p->relocs == NULL) {
- return -ENOMEM;
- }
-@@ -428,7 +428,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
- }
- }
- kfree(parser->track);
-- kfree(parser->relocs);
-+ drm_free_large(parser->relocs);
- drm_free_large(parser->vm_bos);
- for (i = 0; i < parser->nchunks; i++)
- drm_free_large(parser->chunks[i].kdata);
-diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
-index 2a5a4a9e772d..de42fc4a22b8 100644
---- a/drivers/gpu/drm/radeon/radeon_vm.c
-+++ b/drivers/gpu/drm/radeon/radeon_vm.c
-@@ -473,6 +473,23 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
- }
-
- mutex_lock(&vm->mutex);
-+ soffset /= RADEON_GPU_PAGE_SIZE;
-+ eoffset /= RADEON_GPU_PAGE_SIZE;
-+ if (soffset || eoffset) {
-+ struct interval_tree_node *it;
-+ it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
-+ if (it && it != &bo_va->it) {
-+ struct radeon_bo_va *tmp;
-+ tmp = container_of(it, struct radeon_bo_va, it);
-+ /* bo and tmp overlap, invalid offset */
-+ dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
-+ "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
-+ soffset, tmp->bo, tmp->it.start, tmp->it.last);
-+ mutex_unlock(&vm->mutex);
-+ return -EINVAL;
-+ }
-+ }
-+
- if (bo_va->it.start || bo_va->it.last) {
- if (bo_va->addr) {
- /* add a clone of the bo_va to clear the old address */
-@@ -490,6 +507,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
- spin_lock(&vm->status_lock);
- list_add(&tmp->vm_status, &vm->freed);
- spin_unlock(&vm->status_lock);
-+
-+ bo_va->addr = 0;
- }
-
- interval_tree_remove(&bo_va->it, &vm->va);
-@@ -497,21 +516,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
- bo_va->it.last = 0;
- }
-
-- soffset /= RADEON_GPU_PAGE_SIZE;
-- eoffset /= RADEON_GPU_PAGE_SIZE;
- if (soffset || eoffset) {
-- struct interval_tree_node *it;
-- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
-- if (it) {
-- struct radeon_bo_va *tmp;
-- tmp = container_of(it, struct radeon_bo_va, it);
-- /* bo and tmp overlap, invalid offset */
-- dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
-- "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
-- soffset, tmp->bo, tmp->it.start, tmp->it.last);
-- mutex_unlock(&vm->mutex);
-- return -EINVAL;
-- }
- bo_va->it.start = soffset;
- bo_va->it.last = eoffset - 1;
- interval_tree_insert(&bo_va->it, &vm->va);
-@@ -1107,7 +1112,8 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
- list_del(&bo_va->bo_list);
-
- mutex_lock(&vm->mutex);
-- interval_tree_remove(&bo_va->it, &vm->va);
-+ if (bo_va->it.start || bo_va->it.last)
-+ interval_tree_remove(&bo_va->it, &vm->va);
- spin_lock(&vm->status_lock);
- list_del(&bo_va->vm_status);
-
-diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
-index 7be11651b7e6..9dbb3154d559 100644
---- a/drivers/gpu/drm/radeon/si_dpm.c
-+++ b/drivers/gpu/drm/radeon/si_dpm.c
-@@ -2924,6 +2924,7 @@ struct si_dpm_quirk {
- static struct si_dpm_quirk si_dpm_quirk_list[] = {
- /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
- { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
-+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
- { 0, 0, 0, 0 },
- };
-
-diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
-index 3736f71bdec5..18def3022f6e 100644
---- a/drivers/hv/channel_mgmt.c
-+++ b/drivers/hv/channel_mgmt.c
-@@ -787,7 +787,7 @@ int vmbus_request_offers(void)
- {
- struct vmbus_channel_message_header *msg;
- struct vmbus_channel_msginfo *msginfo;
-- int ret, t;
-+ int ret;
-
- msginfo = kmalloc(sizeof(*msginfo) +
- sizeof(struct vmbus_channel_message_header),
-@@ -795,8 +795,6 @@ int vmbus_request_offers(void)
- if (!msginfo)
- return -ENOMEM;
-
-- init_completion(&msginfo->waitevent);
--
- msg = (struct vmbus_channel_message_header *)msginfo->msg;
-
- msg->msgtype = CHANNELMSG_REQUESTOFFERS;
-@@ -810,14 +808,6 @@ int vmbus_request_offers(void)
- goto cleanup;
- }
-
-- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
-- if (t == 0) {
-- ret = -ETIMEDOUT;
-- goto cleanup;
-- }
--
--
--
- cleanup:
- kfree(msginfo);
-
-diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
-index ee394dc68303..ec1ea8ba7aac 100644
---- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
-+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
-@@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
- memoffset = (mtype * (edc_size * 1024 * 1024));
- else {
- mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
-- MA_EXT_MEMORY1_BAR_A));
-+ MA_EXT_MEMORY0_BAR_A));
- memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
- }
-
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-index 3485acf03014..2f1324bed7b3 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work)
- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
- mlx4_en_ptp_overflow_check(mdev);
-
-+ mlx4_en_recover_from_oom(priv);
- queue_delayed_work(mdev->workqueue, &priv->service_task,
- SERVICE_TASK_DELAY);
- }
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-index 698d60de1255..05ec5e151ded 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-@@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
- return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
- }
-
-+static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
-+{
-+ BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
-+ return ring->prod == ring->cons;
-+}
-+
- static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
- {
- *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
-@@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
- ring->cons, ring->prod);
-
- /* Unmap and free Rx buffers */
-- BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
-- while (ring->cons != ring->prod) {
-+ while (!mlx4_en_is_ring_empty(ring)) {
- index = ring->cons & ring->size_mask;
- en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
- mlx4_en_free_rx_desc(priv, ring, index);
-@@ -491,6 +496,23 @@ err_allocator:
- return err;
- }
-
-+/* We recover from out of memory by scheduling our napi poll
-+ * function (mlx4_en_process_cq), which tries to allocate
-+ * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
-+ */
-+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
-+{
-+ int ring;
-+
-+ if (!priv->port_up)
-+ return;
-+
-+ for (ring = 0; ring < priv->rx_ring_num; ring++) {
-+ if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
-+ napi_reschedule(&priv->rx_cq[ring]->napi);
-+ }
-+}
-+
- void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring **pring,
- u32 size, u16 stride)
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-index 55f9f5c5344e..8c234ec1d8aa 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
- ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
- ring->queue_index = queue_index;
-
-- if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
-- cpumask_set_cpu(queue_index, &ring->affinity_mask);
-+ if (queue_index < priv->num_tx_rings_p_up)
-+ cpumask_set_cpu_local_first(queue_index,
-+ priv->mdev->dev->numa_node,
-+ &ring->affinity_mask);
-
- *pring = ring;
- return 0;
-@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
-
- err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
- &ring->qp, &ring->qp_state);
-- if (!user_prio && cpu_online(ring->queue_index))
-+ if (!cpumask_empty(&ring->affinity_mask))
- netif_set_xps_queue(priv->dev, &ring->affinity_mask,
- ring->queue_index);
-
-diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
-index ebbe244e80dd..8687c8d54227 100644
---- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
-+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
-@@ -790,6 +790,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
- void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring);
- void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
-+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
- int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring **pring,
- u32 size, u16 stride, int node);
-diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
-index 7600639db4c4..add419d6ff34 100644
---- a/drivers/scsi/3w-9xxx.c
-+++ b/drivers/scsi/3w-9xxx.c
-@@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
- static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
- static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
- static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
--static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
-
- /* Functions */
-
-@@ -1340,11 +1339,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
- }
-
- /* Now complete the io */
-+ scsi_dma_unmap(cmd);
-+ cmd->scsi_done(cmd);
- tw_dev->state[request_id] = TW_S_COMPLETED;
- twa_free_request_id(tw_dev, request_id);
- tw_dev->posted_request_count--;
-- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
-- twa_unmap_scsi_data(tw_dev, request_id);
- }
-
- /* Check for valid status after each drain */
-@@ -1402,26 +1401,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
- }
- } /* End twa_load_sgl() */
-
--/* This function will perform a pci-dma mapping for a scatter gather list */
--static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
--{
-- int use_sg;
-- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
--
-- use_sg = scsi_dma_map(cmd);
-- if (!use_sg)
-- return 0;
-- else if (use_sg < 0) {
-- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
-- return 0;
-- }
--
-- cmd->SCp.phase = TW_PHASE_SGLIST;
-- cmd->SCp.have_data_in = use_sg;
--
-- return use_sg;
--} /* End twa_map_scsi_sg_data() */
--
- /* This function will poll for a response interrupt of a request */
- static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
- {
-@@ -1600,9 +1579,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
- (tw_dev->state[i] != TW_S_INITIAL) &&
- (tw_dev->state[i] != TW_S_COMPLETED)) {
- if (tw_dev->srb[i]) {
-- tw_dev->srb[i]->result = (DID_RESET << 16);
-- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
-- twa_unmap_scsi_data(tw_dev, i);
-+ struct scsi_cmnd *cmd = tw_dev->srb[i];
-+
-+ cmd->result = (DID_RESET << 16);
-+ scsi_dma_unmap(cmd);
-+ cmd->scsi_done(cmd);
- }
- }
- }
-@@ -1781,21 +1762,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
- /* Save the scsi command for use by the ISR */
- tw_dev->srb[request_id] = SCpnt;
-
-- /* Initialize phase to zero */
-- SCpnt->SCp.phase = TW_PHASE_INITIAL;
--
- retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
- switch (retval) {
- case SCSI_MLQUEUE_HOST_BUSY:
-+ scsi_dma_unmap(SCpnt);
- twa_free_request_id(tw_dev, request_id);
-- twa_unmap_scsi_data(tw_dev, request_id);
- break;
- case 1:
-- tw_dev->state[request_id] = TW_S_COMPLETED;
-- twa_free_request_id(tw_dev, request_id);
-- twa_unmap_scsi_data(tw_dev, request_id);
- SCpnt->result = (DID_ERROR << 16);
-+ scsi_dma_unmap(SCpnt);
- done(SCpnt);
-+ tw_dev->state[request_id] = TW_S_COMPLETED;
-+ twa_free_request_id(tw_dev, request_id);
- retval = 0;
- }
- out:
-@@ -1863,8 +1841,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
- command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
- command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
- } else {
-- sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
-- if (sg_count == 0)
-+ sg_count = scsi_dma_map(srb);
-+ if (sg_count < 0)
- goto out;
-
- scsi_for_each_sg(srb, sg, sg_count, i) {
-@@ -1979,15 +1957,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
- return(table[index].text);
- } /* End twa_string_lookup() */
-
--/* This function will perform a pci-dma unmap */
--static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
--{
-- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
--
-- if (cmd->SCp.phase == TW_PHASE_SGLIST)
-- scsi_dma_unmap(cmd);
--} /* End twa_unmap_scsi_data() */
--
- /* This function gets called when a disk is coming on-line */
- static int twa_slave_configure(struct scsi_device *sdev)
- {
-diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
-index 040f7214e5b7..0fdc83cfa0e1 100644
---- a/drivers/scsi/3w-9xxx.h
-+++ b/drivers/scsi/3w-9xxx.h
-@@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = {
- #define TW_CURRENT_DRIVER_BUILD 0
- #define TW_CURRENT_DRIVER_BRANCH 0
-
--/* Phase defines */
--#define TW_PHASE_INITIAL 0
--#define TW_PHASE_SINGLE 1
--#define TW_PHASE_SGLIST 2
--
- /* Misc defines */
- #define TW_9550SX_DRAIN_COMPLETED 0xFFFF
- #define TW_SECTOR_SIZE 512
-diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
-index 2361772d5909..f8374850f714 100644
---- a/drivers/scsi/3w-sas.c
-+++ b/drivers/scsi/3w-sas.c
-@@ -290,26 +290,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
- return 0;
- } /* End twl_post_command_packet() */
-
--/* This function will perform a pci-dma mapping for a scatter gather list */
--static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
--{
-- int use_sg;
-- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
--
-- use_sg = scsi_dma_map(cmd);
-- if (!use_sg)
-- return 0;
-- else if (use_sg < 0) {
-- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
-- return 0;
-- }
--
-- cmd->SCp.phase = TW_PHASE_SGLIST;
-- cmd->SCp.have_data_in = use_sg;
--
-- return use_sg;
--} /* End twl_map_scsi_sg_data() */
--
- /* This function hands scsi cdb's to the firmware */
- static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
- {
-@@ -357,8 +337,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
- if (!sglistarg) {
- /* Map sglist from scsi layer to cmd packet */
- if (scsi_sg_count(srb)) {
-- sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
-- if (sg_count == 0)
-+ sg_count = scsi_dma_map(srb);
-+ if (sg_count <= 0)
- goto out;
-
- scsi_for_each_sg(srb, sg, sg_count, i) {
-@@ -1102,15 +1082,6 @@ out:
- return retval;
- } /* End twl_initialize_device_extension() */
-
--/* This function will perform a pci-dma unmap */
--static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
--{
-- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
--
-- if (cmd->SCp.phase == TW_PHASE_SGLIST)
-- scsi_dma_unmap(cmd);
--} /* End twl_unmap_scsi_data() */
--
- /* This function will handle attention interrupts */
- static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
- {
-@@ -1251,11 +1222,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
- }
-
- /* Now complete the io */
-+ scsi_dma_unmap(cmd);
-+ cmd->scsi_done(cmd);
- tw_dev->state[request_id] = TW_S_COMPLETED;
- twl_free_request_id(tw_dev, request_id);
- tw_dev->posted_request_count--;
-- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
-- twl_unmap_scsi_data(tw_dev, request_id);
- }
-
- /* Check for another response interrupt */
-@@ -1400,10 +1371,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
- if ((tw_dev->state[i] != TW_S_FINISHED) &&
- (tw_dev->state[i] != TW_S_INITIAL) &&
- (tw_dev->state[i] != TW_S_COMPLETED)) {
-- if (tw_dev->srb[i]) {
-- tw_dev->srb[i]->result = (DID_RESET << 16);
-- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
-- twl_unmap_scsi_data(tw_dev, i);
-+ struct scsi_cmnd *cmd = tw_dev->srb[i];
-+
-+ if (cmd) {
-+ cmd->result = (DID_RESET << 16);
-+ scsi_dma_unmap(cmd);
-+ cmd->scsi_done(cmd);
- }
- }
- }
-@@ -1507,9 +1480,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
- /* Save the scsi command for use by the ISR */
- tw_dev->srb[request_id] = SCpnt;
-
-- /* Initialize phase to zero */
-- SCpnt->SCp.phase = TW_PHASE_INITIAL;
--
- retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
- if (retval) {
- tw_dev->state[request_id] = TW_S_COMPLETED;
-diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
-index d474892701d4..fec6449c7595 100644
---- a/drivers/scsi/3w-sas.h
-+++ b/drivers/scsi/3w-sas.h
-@@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] =
- #define TW_CURRENT_DRIVER_BUILD 0
- #define TW_CURRENT_DRIVER_BRANCH 0
-
--/* Phase defines */
--#define TW_PHASE_INITIAL 0
--#define TW_PHASE_SGLIST 2
--
- /* Misc defines */
- #define TW_SECTOR_SIZE 512
- #define TW_MAX_UNITS 32
-diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
-index c75f2048319f..2940bd769936 100644
---- a/drivers/scsi/3w-xxxx.c
-+++ b/drivers/scsi/3w-xxxx.c
-@@ -1271,32 +1271,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev)
- return 0;
- } /* End tw_initialize_device_extension() */
-
--static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
--{
-- int use_sg;
--
-- dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
--
-- use_sg = scsi_dma_map(cmd);
-- if (use_sg < 0) {
-- printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
-- return 0;
-- }
--
-- cmd->SCp.phase = TW_PHASE_SGLIST;
-- cmd->SCp.have_data_in = use_sg;
--
-- return use_sg;
--} /* End tw_map_scsi_sg_data() */
--
--static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
--{
-- dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
--
-- if (cmd->SCp.phase == TW_PHASE_SGLIST)
-- scsi_dma_unmap(cmd);
--} /* End tw_unmap_scsi_data() */
--
- /* This function will reset a device extension */
- static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
- {
-@@ -1319,8 +1293,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
- srb = tw_dev->srb[i];
- if (srb != NULL) {
- srb->result = (DID_RESET << 16);
-- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
-- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]);
-+ scsi_dma_unmap(srb);
-+ srb->scsi_done(srb);
- }
- }
- }
-@@ -1767,8 +1741,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
- command_packet->byte8.io.lba = lba;
- command_packet->byte6.block_count = num_sectors;
-
-- use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
-- if (!use_sg)
-+ use_sg = scsi_dma_map(srb);
-+ if (use_sg <= 0)
- return 1;
-
- scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
-@@ -1955,9 +1929,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
- /* Save the scsi command for use by the ISR */
- tw_dev->srb[request_id] = SCpnt;
-
-- /* Initialize phase to zero */
-- SCpnt->SCp.phase = TW_PHASE_INITIAL;
--
- switch (*command) {
- case READ_10:
- case READ_6:
-@@ -2185,12 +2156,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
-
- /* Now complete the io */
- if ((error != TW_ISR_DONT_COMPLETE)) {
-+ scsi_dma_unmap(tw_dev->srb[request_id]);
-+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
- tw_dev->state[request_id] = TW_S_COMPLETED;
- tw_state_request_finish(tw_dev, request_id);
- tw_dev->posted_request_count--;
-- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
--
-- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
- }
- }
-
-diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
-index 29b0b84ed69e..6f65e663d393 100644
---- a/drivers/scsi/3w-xxxx.h
-+++ b/drivers/scsi/3w-xxxx.h
-@@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] =
- #define TW_AEN_SMART_FAIL 0x000F
- #define TW_AEN_SBUF_FAIL 0x0024
-
--/* Phase defines */
--#define TW_PHASE_INITIAL 0
--#define TW_PHASE_SINGLE 1
--#define TW_PHASE_SGLIST 2
--
- /* Misc defines */
- #define TW_ALIGNMENT_6000 64 /* 64 bytes */
- #define TW_ALIGNMENT_7000 4 /* 4 bytes */
-diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
-index 262ab837a704..9f77d23239a2 100644
---- a/drivers/scsi/scsi_devinfo.c
-+++ b/drivers/scsi/scsi_devinfo.c
-@@ -226,6 +226,7 @@ static struct {
- {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
- {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
- {"Promise", "", NULL, BLIST_SPARSELUN},
-+ {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
- {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
- {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
- {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
-diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
-index 9c0a520d933c..3e6142f61499 100644
---- a/drivers/scsi/scsi_scan.c
-+++ b/drivers/scsi/scsi_scan.c
-@@ -897,6 +897,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
- */
- if (*bflags & BLIST_MAX_512)
- blk_queue_max_hw_sectors(sdev->request_queue, 512);
-+ /*
-+ * Max 1024 sector transfer length for targets that report incorrect
-+ * max/optimal lengths and relied on the old block layer safe default
-+ */
-+ else if (*bflags & BLIST_MAX_1024)
-+ blk_queue_max_hw_sectors(sdev->request_queue, 1024);
-
- /*
- * Some devices may not want to have a start command automatically
-diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
-index 75b3603906c1..f0d22cdb51cd 100644
---- a/drivers/ssb/Kconfig
-+++ b/drivers/ssb/Kconfig
-@@ -130,6 +130,7 @@ config SSB_DRIVER_MIPS
- bool "SSB Broadcom MIPS core driver"
- depends on SSB && MIPS
- select SSB_SERIAL
-+ select SSB_SFLASH
- help
- Driver for the Sonics Silicon Backplane attached
- Broadcom MIPS core.
-diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
-index 4e959c43f680..6afce7eb3d74 100644
---- a/drivers/tty/serial/atmel_serial.c
-+++ b/drivers/tty/serial/atmel_serial.c
-@@ -880,6 +880,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
- config.direction = DMA_MEM_TO_DEV;
- config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- config.dst_addr = port->mapbase + ATMEL_US_THR;
-+ config.dst_maxburst = 1;
-
- ret = dmaengine_slave_config(atmel_port->chan_tx,
- &config);
-@@ -1059,6 +1060,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
- config.direction = DMA_DEV_TO_MEM;
- config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- config.src_addr = port->mapbase + ATMEL_US_RHR;
-+ config.src_maxburst = 1;
-
- ret = dmaengine_slave_config(atmel_port->chan_rx,
- &config);
-diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
-index 33fb94f78967..0a52c8b55a5f 100644
---- a/drivers/tty/serial/of_serial.c
-+++ b/drivers/tty/serial/of_serial.c
-@@ -344,7 +344,6 @@ static struct of_device_id of_platform_serial_table[] = {
- { .compatible = "ibm,qpace-nwp-serial",
- .data = (void *)PORT_NWPSERIAL, },
- #endif
-- { .type = "serial", .data = (void *)PORT_UNKNOWN, },
- { /* end of list */ },
- };
-
-diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
-index 189f52e3111f..a0099a7f60d4 100644
---- a/drivers/tty/serial/uartlite.c
-+++ b/drivers/tty/serial/uartlite.c
-@@ -632,7 +632,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match);
-
- static int ulite_probe(struct platform_device *pdev)
- {
-- struct resource *res, *res2;
-+ struct resource *res;
-+ int irq;
- int id = pdev->id;
- #ifdef CONFIG_OF
- const __be32 *prop;
-@@ -646,11 +647,11 @@ static int ulite_probe(struct platform_device *pdev)
- if (!res)
- return -ENODEV;
-
-- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-- if (!res2)
-- return -ENODEV;
-+ irq = platform_get_irq(pdev, 0);
-+ if (irq <= 0)
-+ return -ENXIO;
-
-- return ulite_assign(&pdev->dev, id, res->start, res2->start);
-+ return ulite_assign(&pdev->dev, id, res->start, irq);
- }
-
- static int ulite_remove(struct platform_device *pdev)
-diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
-index cff531a51a78..54853a02ce9e 100644
---- a/drivers/tty/serial/xilinx_uartps.c
-+++ b/drivers/tty/serial/xilinx_uartps.c
-@@ -1325,9 +1325,9 @@ static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend,
- */
- static int cdns_uart_probe(struct platform_device *pdev)
- {
-- int rc, id;
-+ int rc, id, irq;
- struct uart_port *port;
-- struct resource *res, *res2;
-+ struct resource *res;
- struct cdns_uart *cdns_uart_data;
-
- cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
-@@ -1374,9 +1374,9 @@ static int cdns_uart_probe(struct platform_device *pdev)
- goto err_out_clk_disable;
- }
-
-- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-- if (!res2) {
-- rc = -ENODEV;
-+ irq = platform_get_irq(pdev, 0);
-+ if (irq <= 0) {
-+ rc = -ENXIO;
- goto err_out_clk_disable;
- }
-
-@@ -1405,7 +1405,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
- * and triggers invocation of the config_port() entry point.
- */
- port->mapbase = res->start;
-- port->irq = res2->start;
-+ port->irq = irq;
- port->dev = &pdev->dev;
- port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
- port->private_data = cdns_uart_data;
-diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
-index 562e581f6765..3770330a2201 100644
---- a/drivers/usb/chipidea/otg_fsm.c
-+++ b/drivers/usb/chipidea/otg_fsm.c
-@@ -537,7 +537,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
- {
- struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
-
-- mutex_unlock(&fsm->lock);
- if (on) {
- ci_role_stop(ci);
- ci_role_start(ci, CI_ROLE_HOST);
-@@ -546,7 +545,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
- hw_device_reset(ci);
- ci_role_start(ci, CI_ROLE_GADGET);
- }
-- mutex_lock(&fsm->lock);
- return 0;
- }
-
-@@ -554,12 +552,10 @@ static int ci_otg_start_gadget(struct otg_fsm *fsm, int on)
- {
- struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
-
-- mutex_unlock(&fsm->lock);
- if (on)
- usb_gadget_vbus_connect(&ci->gadget);
- else
- usb_gadget_vbus_disconnect(&ci->gadget);
-- mutex_lock(&fsm->lock);
-
- return 0;
- }
-diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
-index 683617714e7c..220c0fd059bb 100644
---- a/drivers/usb/class/cdc-acm.c
-+++ b/drivers/usb/class/cdc-acm.c
-@@ -1133,11 +1133,16 @@ static int acm_probe(struct usb_interface *intf,
- }
-
- while (buflen > 0) {
-+ elength = buffer[0];
-+ if (!elength) {
-+ dev_err(&intf->dev, "skipping garbage byte\n");
-+ elength = 1;
-+ goto next_desc;
-+ }
- if (buffer[1] != USB_DT_CS_INTERFACE) {
- dev_err(&intf->dev, "skipping garbage\n");
- goto next_desc;
- }
-- elength = buffer[0];
-
- switch (buffer[2]) {
- case USB_CDC_UNION_TYPE: /* we've found it */
-diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
-index 9893d696fc97..f58caa9e6a27 100644
---- a/drivers/usb/storage/uas-detect.h
-+++ b/drivers/usb/storage/uas-detect.h
-@@ -51,7 +51,8 @@ static int uas_find_endpoints(struct usb_host_interface *alt,
- }
-
- static int uas_use_uas_driver(struct usb_interface *intf,
-- const struct usb_device_id *id)
-+ const struct usb_device_id *id,
-+ unsigned long *flags_ret)
- {
- struct usb_host_endpoint *eps[4] = { };
- struct usb_device *udev = interface_to_usbdev(intf);
-@@ -73,7 +74,7 @@ static int uas_use_uas_driver(struct usb_interface *intf,
- * this writing the following versions exist:
- * ASM1051 - no uas support version
- * ASM1051 - with broken (*) uas support
-- * ASM1053 - with working uas support
-+ * ASM1053 - with working uas support, but problems with large xfers
- * ASM1153 - with working uas support
- *
- * Devices with these chips re-use a number of device-ids over the
-@@ -103,6 +104,9 @@ static int uas_use_uas_driver(struct usb_interface *intf,
- } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
- /* Possibly an ASM1051, disable uas */
- flags |= US_FL_IGNORE_UAS;
-+ } else {
-+ /* ASM1053, these have issues with large transfers */
-+ flags |= US_FL_MAX_SECTORS_240;
- }
- }
-
-@@ -132,5 +136,8 @@ static int uas_use_uas_driver(struct usb_interface *intf,
- return 0;
- }
-
-+ if (flags_ret)
-+ *flags_ret = flags;
-+
- return 1;
- }
-diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
-index 6cdabdc119a7..6d3122afeed3 100644
---- a/drivers/usb/storage/uas.c
-+++ b/drivers/usb/storage/uas.c
-@@ -759,7 +759,10 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
-
- static int uas_slave_alloc(struct scsi_device *sdev)
- {
-- sdev->hostdata = (void *)sdev->host->hostdata;
-+ struct uas_dev_info *devinfo =
-+ (struct uas_dev_info *)sdev->host->hostdata;
-+
-+ sdev->hostdata = devinfo;
-
- /* USB has unusual DMA-alignment requirements: Although the
- * starting address of each scatter-gather element doesn't matter,
-@@ -778,6 +781,11 @@ static int uas_slave_alloc(struct scsi_device *sdev)
- */
- blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
-
-+ if (devinfo->flags & US_FL_MAX_SECTORS_64)
-+ blk_queue_max_hw_sectors(sdev->request_queue, 64);
-+ else if (devinfo->flags & US_FL_MAX_SECTORS_240)
-+ blk_queue_max_hw_sectors(sdev->request_queue, 240);
-+
- return 0;
- }
-
-@@ -887,8 +895,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
- struct Scsi_Host *shost = NULL;
- struct uas_dev_info *devinfo;
- struct usb_device *udev = interface_to_usbdev(intf);
-+ unsigned long dev_flags;
-
-- if (!uas_use_uas_driver(intf, id))
-+ if (!uas_use_uas_driver(intf, id, &dev_flags))
- return -ENODEV;
-
- if (uas_switch_interface(udev, intf))
-@@ -910,8 +919,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
- devinfo->udev = udev;
- devinfo->resetting = 0;
- devinfo->shutdown = 0;
-- devinfo->flags = id->driver_info;
-- usb_stor_adjust_quirks(udev, &devinfo->flags);
-+ devinfo->flags = dev_flags;
- init_usb_anchor(&devinfo->cmd_urbs);
- init_usb_anchor(&devinfo->sense_urbs);
- init_usb_anchor(&devinfo->data_urbs);
-diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
-index 5600c33fcadb..6c10c888f35f 100644
---- a/drivers/usb/storage/usb.c
-+++ b/drivers/usb/storage/usb.c
-@@ -479,7 +479,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
- US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
- US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
- US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
-- US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES);
-+ US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
-+ US_FL_MAX_SECTORS_240);
-
- p = quirks;
- while (*p) {
-@@ -520,6 +521,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
- case 'f':
- f |= US_FL_NO_REPORT_OPCODES;
- break;
-+ case 'g':
-+ f |= US_FL_MAX_SECTORS_240;
-+ break;
- case 'h':
- f |= US_FL_CAPACITY_HEURISTICS;
- break;
-@@ -1080,7 +1084,7 @@ static int storage_probe(struct usb_interface *intf,
-
- /* If uas is enabled and this device can do uas then ignore it. */
- #if IS_ENABLED(CONFIG_USB_UAS)
-- if (uas_use_uas_driver(intf, id))
-+ if (uas_use_uas_driver(intf, id, NULL))
- return -ENXIO;
- #endif
-
-diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
-index f23d4be3280e..2b4c5423672d 100644
---- a/fs/btrfs/ioctl.c
-+++ b/fs/btrfs/ioctl.c
-@@ -2403,7 +2403,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
- "Attempt to delete subvolume %llu during send",
- dest->root_key.objectid);
- err = -EPERM;
-- goto out_dput;
-+ goto out_unlock_inode;
- }
-
- d_invalidate(dentry);
-@@ -2498,6 +2498,7 @@ out_up_write:
- root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
- spin_unlock(&dest->root_item_lock);
- }
-+out_unlock_inode:
- mutex_unlock(&inode->i_mutex);
- if (!err) {
- shrink_dcache_sb(root->fs_info->sb);
-diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
-index bed43081720f..16f6365f65e7 100644
---- a/fs/ext4/extents.c
-+++ b/fs/ext4/extents.c
-@@ -4934,13 +4934,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
- if (ret)
- return ret;
-
-- /*
-- * currently supporting (pre)allocate mode for extent-based
-- * files _only_
-- */
-- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
-- return -EOPNOTSUPP;
--
- if (mode & FALLOC_FL_COLLAPSE_RANGE)
- return ext4_collapse_range(inode, offset, len);
-
-@@ -4962,6 +4955,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
-
- mutex_lock(&inode->i_mutex);
-
-+ /*
-+ * We only support preallocation for extent-based files only
-+ */
-+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
-+ ret = -EOPNOTSUPP;
-+ goto out;
-+ }
-+
- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- offset + len > i_size_read(inode)) {
- new_size = offset + len;
-diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
-index e04d45733976..9a0121376358 100644
---- a/fs/ext4/extents_status.c
-+++ b/fs/ext4/extents_status.c
-@@ -705,6 +705,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
-
- BUG_ON(end < lblk);
-
-+ if ((status & EXTENT_STATUS_DELAYED) &&
-+ (status & EXTENT_STATUS_WRITTEN)) {
-+ ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
-+ " delayed and written which can potentially "
-+ " cause data loss.\n", lblk, len);
-+ WARN_ON(1);
-+ }
-+
- newes.es_lblk = lblk;
- newes.es_len = len;
- ext4_es_store_pblock_status(&newes, pblk, status);
-diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
-index 5cb9a212b86f..852cc521f327 100644
---- a/fs/ext4/inode.c
-+++ b/fs/ext4/inode.c
-@@ -534,6 +534,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
- if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
-+ !(status & EXTENT_STATUS_WRITTEN) &&
- ext4_find_delalloc_range(inode, map->m_lblk,
- map->m_lblk + map->m_len - 1))
- status |= EXTENT_STATUS_DELAYED;
-@@ -638,6 +639,7 @@ found:
- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
- if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
-+ !(status & EXTENT_STATUS_WRITTEN) &&
- ext4_find_delalloc_range(inode, map->m_lblk,
- map->m_lblk + map->m_len - 1))
- status |= EXTENT_STATUS_DELAYED;
-diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
-index d98094a9f476..ff10f3decbc9 100644
---- a/fs/hfsplus/xattr.c
-+++ b/fs/hfsplus/xattr.c
-@@ -806,9 +806,6 @@ end_removexattr:
- static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size, int type)
- {
-- char *xattr_name;
-- int res;
--
- if (!strcmp(name, ""))
- return -EINVAL;
-
-@@ -818,24 +815,19 @@ static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
- */
- if (is_known_namespace(name))
- return -EOPNOTSUPP;
-- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
-- + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
-- if (!xattr_name)
-- return -ENOMEM;
-- strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
-- strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
-
-- res = hfsplus_getxattr(dentry, xattr_name, buffer, size);
-- kfree(xattr_name);
-- return res;
-+ /*
-+ * osx is the namespace we use to indicate an unprefixed
-+ * attribute on the filesystem (like the ones that OS X
-+ * creates), so we pass the name through unmodified (after
-+ * ensuring it doesn't conflict with another namespace).
-+ */
-+ return hfsplus_getxattr(dentry, name, buffer, size);
- }
-
- static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
- const void *buffer, size_t size, int flags, int type)
- {
-- char *xattr_name;
-- int res;
--
- if (!strcmp(name, ""))
- return -EINVAL;
-
-@@ -845,16 +837,14 @@ static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
- */
- if (is_known_namespace(name))
- return -EOPNOTSUPP;
-- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
-- + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
-- if (!xattr_name)
-- return -ENOMEM;
-- strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
-- strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
-
-- res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
-- kfree(xattr_name);
-- return res;
-+ /*
-+ * osx is the namespace we use to indicate an unprefixed
-+ * attribute on the filesystem (like the ones that OS X
-+ * creates), so we pass the name through unmodified (after
-+ * ensuring it doesn't conflict with another namespace).
-+ */
-+ return hfsplus_setxattr(dentry, name, buffer, size, flags);
- }
-
- static size_t hfsplus_osx_listxattr(struct dentry *dentry, char *list,
-diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
-index a7f2604c5f25..7f5f78bd15ad 100644
---- a/include/linux/usb_usual.h
-+++ b/include/linux/usb_usual.h
-@@ -77,6 +77,8 @@
- /* Cannot handle ATA_12 or ATA_16 CDBs */ \
- US_FLAG(NO_REPORT_OPCODES, 0x04000000) \
- /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
-+ US_FLAG(MAX_SECTORS_240, 0x08000000) \
-+ /* Sets max_sectors to 240 */ \
-
- #define US_FLAG(name, value) US_FL_##name = value ,
- enum { US_DO_ALL_FLAGS };
-diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
-index 183eaab7c380..96e3f56519e7 100644
---- a/include/scsi/scsi_devinfo.h
-+++ b/include/scsi/scsi_devinfo.h
-@@ -36,5 +36,6 @@
- for sequential scan */
- #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
- #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
-+#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
-
- #endif
-diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
-index 0de95ccb92cf..5bd134651f5e 100644
---- a/include/sound/emu10k1.h
-+++ b/include/sound/emu10k1.h
-@@ -41,7 +41,8 @@
-
- #define EMUPAGESIZE 4096
- #define MAXREQVOICES 8
--#define MAXPAGES 8192
-+#define MAXPAGES0 4096 /* 32 bit mode */
-+#define MAXPAGES1 8192 /* 31 bit mode */
- #define RESERVED 0
- #define NUM_MIDI 16
- #define NUM_G 64 /* use all channels */
-@@ -50,8 +51,7 @@
-
- /* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
- #define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */
--#define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */
-- /* See ALSA bug #1276 - rlrevell */
-+#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */
-
- #define TMEMSIZE 256*1024
- #define TMEMSIZEREG 4
-@@ -466,8 +466,11 @@
-
- #define MAPB 0x0d /* Cache map B */
-
--#define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
--#define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
-+#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */
-+#define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */
-+
-+#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
-+#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
-
- /* 0x0e, 0x0f: Not used */
-
-@@ -1704,6 +1707,7 @@ struct snd_emu10k1 {
- unsigned short model; /* subsystem id */
- unsigned int card_type; /* EMU10K1_CARD_* */
- unsigned int ecard_ctrl; /* ecard control bits */
-+ unsigned int address_mode; /* address mode */
- unsigned long dma_mask; /* PCI DMA mask */
- unsigned int delay_pcm_irq; /* in samples */
- int max_cache_pages; /* max memory size / PAGE_SIZE */
-diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
-index 8d7416e46861..15355892a0ff 100644
---- a/include/sound/soc-dapm.h
-+++ b/include/sound/soc-dapm.h
-@@ -287,7 +287,7 @@ struct device;
- .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
- .tlv.p = (tlv_array), \
- .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
-- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
-+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
- #define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
- SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
- #define SOC_DAPM_ENUM(xname, xenum) \
-diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
-index a64e7a207d2b..0c5796eadae1 100644
---- a/kernel/bpf/core.c
-+++ b/kernel/bpf/core.c
-@@ -357,8 +357,8 @@ select_insn:
- ALU64_MOD_X:
- if (unlikely(SRC == 0))
- return 0;
-- tmp = DST;
-- DST = do_div(tmp, SRC);
-+ div64_u64_rem(DST, SRC, &tmp);
-+ DST = tmp;
- CONT;
- ALU_MOD_X:
- if (unlikely(SRC == 0))
-@@ -367,8 +367,8 @@ select_insn:
- DST = do_div(tmp, (u32) SRC);
- CONT;
- ALU64_MOD_K:
-- tmp = DST;
-- DST = do_div(tmp, IMM);
-+ div64_u64_rem(DST, IMM, &tmp);
-+ DST = tmp;
- CONT;
- ALU_MOD_K:
- tmp = (u32) DST;
-@@ -377,7 +377,7 @@ select_insn:
- ALU64_DIV_X:
- if (unlikely(SRC == 0))
- return 0;
-- do_div(DST, SRC);
-+ DST = div64_u64(DST, SRC);
- CONT;
- ALU_DIV_X:
- if (unlikely(SRC == 0))
-@@ -387,7 +387,7 @@ select_insn:
- DST = (u32) tmp;
- CONT;
- ALU64_DIV_K:
-- do_div(DST, IMM);
-+ DST = div64_u64(DST, IMM);
- CONT;
- ALU_DIV_K:
- tmp = (u32) DST;
-diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
-index 208d5439e59b..787b0d699969 100644
---- a/net/ipv4/ping.c
-+++ b/net/ipv4/ping.c
-@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
- if (sk_hashed(sk)) {
- write_lock_bh(&ping_table.lock);
- hlist_nulls_del(&sk->sk_nulls_node);
-+ sk_nulls_node_init(&sk->sk_nulls_node);
- sock_put(sk);
- isk->inet_num = 0;
- isk->inet_sport = 0;
-diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index ad5064362c5c..20fc0202cbbe 100644
---- a/net/ipv4/route.c
-+++ b/net/ipv4/route.c
-@@ -963,10 +963,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
- if (dst_metric_locked(dst, RTAX_MTU))
- return;
-
-- if (dst->dev->mtu < mtu)
-- return;
--
-- if (rt->rt_pmtu && rt->rt_pmtu < mtu)
-+ if (ipv4_mtu(dst) < mtu)
- return;
-
- if (mtu < ip_rt_min_pmtu)
-diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
-index 37d0220a094c..db7a2e5e4a14 100644
---- a/sound/pci/emu10k1/emu10k1.c
-+++ b/sound/pci/emu10k1/emu10k1.c
-@@ -183,8 +183,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
- }
- #endif
-
-- strcpy(card->driver, emu->card_capabilities->driver);
-- strcpy(card->shortname, emu->card_capabilities->name);
-+ strlcpy(card->driver, emu->card_capabilities->driver,
-+ sizeof(card->driver));
-+ strlcpy(card->shortname, emu->card_capabilities->name,
-+ sizeof(card->shortname));
- snprintf(card->longname, sizeof(card->longname),
- "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
- card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
-diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
-index 874cd76c7b7f..d2c7ea3a7610 100644
---- a/sound/pci/emu10k1/emu10k1_callback.c
-+++ b/sound/pci/emu10k1/emu10k1_callback.c
-@@ -415,7 +415,7 @@ start_voice(struct snd_emux_voice *vp)
- snd_emu10k1_ptr_write(hw, Z2, ch, 0);
-
- /* invalidate maps */
-- temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK;
-+ temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
- snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
- snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
- #if 0
-@@ -436,7 +436,7 @@ start_voice(struct snd_emux_voice *vp)
- snd_emu10k1_ptr_write(hw, CDF, ch, sample);
-
- /* invalidate maps */
-- temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK;
-+ temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
- snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
- snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
-
-diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
-index b4458a630a7c..df9f5c7c9c77 100644
---- a/sound/pci/emu10k1/emu10k1_main.c
-+++ b/sound/pci/emu10k1/emu10k1_main.c
-@@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
- snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */
- snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */
-
-- silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK;
-+ silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
- for (ch = 0; ch < NUM_G; ch++) {
- snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
- snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
-@@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
- outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
- }
-
-+ if (emu->address_mode == 0) {
-+ /* use 16M in 4G */
-+ outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG);
-+ }
-+
- return 0;
- }
-
-@@ -1421,7 +1426,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
- *
- */
- {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102,
-- .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]",
-+ .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]",
- .id = "Audigy2",
- .emu10k2_chip = 1,
- .ca0108_chip = 1,
-@@ -1571,7 +1576,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
- .adc_1361t = 1, /* 24 bit capture instead of 16bit */
- .ac97_chip = 1} ,
- {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102,
-- .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]",
-+ .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]",
- .id = "Audigy2",
- .emu10k2_chip = 1,
- .ca0102_chip = 1,
-@@ -1877,8 +1882,10 @@ int snd_emu10k1_create(struct snd_card *card,
-
- is_audigy = emu->audigy = c->emu10k2_chip;
-
-+ /* set addressing mode */
-+ emu->address_mode = is_audigy ? 0 : 1;
- /* set the DMA transfer mask */
-- emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK;
-+ emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK;
- if (pci_set_dma_mask(pci, emu->dma_mask) < 0 ||
- pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) {
- dev_err(card->dev,
-@@ -1903,7 +1910,7 @@ int snd_emu10k1_create(struct snd_card *card,
-
- emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT;
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
-- 32 * 1024, &emu->ptb_pages) < 0) {
-+ (emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) {
- err = -ENOMEM;
- goto error;
- }
-@@ -2002,8 +2009,8 @@ int snd_emu10k1_create(struct snd_card *card,
-
- /* Clear silent pages and set up pointers */
- memset(emu->silent_page.area, 0, PAGE_SIZE);
-- silent_page = emu->silent_page.addr << 1;
-- for (idx = 0; idx < MAXPAGES; idx++)
-+ silent_page = emu->silent_page.addr << emu->address_mode;
-+ for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
- ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
-
- /* set up voice indices */
-diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
-index 0dc07385af0e..14a305bd8a98 100644
---- a/sound/pci/emu10k1/emupcm.c
-+++ b/sound/pci/emu10k1/emupcm.c
-@@ -380,7 +380,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
- snd_emu10k1_ptr_write(emu, Z1, voice, 0);
- snd_emu10k1_ptr_write(emu, Z2, voice, 0);
- /* invalidate maps */
-- silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK;
-+ silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
- snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page);
- snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page);
- /* modulation envelope */
-diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
-index c68e6dd2fa67..4f1f69be1865 100644
---- a/sound/pci/emu10k1/memory.c
-+++ b/sound/pci/emu10k1/memory.c
-@@ -34,10 +34,11 @@
- * aligned pages in others
- */
- #define __set_ptb_entry(emu,page,addr) \
-- (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
-+ (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
-
- #define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE)
--#define MAX_ALIGN_PAGES (MAXPAGES / UNIT_PAGES)
-+#define MAX_ALIGN_PAGES0 (MAXPAGES0 / UNIT_PAGES)
-+#define MAX_ALIGN_PAGES1 (MAXPAGES1 / UNIT_PAGES)
- /* get aligned page from offset address */
- #define get_aligned_page(offset) ((offset) >> PAGE_SHIFT)
- /* get offset address from aligned page */
-@@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis
- }
- page = blk->mapped_page + blk->pages;
- }
-- size = MAX_ALIGN_PAGES - page;
-+ size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
- if (size >= max_size) {
- *nextp = pos;
- return page;
-@@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
- q = get_emu10k1_memblk(p, mapped_link);
- end_page = q->mapped_page;
- } else
-- end_page = MAX_ALIGN_PAGES;
-+ end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
-
- /* remove links */
- list_del(&blk->mapped_link);
-@@ -307,7 +308,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
- if (snd_BUG_ON(!emu))
- return NULL;
- if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
-- runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
-+ runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
- return NULL;
- hdr = emu->memhdr;
- if (snd_BUG_ON(!hdr))
-diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
-index 2fe86d2e1b09..a63a86332deb 100644
---- a/sound/pci/hda/hda_codec.c
-+++ b/sound/pci/hda/hda_codec.c
-@@ -3027,6 +3027,16 @@ static struct snd_kcontrol_new vmaster_mute_mode = {
- .put = vmaster_mute_mode_put,
- };
-
-+/* meta hook to call each driver's vmaster hook */
-+static void vmaster_hook(void *private_data, int enabled)
-+{
-+ struct hda_vmaster_mute_hook *hook = private_data;
-+
-+ if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER)
-+ enabled = hook->mute_mode;
-+ hook->hook(hook->codec, enabled);
-+}
-+
- /**
- * snd_hda_add_vmaster_hook - Add a vmaster hook for mute-LED
- * @codec: the HDA codec
-@@ -3045,9 +3055,9 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec,
-
- if (!hook->hook || !hook->sw_kctl)
- return 0;
-- snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec);
- hook->codec = codec;
- hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER;
-+ snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook);
- if (!expose_enum_ctl)
- return 0;
- kctl = snd_ctl_new1(&vmaster_mute_mode, hook);
-@@ -3073,14 +3083,7 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook)
- */
- if (hook->codec->bus->shutdown)
- return;
-- switch (hook->mute_mode) {
-- case HDA_VMUTE_FOLLOW_MASTER:
-- snd_ctl_sync_vmaster_hook(hook->sw_kctl);
-- break;
-- default:
-- hook->hook(hook->codec, hook->mute_mode);
-- break;
-- }
-+ snd_ctl_sync_vmaster_hook(hook->sw_kctl);
- }
- EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook);
-
-diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
-index 6ba0b5517c40..2341fc334163 100644
---- a/sound/pci/hda/thinkpad_helper.c
-+++ b/sound/pci/hda/thinkpad_helper.c
-@@ -72,6 +72,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
- if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
- old_vmaster_hook = spec->vmaster_mute.hook;
- spec->vmaster_mute.hook = update_tpacpi_mute_led;
-+ spec->vmaster_mute_enum = 1;
- removefunc = false;
- }
- if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
-diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
-index fb9c20eace3f..97b33e96439a 100644
---- a/sound/soc/codecs/rt5677.c
-+++ b/sound/soc/codecs/rt5677.c
-@@ -62,6 +62,9 @@ static const struct reg_default init_list[] = {
- {RT5677_PR_BASE + 0x1e, 0x0000},
- {RT5677_PR_BASE + 0x12, 0x0eaa},
- {RT5677_PR_BASE + 0x14, 0x018a},
-+ {RT5677_PR_BASE + 0x15, 0x0490},
-+ {RT5677_PR_BASE + 0x38, 0x0f71},
-+ {RT5677_PR_BASE + 0x39, 0x0f71},
- };
- #define RT5677_INIT_REG_LEN ARRAY_SIZE(init_list)
-
-@@ -901,7 +904,7 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
- {
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
-- int idx = rl6231_calc_dmic_clk(rt5677->sysclk);
-+ int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8);
-
- if (idx < 0)
- dev_err(codec->dev, "Failed to set DMIC clock\n");
-diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
-index 16f1b71edb55..aab0af681e8c 100644
---- a/sound/soc/codecs/tfa9879.c
-+++ b/sound/soc/codecs/tfa9879.c
-@@ -280,8 +280,8 @@ static int tfa9879_i2c_probe(struct i2c_client *i2c,
- int i;
-
- tfa9879 = devm_kzalloc(&i2c->dev, sizeof(*tfa9879), GFP_KERNEL);
-- if (IS_ERR(tfa9879))
-- return PTR_ERR(tfa9879);
-+ if (!tfa9879)
-+ return -ENOMEM;
-
- i2c_set_clientdata(i2c, tfa9879);
-
-diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
-index 326d3c3804e3..5bf723689692 100644
---- a/sound/soc/samsung/s3c24xx-i2s.c
-+++ b/sound/soc/samsung/s3c24xx-i2s.c
-@@ -461,8 +461,8 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
- return -ENOENT;
- }
- s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res);
-- if (s3c24xx_i2s.regs == NULL)
-- return -ENXIO;
-+ if (IS_ERR(s3c24xx_i2s.regs))
-+ return PTR_ERR(s3c24xx_i2s.regs);
-
- s3c24xx_i2s_pcm_stereo_out.dma_addr = res->start + S3C2410_IISFIFO;
- s3c24xx_i2s_pcm_stereo_in.dma_addr = res->start + S3C2410_IISFIFO;
-diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
-index ab37add269ae..82e350e9501c 100644
---- a/sound/synth/emux/emux_oss.c
-+++ b/sound/synth/emux/emux_oss.c
-@@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
- if (snd_BUG_ON(!arg || !emu))
- return -ENXIO;
-
-- mutex_lock(&emu->register_mutex);
--
-- if (!snd_emux_inc_count(emu)) {
-- mutex_unlock(&emu->register_mutex);
-+ if (!snd_emux_inc_count(emu))
- return -EFAULT;
-- }
-
- memset(&callback, 0, sizeof(callback));
- callback.owner = THIS_MODULE;
-@@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
- if (p == NULL) {
- snd_printk(KERN_ERR "can't create port\n");
- snd_emux_dec_count(emu);
-- mutex_unlock(&emu->register_mutex);
- return -ENOMEM;
- }
-
-@@ -148,8 +143,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
- reset_port_mode(p, arg->seq_mode);
-
- snd_emux_reset_port(p);
--
-- mutex_unlock(&emu->register_mutex);
- return 0;
- }
-
-@@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg)
- if (snd_BUG_ON(!emu))
- return -ENXIO;
-
-- mutex_lock(&emu->register_mutex);
- snd_emux_sounds_off_all(p);
- snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
- snd_seq_event_port_detach(p->chset.client, p->chset.port);
- snd_emux_dec_count(emu);
-
-- mutex_unlock(&emu->register_mutex);
- return 0;
- }
-
-diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c
-index 7778b8e19782..a0209204ae48 100644
---- a/sound/synth/emux/emux_seq.c
-+++ b/sound/synth/emux/emux_seq.c
-@@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu)
- if (emu->voices)
- snd_emux_terminate_all(emu);
-
-- mutex_lock(&emu->register_mutex);
- if (emu->client >= 0) {
- snd_seq_delete_kernel_client(emu->client);
- emu->client = -1;
- }
-- mutex_unlock(&emu->register_mutex);
- }
-
-
-@@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data,
- /*
- * increment usage count
- */
--int
--snd_emux_inc_count(struct snd_emux *emu)
-+static int
-+__snd_emux_inc_count(struct snd_emux *emu)
- {
- emu->used++;
- if (!try_module_get(emu->ops.owner))
-@@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu)
- return 1;
- }
-
-+int snd_emux_inc_count(struct snd_emux *emu)
-+{
-+ int ret;
-+
-+ mutex_lock(&emu->register_mutex);
-+ ret = __snd_emux_inc_count(emu);
-+ mutex_unlock(&emu->register_mutex);
-+ return ret;
-+}
-
- /*
- * decrease usage count
- */
--void
--snd_emux_dec_count(struct snd_emux *emu)
-+static void
-+__snd_emux_dec_count(struct snd_emux *emu)
- {
- module_put(emu->card->module);
- emu->used--;
-@@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu)
- module_put(emu->ops.owner);
- }
-
-+void snd_emux_dec_count(struct snd_emux *emu)
-+{
-+ mutex_lock(&emu->register_mutex);
-+ __snd_emux_dec_count(emu);
-+ mutex_unlock(&emu->register_mutex);
-+}
-
- /*
- * Routine that is called upon a first use of a particular port
-@@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info)
-
- mutex_lock(&emu->register_mutex);
- snd_emux_init_port(p);
-- snd_emux_inc_count(emu);
-+ __snd_emux_inc_count(emu);
- mutex_unlock(&emu->register_mutex);
- return 0;
- }
-@@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info)
-
- mutex_lock(&emu->register_mutex);
- snd_emux_sounds_off_all(p);
-- snd_emux_dec_count(emu);
-+ __snd_emux_dec_count(emu);
- mutex_unlock(&emu->register_mutex);
- return 0;
- }
diff --git a/1003_linux-4.0.4.patch b/1003_linux-4.0.4.patch
deleted file mode 100644
index e5c793a4..00000000
--- a/1003_linux-4.0.4.patch
+++ /dev/null
@@ -1,2713 +0,0 @@
-diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
-index a4873e5e3e36..e30e184f50c7 100644
---- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
-+++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
-@@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 {
- 80 81 68 69
- 70 71 72 73
- 74 75 76 77>;
-- interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
-+ interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
- "saif0", "saif1", "i2c0", "i2c1",
- "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
- "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
-diff --git a/Makefile b/Makefile
-index dc9f43a019d6..3d16bcc87585 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 0
--SUBLEVEL = 3
-+SUBLEVEL = 4
- EXTRAVERSION =
- NAME = Hurr durr I'ma sheep
-
-diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
-index 0c76d9f05fd0..f4838ebd918b 100644
---- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
-+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
-@@ -105,6 +105,10 @@
- };
-
- internal-regs {
-+ rtc@10300 {
-+ /* No crystal connected to the internal RTC */
-+ status = "disabled";
-+ };
- serial@12000 {
- status = "okay";
- };
-diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
-index 7e6eef2488e8..82045398bf1f 100644
---- a/arch/arm/boot/dts/imx23-olinuxino.dts
-+++ b/arch/arm/boot/dts/imx23-olinuxino.dts
-@@ -12,6 +12,7 @@
- */
-
- /dts-v1/;
-+#include <dt-bindings/gpio/gpio.h>
- #include "imx23.dtsi"
-
- / {
-@@ -93,6 +94,7 @@
-
- ahb@80080000 {
- usb0: usb@80080000 {
-+ dr_mode = "host";
- vbus-supply = <&reg_usb0_vbus>;
- status = "okay";
- };
-@@ -122,7 +124,7 @@
-
- user {
- label = "green";
-- gpios = <&gpio2 1 1>;
-+ gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
- };
- };
- };
-diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
-index e4d3aecc4ed2..677f81d9dcd5 100644
---- a/arch/arm/boot/dts/imx25.dtsi
-+++ b/arch/arm/boot/dts/imx25.dtsi
-@@ -428,6 +428,7 @@
-
- pwm4: pwm@53fc8000 {
- compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
-+ #pwm-cells = <2>;
- reg = <0x53fc8000 0x4000>;
- clocks = <&clks 108>, <&clks 52>;
- clock-names = "ipg", "per";
-diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
-index 47f68ac868d4..5ed245a3f9ac 100644
---- a/arch/arm/boot/dts/imx28.dtsi
-+++ b/arch/arm/boot/dts/imx28.dtsi
-@@ -900,7 +900,7 @@
- 80 81 68 69
- 70 71 72 73
- 74 75 76 77>;
-- interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
-+ interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
- "saif0", "saif1", "i2c0", "i2c1",
- "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
- "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
-diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
-index 19cc269a08d4..1ce6133b67f5 100644
---- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
-+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
-@@ -31,6 +31,7 @@
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio4 15 0>;
-+ enable-active-high;
- };
-
- reg_usb_h1_vbus: regulator@1 {
-@@ -40,6 +41,7 @@
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio1 0 0>;
-+ enable-active-high;
- };
- };
-
-diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
-index db80f9d376fa..9c8bdf2c93a1 100644
---- a/arch/arm/boot/dts/omap3-n900.dts
-+++ b/arch/arm/boot/dts/omap3-n900.dts
-@@ -484,6 +484,8 @@
- DRVDD-supply = <&vmmc2>;
- IOVDD-supply = <&vio>;
- DVDD-supply = <&vio>;
-+
-+ ai3x-micbias-vg = <1>;
- };
-
- tlv320aic3x_aux: tlv320aic3x@19 {
-@@ -495,6 +497,8 @@
- DRVDD-supply = <&vmmc2>;
- IOVDD-supply = <&vio>;
- DVDD-supply = <&vio>;
-+
-+ ai3x-micbias-vg = <2>;
- };
-
- tsl2563: tsl2563@29 {
-diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
-index bfd3f1c734b8..2201cd5da3bb 100644
---- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
-+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
-@@ -1017,23 +1017,6 @@
- status = "disabled";
- };
-
-- vmmci: regulator-gpio {
-- compatible = "regulator-gpio";
--
-- regulator-min-microvolt = <1800000>;
-- regulator-max-microvolt = <2900000>;
-- regulator-name = "mmci-reg";
-- regulator-type = "voltage";
--
-- startup-delay-us = <100>;
-- enable-active-high;
--
-- states = <1800000 0x1
-- 2900000 0x0>;
--
-- status = "disabled";
-- };
--
- mcde@a0350000 {
- compatible = "stericsson,mcde";
- reg = <0xa0350000 0x1000>, /* MCDE */
-diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
-index bf8f0eddc2c0..744c1e3a744d 100644
---- a/arch/arm/boot/dts/ste-href.dtsi
-+++ b/arch/arm/boot/dts/ste-href.dtsi
-@@ -111,6 +111,21 @@
- pinctrl-1 = <&i2c3_sleep_mode>;
- };
-
-+ vmmci: regulator-gpio {
-+ compatible = "regulator-gpio";
-+
-+ regulator-min-microvolt = <1800000>;
-+ regulator-max-microvolt = <2900000>;
-+ regulator-name = "mmci-reg";
-+ regulator-type = "voltage";
-+
-+ startup-delay-us = <100>;
-+ enable-active-high;
-+
-+ states = <1800000 0x1
-+ 2900000 0x0>;
-+ };
-+
- // External Micro SD slot
- sdi0_per1@80126000 {
- arm,primecell-periphid = <0x10480180>;
-diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
-index 206826a855c0..1bc84ebdccaa 100644
---- a/arch/arm/boot/dts/ste-snowball.dts
-+++ b/arch/arm/boot/dts/ste-snowball.dts
-@@ -146,8 +146,21 @@
- };
-
- vmmci: regulator-gpio {
-+ compatible = "regulator-gpio";
-+
- gpios = <&gpio7 4 0x4>;
- enable-gpio = <&gpio6 25 0x4>;
-+
-+ regulator-min-microvolt = <1800000>;
-+ regulator-max-microvolt = <2900000>;
-+ regulator-name = "mmci-reg";
-+ regulator-type = "voltage";
-+
-+ startup-delay-us = <100>;
-+ enable-active-high;
-+
-+ states = <1800000 0x1
-+ 2900000 0x0>;
- };
-
- // External Micro SD slot
-diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
-index 902397dd1000..1c1cdfa566ac 100644
---- a/arch/arm/kernel/Makefile
-+++ b/arch/arm/kernel/Makefile
-@@ -86,7 +86,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-
- obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
- ifeq ($(CONFIG_ARM_PSCI),y)
--obj-y += psci.o
-+obj-y += psci.o psci-call.o
- obj-$(CONFIG_SMP) += psci_smp.o
- endif
-
-diff --git a/arch/arm/kernel/psci-call.S b/arch/arm/kernel/psci-call.S
-new file mode 100644
-index 000000000000..a78e9e1e206d
---- /dev/null
-+++ b/arch/arm/kernel/psci-call.S
-@@ -0,0 +1,31 @@
-+/*
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Copyright (C) 2015 ARM Limited
-+ *
-+ * Author: Mark Rutland <mark.rutland@arm.com>
-+ */
-+
-+#include <linux/linkage.h>
-+
-+#include <asm/opcodes-sec.h>
-+#include <asm/opcodes-virt.h>
-+
-+/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
-+ENTRY(__invoke_psci_fn_hvc)
-+ __HVC(0)
-+ bx lr
-+ENDPROC(__invoke_psci_fn_hvc)
-+
-+/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
-+ENTRY(__invoke_psci_fn_smc)
-+ __SMC(0)
-+ bx lr
-+ENDPROC(__invoke_psci_fn_smc)
-diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
-index f73891b6b730..f90fdf4ce7c7 100644
---- a/arch/arm/kernel/psci.c
-+++ b/arch/arm/kernel/psci.c
-@@ -23,8 +23,6 @@
-
- #include <asm/compiler.h>
- #include <asm/errno.h>
--#include <asm/opcodes-sec.h>
--#include <asm/opcodes-virt.h>
- #include <asm/psci.h>
- #include <asm/system_misc.h>
-
-@@ -33,6 +31,9 @@ struct psci_operations psci_ops;
- static int (*invoke_psci_fn)(u32, u32, u32, u32);
- typedef int (*psci_initcall_t)(const struct device_node *);
-
-+asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
-+asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);
-+
- enum psci_function {
- PSCI_FN_CPU_SUSPEND,
- PSCI_FN_CPU_ON,
-@@ -71,40 +72,6 @@ static u32 psci_power_state_pack(struct psci_power_state state)
- & PSCI_0_2_POWER_STATE_AFFL_MASK);
- }
-
--/*
-- * The following two functions are invoked via the invoke_psci_fn pointer
-- * and will not be inlined, allowing us to piggyback on the AAPCS.
-- */
--static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
-- u32 arg2)
--{
-- asm volatile(
-- __asmeq("%0", "r0")
-- __asmeq("%1", "r1")
-- __asmeq("%2", "r2")
-- __asmeq("%3", "r3")
-- __HVC(0)
-- : "+r" (function_id)
-- : "r" (arg0), "r" (arg1), "r" (arg2));
--
-- return function_id;
--}
--
--static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
-- u32 arg2)
--{
-- asm volatile(
-- __asmeq("%0", "r0")
-- __asmeq("%1", "r1")
-- __asmeq("%2", "r2")
-- __asmeq("%3", "r3")
-- __SMC(0)
-- : "+r" (function_id)
-- : "r" (arg0), "r" (arg1), "r" (arg2));
--
-- return function_id;
--}
--
- static int psci_get_version(void)
- {
- int err;
-diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
-index cbefbd7cfdb5..661d753df584 100644
---- a/arch/arm/mach-omap2/prm-regbits-34xx.h
-+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
-@@ -112,6 +112,7 @@
- #define OMAP3430_VC_CMD_ONLP_SHIFT 16
- #define OMAP3430_VC_CMD_RET_SHIFT 8
- #define OMAP3430_VC_CMD_OFF_SHIFT 0
-+#define OMAP3430_SREN_MASK (1 << 4)
- #define OMAP3430_HSEN_MASK (1 << 3)
- #define OMAP3430_MCODE_MASK (0x7 << 0)
- #define OMAP3430_VALID_MASK (1 << 24)
-diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h
-index b1c7a33e00e7..e794828dee55 100644
---- a/arch/arm/mach-omap2/prm-regbits-44xx.h
-+++ b/arch/arm/mach-omap2/prm-regbits-44xx.h
-@@ -35,6 +35,7 @@
- #define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT 1
- #define OMAP4430_GLOBAL_WUEN_MASK (1 << 16)
- #define OMAP4430_HSMCODE_MASK (0x7 << 0)
-+#define OMAP4430_SRMODEEN_MASK (1 << 4)
- #define OMAP4430_HSMODEEN_MASK (1 << 3)
- #define OMAP4430_HSSCLL_SHIFT 24
- #define OMAP4430_ICEPICK_RST_SHIFT 9
-diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
-index be9ef834fa81..076fd20d7e5a 100644
---- a/arch/arm/mach-omap2/vc.c
-+++ b/arch/arm/mach-omap2/vc.c
-@@ -316,7 +316,8 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm)
- * idle. And we can also scale voltages to zero for off-idle.
- * Note that no actual voltage scaling during off-idle will
- * happen unless the board specific twl4030 PMIC scripts are
-- * loaded.
-+ * loaded. See also omap_vc_i2c_init for comments regarding
-+ * erratum i531.
- */
- val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET);
- if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) {
-@@ -704,9 +705,16 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
- return;
- }
-
-+ /*
-+ * Note that for omap3 OMAP3430_SREN_MASK clears SREN to work around
-+ * erratum i531 "Extra Power Consumed When Repeated Start Operation
-+ * Mode Is Enabled on I2C Interface Dedicated for Smart Reflex (I2C4)".
-+ * Otherwise I2C4 eventually leads into about 23mW extra power being
-+ * consumed even during off idle using VMODE.
-+ */
- i2c_high_speed = voltdm->pmic->i2c_high_speed;
- if (i2c_high_speed)
-- voltdm->rmw(vc->common->i2c_cfg_hsen_mask,
-+ voltdm->rmw(vc->common->i2c_cfg_clear_mask,
- vc->common->i2c_cfg_hsen_mask,
- vc->common->i2c_cfg_reg);
-
-diff --git a/arch/arm/mach-omap2/vc.h b/arch/arm/mach-omap2/vc.h
-index cdbdd78e755e..89b83b7ff3ec 100644
---- a/arch/arm/mach-omap2/vc.h
-+++ b/arch/arm/mach-omap2/vc.h
-@@ -34,6 +34,7 @@ struct voltagedomain;
- * @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register
- * @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register
- * @i2c_cfg_reg: I2C configuration register offset
-+ * @i2c_cfg_clear_mask: high-speed mode bit clear mask in I2C config register
- * @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register
- * @i2c_mcode_mask: MCODE field mask for I2C config register
- *
-@@ -52,6 +53,7 @@ struct omap_vc_common {
- u8 cmd_ret_shift;
- u8 cmd_off_shift;
- u8 i2c_cfg_reg;
-+ u8 i2c_cfg_clear_mask;
- u8 i2c_cfg_hsen_mask;
- u8 i2c_mcode_mask;
- };
-diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c
-index 75bc4aa22b3a..71d74c9172c1 100644
---- a/arch/arm/mach-omap2/vc3xxx_data.c
-+++ b/arch/arm/mach-omap2/vc3xxx_data.c
-@@ -40,6 +40,7 @@ static struct omap_vc_common omap3_vc_common = {
- .cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT,
- .cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT,
- .cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT,
-+ .i2c_cfg_clear_mask = OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK,
- .i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK,
- .i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET,
- .i2c_mcode_mask = OMAP3430_MCODE_MASK,
-diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c
-index 085e5d6a04fd..2abd5fa8a697 100644
---- a/arch/arm/mach-omap2/vc44xx_data.c
-+++ b/arch/arm/mach-omap2/vc44xx_data.c
-@@ -42,6 +42,7 @@ static const struct omap_vc_common omap4_vc_common = {
- .cmd_ret_shift = OMAP4430_RET_SHIFT,
- .cmd_off_shift = OMAP4430_OFF_SHIFT,
- .i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET,
-+ .i2c_cfg_clear_mask = OMAP4430_SRMODEEN_MASK | OMAP4430_HSMODEEN_MASK,
- .i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK,
- .i2c_mcode_mask = OMAP4430_HSMCODE_MASK,
- };
-diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
-index e1268f905026..f412b53ed268 100644
---- a/arch/arm/net/bpf_jit_32.c
-+++ b/arch/arm/net/bpf_jit_32.c
-@@ -449,10 +449,21 @@ static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
- return;
- }
- #endif
-- if (rm != ARM_R0)
-- emit(ARM_MOV_R(ARM_R0, rm), ctx);
-+
-+ /*
-+ * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
-+ * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
-+ * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
-+ * before using it as a source for ARM_R1.
-+ *
-+ * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
-+ * ARM_R5 (r_X) so there is no particular register overlap
-+ * issues.
-+ */
- if (rn != ARM_R1)
- emit(ARM_MOV_R(ARM_R1, rn), ctx);
-+ if (rm != ARM_R0)
-+ emit(ARM_MOV_R(ARM_R0, rm), ctx);
-
- ctx->seen |= SEEN_CALL;
- emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
-diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
-index cf87de3fc390..64b611782ef0 100644
---- a/arch/x86/include/asm/spinlock.h
-+++ b/arch/x86/include/asm/spinlock.h
-@@ -169,7 +169,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
- struct __raw_tickets tmp = READ_ONCE(lock->tickets);
-
- tmp.head &= ~TICKET_SLOWPATH_FLAG;
-- return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
-+ return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
- }
- #define arch_spin_is_contended arch_spin_is_contended
-
-diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
-index e4695985f9de..d93963340c3c 100644
---- a/arch/x86/pci/acpi.c
-+++ b/arch/x86/pci/acpi.c
-@@ -325,6 +325,26 @@ static void release_pci_root_info(struct pci_host_bridge *bridge)
- kfree(info);
- }
-
-+/*
-+ * An IO port or MMIO resource assigned to a PCI host bridge may be
-+ * consumed by the host bridge itself or available to its child
-+ * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
-+ * to tell whether the resource is consumed by the host bridge itself,
-+ * but firmware hasn't used that bit consistently, so we can't rely on it.
-+ *
-+ * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
-+ * to be available to child bus/devices except one special case:
-+ * IO port [0xCF8-0xCFF] is consumed by the host bridge itself
-+ * to access PCI configuration space.
-+ *
-+ * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF].
-+ */
-+static bool resource_is_pcicfg_ioport(struct resource *res)
-+{
-+ return (res->flags & IORESOURCE_IO) &&
-+ res->start == 0xCF8 && res->end == 0xCFF;
-+}
-+
- static void probe_pci_root_info(struct pci_root_info *info,
- struct acpi_device *device,
- int busnum, int domain,
-@@ -346,8 +366,8 @@ static void probe_pci_root_info(struct pci_root_info *info,
- "no IO and memory resources present in _CRS\n");
- else
- resource_list_for_each_entry_safe(entry, tmp, list) {
-- if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
-- (entry->res->flags & IORESOURCE_DISABLED))
-+ if ((entry->res->flags & IORESOURCE_DISABLED) ||
-+ resource_is_pcicfg_ioport(entry->res))
- resource_list_destroy_entry(entry);
- else
- entry->res->name = info->name;
-diff --git a/block/blk-core.c b/block/blk-core.c
-index 794c3e7f01cf..66406474f0c4 100644
---- a/block/blk-core.c
-+++ b/block/blk-core.c
-@@ -552,6 +552,8 @@ void blk_cleanup_queue(struct request_queue *q)
- q->queue_lock = &q->__queue_lock;
- spin_unlock_irq(lock);
-
-+ bdi_destroy(&q->backing_dev_info);
-+
- /* @q is and will stay empty, shutdown and put */
- blk_put_queue(q);
- }
-diff --git a/block/blk-mq.c b/block/blk-mq.c
-index 33c428530193..5c39703e644f 100644
---- a/block/blk-mq.c
-+++ b/block/blk-mq.c
-@@ -675,8 +675,11 @@ static void blk_mq_rq_timer(unsigned long priv)
- data.next = blk_rq_timeout(round_jiffies_up(data.next));
- mod_timer(&q->timeout, data.next);
- } else {
-- queue_for_each_hw_ctx(q, hctx, i)
-- blk_mq_tag_idle(hctx);
-+ queue_for_each_hw_ctx(q, hctx, i) {
-+ /* the hctx may be unmapped, so check it here */
-+ if (blk_mq_hw_queue_mapped(hctx))
-+ blk_mq_tag_idle(hctx);
-+ }
- }
- }
-
-@@ -1570,22 +1573,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
- return NOTIFY_OK;
- }
-
--static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
--{
-- struct request_queue *q = hctx->queue;
-- struct blk_mq_tag_set *set = q->tag_set;
--
-- if (set->tags[hctx->queue_num])
-- return NOTIFY_OK;
--
-- set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
-- if (!set->tags[hctx->queue_num])
-- return NOTIFY_STOP;
--
-- hctx->tags = set->tags[hctx->queue_num];
-- return NOTIFY_OK;
--}
--
- static int blk_mq_hctx_notify(void *data, unsigned long action,
- unsigned int cpu)
- {
-@@ -1593,8 +1580,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
-
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
- return blk_mq_hctx_cpu_offline(hctx, cpu);
-- else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-- return blk_mq_hctx_cpu_online(hctx, cpu);
-+
-+ /*
-+ * In case of CPU online, tags may be reallocated
-+ * in blk_mq_map_swqueue() after mapping is updated.
-+ */
-
- return NOTIFY_OK;
- }
-@@ -1776,6 +1766,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
- unsigned int i;
- struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx;
-+ struct blk_mq_tag_set *set = q->tag_set;
-
- queue_for_each_hw_ctx(q, hctx, i) {
- cpumask_clear(hctx->cpumask);
-@@ -1802,16 +1793,20 @@ static void blk_mq_map_swqueue(struct request_queue *q)
- * disable it and free the request entries.
- */
- if (!hctx->nr_ctx) {
-- struct blk_mq_tag_set *set = q->tag_set;
--
- if (set->tags[i]) {
- blk_mq_free_rq_map(set, set->tags[i], i);
- set->tags[i] = NULL;
-- hctx->tags = NULL;
- }
-+ hctx->tags = NULL;
- continue;
- }
-
-+ /* unmapped hw queue can be remapped after CPU topo changed */
-+ if (!set->tags[i])
-+ set->tags[i] = blk_mq_init_rq_map(set, i);
-+ hctx->tags = set->tags[i];
-+ WARN_ON(!hctx->tags);
-+
- /*
- * Initialize batch roundrobin counts
- */
-@@ -2075,9 +2070,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
- */
- list_for_each_entry(q, &all_q_list, all_q_node)
- blk_mq_freeze_queue_start(q);
-- list_for_each_entry(q, &all_q_list, all_q_node)
-+ list_for_each_entry(q, &all_q_list, all_q_node) {
- blk_mq_freeze_queue_wait(q);
-
-+ /*
-+ * timeout handler can't touch hw queue during the
-+ * reinitialization
-+ */
-+ del_timer_sync(&q->timeout);
-+ }
-+
- list_for_each_entry(q, &all_q_list, all_q_node)
- blk_mq_queue_reinit(q);
-
-diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
-index faaf36ade7eb..2b8fd302f677 100644
---- a/block/blk-sysfs.c
-+++ b/block/blk-sysfs.c
-@@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj)
-
- blk_trace_shutdown(q);
-
-- bdi_destroy(&q->backing_dev_info);
--
- ida_simple_remove(&blk_queue_ida, q->id);
- call_rcu(&q->rcu_head, blk_free_queue_rcu);
- }
-diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
-index b193f8425999..ff6d8adc9cda 100644
---- a/drivers/acpi/acpi_pnp.c
-+++ b/drivers/acpi/acpi_pnp.c
-@@ -304,6 +304,8 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
- {"PNPb006"},
- /* cs423x-pnpbios */
- {"CSC0100"},
-+ {"CSC0103"},
-+ {"CSC0110"},
- {"CSC0000"},
- {"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */
- /* es18xx-pnpbios */
-diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
-index cf607fe69dbd..c240bdf824f2 100644
---- a/drivers/acpi/acpica/acmacros.h
-+++ b/drivers/acpi/acpica/acmacros.h
-@@ -63,23 +63,12 @@
- #define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
-
- /*
-- * printf() format helpers. These macros are workarounds for the difficulties
-+ * printf() format helper. This macros is a workaround for the difficulties
- * with emitting 64-bit integers and 64-bit pointers with the same code
- * for both 32-bit and 64-bit hosts.
- */
- #define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)
-
--#if ACPI_MACHINE_WIDTH == 64
--#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i)
--#define ACPI_FORMAT_TO_UINT(i) ACPI_FORMAT_UINT64(i)
--#define ACPI_PRINTF_UINT "0x%8.8X%8.8X"
--
--#else
--#define ACPI_FORMAT_NATIVE_UINT(i) 0, (u32) (i)
--#define ACPI_FORMAT_TO_UINT(i) (u32) (i)
--#define ACPI_PRINTF_UINT "0x%8.8X"
--#endif
--
- /*
- * Macros for moving data around to/from buffers that are possibly unaligned.
- * If the hardware supports the transfer of unaligned data, just do the store.
-diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
-index 77244182ff02..ea0cc4e08f80 100644
---- a/drivers/acpi/acpica/dsopcode.c
-+++ b/drivers/acpi/acpica/dsopcode.c
-@@ -446,7 +446,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
- obj_desc,
-- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
-+ ACPI_FORMAT_UINT64(obj_desc->region.address),
- obj_desc->region.length));
-
- /* Now the address and length are valid for this opregion */
-@@ -539,13 +539,12 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
- return_ACPI_STATUS(AE_NOT_EXIST);
- }
-
-- obj_desc->region.address =
-- (acpi_physical_address) ACPI_TO_INTEGER(table);
-+ obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
- obj_desc->region.length = table->length;
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
- obj_desc,
-- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
-+ ACPI_FORMAT_UINT64(obj_desc->region.address),
- obj_desc->region.length));
-
- /* Now the address and length are valid for this opregion */
-diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
-index 9abace3401f9..2ba28a63fb68 100644
---- a/drivers/acpi/acpica/evregion.c
-+++ b/drivers/acpi/acpica/evregion.c
-@@ -272,7 +272,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
- &region_obj->region.handler->address_space, handler,
-- ACPI_FORMAT_NATIVE_UINT(address),
-+ ACPI_FORMAT_UINT64(address),
- acpi_ut_get_region_name(region_obj->region.
- space_id)));
-
-diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
-index 7c213b6b6472..1da52bef632e 100644
---- a/drivers/acpi/acpica/exdump.c
-+++ b/drivers/acpi/acpica/exdump.c
-@@ -767,8 +767,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
- acpi_os_printf("\n");
- } else {
- acpi_os_printf(" base %8.8X%8.8X Length %X\n",
-- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
-- address),
-+ ACPI_FORMAT_UINT64(obj_desc->region.
-+ address),
- obj_desc->region.length);
- }
- break;
-diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
-index 49479927e7f7..725a3746a2df 100644
---- a/drivers/acpi/acpica/exfldio.c
-+++ b/drivers/acpi/acpica/exfldio.c
-@@ -263,17 +263,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
- }
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
-- " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
-+ " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
- acpi_ut_get_region_name(rgn_desc->region.
- space_id),
- rgn_desc->region.space_id,
- obj_desc->common_field.access_byte_width,
- obj_desc->common_field.base_byte_offset,
-- field_datum_byte_offset, ACPI_CAST_PTR(void,
-- (rgn_desc->
-- region.
-- address +
-- region_offset))));
-+ field_datum_byte_offset,
-+ ACPI_FORMAT_UINT64(rgn_desc->region.address +
-+ region_offset)));
-
- /* Invoke the appropriate address_space/op_region handler */
-
-diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
-index 0fe188e238ef..b4bbf3150bc1 100644
---- a/drivers/acpi/acpica/exregion.c
-+++ b/drivers/acpi/acpica/exregion.c
-@@ -181,7 +181,7 @@ acpi_ex_system_memory_space_handler(u32 function,
- if (!mem_info->mapped_logical_address) {
- ACPI_ERROR((AE_INFO,
- "Could not map memory at 0x%8.8X%8.8X, size %u",
-- ACPI_FORMAT_NATIVE_UINT(address),
-+ ACPI_FORMAT_UINT64(address),
- (u32) map_length));
- mem_info->mapped_length = 0;
- return_ACPI_STATUS(AE_NO_MEMORY);
-@@ -202,8 +202,7 @@ acpi_ex_system_memory_space_handler(u32 function,
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
-- bit_width, function,
-- ACPI_FORMAT_NATIVE_UINT(address)));
-+ bit_width, function, ACPI_FORMAT_UINT64(address)));
-
- /*
- * Perform the memory read or write
-@@ -318,8 +317,7 @@ acpi_ex_system_io_space_handler(u32 function,
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
-- bit_width, function,
-- ACPI_FORMAT_NATIVE_UINT(address)));
-+ bit_width, function, ACPI_FORMAT_UINT64(address)));
-
- /* Decode the function parameter */
-
-diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
-index 2bd33fe56cb3..29033d71417b 100644
---- a/drivers/acpi/acpica/hwvalid.c
-+++ b/drivers/acpi/acpica/hwvalid.c
-@@ -142,17 +142,17 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
- byte_width = ACPI_DIV_8(bit_width);
- last_address = address + byte_width - 1;
-
-- ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X",
-- ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void,
-- last_address),
-- byte_width));
-+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
-+ "Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X",
-+ ACPI_FORMAT_UINT64(address),
-+ ACPI_FORMAT_UINT64(last_address), byte_width));
-
- /* Maximum 16-bit address in I/O space */
-
- if (last_address > ACPI_UINT16_MAX) {
- ACPI_ERROR((AE_INFO,
-- "Illegal I/O port address/length above 64K: %p/0x%X",
-- ACPI_CAST_PTR(void, address), byte_width));
-+ "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
-+ ACPI_FORMAT_UINT64(address), byte_width));
- return_ACPI_STATUS(AE_LIMIT);
- }
-
-@@ -181,8 +181,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
-
- if (acpi_gbl_osi_data >= port_info->osi_dependency) {
- ACPI_DEBUG_PRINT((ACPI_DB_IO,
-- "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)",
-- ACPI_CAST_PTR(void, address),
-+ "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
-+ ACPI_FORMAT_UINT64(address),
- byte_width, port_info->name,
- port_info->start,
- port_info->end));
-diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
-index 80f097eb7381..d259393505fa 100644
---- a/drivers/acpi/acpica/nsdump.c
-+++ b/drivers/acpi/acpica/nsdump.c
-@@ -271,12 +271,11 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
- switch (type) {
- case ACPI_TYPE_PROCESSOR:
-
-- acpi_os_printf("ID %02X Len %02X Addr %p\n",
-+ acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n",
- obj_desc->processor.proc_id,
- obj_desc->processor.length,
-- ACPI_CAST_PTR(void,
-- obj_desc->processor.
-- address));
-+ ACPI_FORMAT_UINT64(obj_desc->processor.
-+ address));
- break;
-
- case ACPI_TYPE_DEVICE:
-@@ -347,8 +346,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
- space_id));
- if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
- acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
-- ACPI_FORMAT_NATIVE_UINT
-- (obj_desc->region.address),
-+ ACPI_FORMAT_UINT64(obj_desc->
-+ region.
-+ address),
- obj_desc->region.length);
- } else {
- acpi_os_printf
-diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
-index 6a144957aadd..fd5998b2b46b 100644
---- a/drivers/acpi/acpica/tbdata.c
-+++ b/drivers/acpi/acpica/tbdata.c
-@@ -113,9 +113,9 @@ acpi_tb_acquire_table(struct acpi_table_desc *table_desc,
- case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
- case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
-
-- table =
-- ACPI_CAST_PTR(struct acpi_table_header,
-- table_desc->address);
-+ table = ACPI_CAST_PTR(struct acpi_table_header,
-+ ACPI_PHYSADDR_TO_PTR(table_desc->
-+ address));
- break;
-
- default:
-@@ -214,7 +214,8 @@ acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc,
- case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
- case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
-
-- table_header = ACPI_CAST_PTR(struct acpi_table_header, address);
-+ table_header = ACPI_CAST_PTR(struct acpi_table_header,
-+ ACPI_PHYSADDR_TO_PTR(address));
- if (!table_header) {
- return (AE_NO_MEMORY);
- }
-@@ -398,14 +399,14 @@ acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature)
- table_desc->length);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
-- "%4.4s " ACPI_PRINTF_UINT
-+ "%4.4s 0x%8.8X%8.8X"
- " Attempted table install failed",
- acpi_ut_valid_acpi_name(table_desc->
- signature.
- ascii) ?
- table_desc->signature.ascii : "????",
-- ACPI_FORMAT_TO_UINT(table_desc->
-- address)));
-+ ACPI_FORMAT_UINT64(table_desc->
-+ address)));
- goto invalidate_and_exit;
- }
- }
-diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
-index 7fbc2b9dcbbb..7e69bc73bd16 100644
---- a/drivers/acpi/acpica/tbinstal.c
-+++ b/drivers/acpi/acpica/tbinstal.c
-@@ -187,8 +187,9 @@ acpi_tb_install_fixed_table(acpi_physical_address address,
- status = acpi_tb_acquire_temp_table(&new_table_desc, address,
- ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL);
- if (ACPI_FAILURE(status)) {
-- ACPI_ERROR((AE_INFO, "Could not acquire table length at %p",
-- ACPI_CAST_PTR(void, address)));
-+ ACPI_ERROR((AE_INFO,
-+ "Could not acquire table length at %8.8X%8.8X",
-+ ACPI_FORMAT_UINT64(address)));
- return_ACPI_STATUS(status);
- }
-
-@@ -246,8 +247,9 @@ acpi_tb_install_standard_table(acpi_physical_address address,
-
- status = acpi_tb_acquire_temp_table(&new_table_desc, address, flags);
- if (ACPI_FAILURE(status)) {
-- ACPI_ERROR((AE_INFO, "Could not acquire table length at %p",
-- ACPI_CAST_PTR(void, address)));
-+ ACPI_ERROR((AE_INFO,
-+ "Could not acquire table length at %8.8X%8.8X",
-+ ACPI_FORMAT_UINT64(address)));
- return_ACPI_STATUS(status);
- }
-
-@@ -258,9 +260,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
- if (!reload &&
- acpi_gbl_disable_ssdt_table_install &&
- ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) {
-- ACPI_INFO((AE_INFO, "Ignoring installation of %4.4s at %p",
-- new_table_desc.signature.ascii, ACPI_CAST_PTR(void,
-- address)));
-+ ACPI_INFO((AE_INFO,
-+ "Ignoring installation of %4.4s at %8.8X%8.8X",
-+ new_table_desc.signature.ascii,
-+ ACPI_FORMAT_UINT64(address)));
- goto release_and_exit;
- }
-
-@@ -428,11 +431,11 @@ finish_override:
- return;
- }
-
-- ACPI_INFO((AE_INFO, "%4.4s " ACPI_PRINTF_UINT
-- " %s table override, new table: " ACPI_PRINTF_UINT,
-+ ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X"
-+ " %s table override, new table: 0x%8.8X%8.8X",
- old_table_desc->signature.ascii,
-- ACPI_FORMAT_TO_UINT(old_table_desc->address),
-- override_type, ACPI_FORMAT_TO_UINT(new_table_desc.address)));
-+ ACPI_FORMAT_UINT64(old_table_desc->address),
-+ override_type, ACPI_FORMAT_UINT64(new_table_desc.address)));
-
- /* We can now uninstall the original table */
-
-@@ -516,7 +519,7 @@ void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc)
-
- if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
- ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL) {
-- ACPI_FREE(ACPI_CAST_PTR(void, table_desc->address));
-+ ACPI_FREE(ACPI_PHYSADDR_TO_PTR(table_desc->address));
- }
-
- table_desc->address = ACPI_PTR_TO_PHYSADDR(NULL);
-diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
-index ef16c06e5091..77ba5c71c6e7 100644
---- a/drivers/acpi/acpica/tbprint.c
-+++ b/drivers/acpi/acpica/tbprint.c
-@@ -127,18 +127,12 @@ acpi_tb_print_table_header(acpi_physical_address address,
- {
- struct acpi_table_header local_header;
-
-- /*
-- * The reason that we use ACPI_PRINTF_UINT and ACPI_FORMAT_TO_UINT is to
-- * support both 32-bit and 64-bit hosts/addresses in a consistent manner.
-- * The %p specifier does not emit uniform output on all hosts. On some,
-- * leading zeros are not supported.
-- */
- if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
-
- /* FACS only has signature and length fields */
-
-- ACPI_INFO((AE_INFO, "%-4.4s " ACPI_PRINTF_UINT " %06X",
-- header->signature, ACPI_FORMAT_TO_UINT(address),
-+ ACPI_INFO((AE_INFO, "%-4.4s 0x%8.8X%8.8X %06X",
-+ header->signature, ACPI_FORMAT_UINT64(address),
- header->length));
- } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
-
-@@ -149,9 +143,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
- header)->oem_id, ACPI_OEM_ID_SIZE);
- acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
-
-- ACPI_INFO((AE_INFO,
-- "RSDP " ACPI_PRINTF_UINT " %06X (v%.2d %-6.6s)",
-- ACPI_FORMAT_TO_UINT(address),
-+ ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
-+ ACPI_FORMAT_UINT64(address),
- (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
- revision >
- 0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
-@@ -165,9 +158,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
- acpi_tb_cleanup_table_header(&local_header, header);
-
- ACPI_INFO((AE_INFO,
-- "%-4.4s " ACPI_PRINTF_UINT
-+ "%-4.4s 0x%8.8X%8.8X"
- " %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
-- local_header.signature, ACPI_FORMAT_TO_UINT(address),
-+ local_header.signature, ACPI_FORMAT_UINT64(address),
- local_header.length, local_header.revision,
- local_header.oem_id, local_header.oem_table_id,
- local_header.oem_revision,
-diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
-index eac52cf14f1a..fa76a3603aa1 100644
---- a/drivers/acpi/acpica/tbxfroot.c
-+++ b/drivers/acpi/acpica/tbxfroot.c
-@@ -142,7 +142,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp)
- *
- ******************************************************************************/
-
--acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
-+acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
- {
- u8 *table_ptr;
- u8 *mem_rover;
-@@ -200,7 +200,8 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
- physical_address +=
- (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
-
-- *table_address = physical_address;
-+ *table_address =
-+ (acpi_physical_address) physical_address;
- return_ACPI_STATUS(AE_OK);
- }
- }
-@@ -233,7 +234,7 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
- (ACPI_HI_RSDP_WINDOW_BASE +
- ACPI_PTR_DIFF(mem_rover, table_ptr));
-
-- *table_address = physical_address;
-+ *table_address = (acpi_physical_address) physical_address;
- return_ACPI_STATUS(AE_OK);
- }
-
-diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
-index 1279f50da757..911ea8e7fe87 100644
---- a/drivers/acpi/acpica/utaddress.c
-+++ b/drivers/acpi/acpica/utaddress.c
-@@ -107,10 +107,10 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id,
- acpi_gbl_address_range_list[space_id] = range_info;
-
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-- "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
-+ "\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
- acpi_ut_get_node_name(range_info->region_node),
-- ACPI_CAST_PTR(void, address),
-- ACPI_CAST_PTR(void, range_info->end_address)));
-+ ACPI_FORMAT_UINT64(address),
-+ ACPI_FORMAT_UINT64(range_info->end_address)));
-
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(AE_OK);
-@@ -160,15 +160,13 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id,
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-- "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
-+ "\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
- acpi_ut_get_node_name(range_info->
- region_node),
-- ACPI_CAST_PTR(void,
-- range_info->
-- start_address),
-- ACPI_CAST_PTR(void,
-- range_info->
-- end_address)));
-+ ACPI_FORMAT_UINT64(range_info->
-+ start_address),
-+ ACPI_FORMAT_UINT64(range_info->
-+ end_address)));
-
- ACPI_FREE(range_info);
- return_VOID;
-@@ -245,16 +243,14 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
- region_node);
-
- ACPI_WARNING((AE_INFO,
-- "%s range 0x%p-0x%p conflicts with OpRegion 0x%p-0x%p (%s)",
-+ "%s range 0x%8.8X%8.8X-0x%8.8X%8.8X conflicts with OpRegion 0x%8.8X%8.8X-0x%8.8X%8.8X (%s)",
- acpi_ut_get_region_name(space_id),
-- ACPI_CAST_PTR(void, address),
-- ACPI_CAST_PTR(void, end_address),
-- ACPI_CAST_PTR(void,
-- range_info->
-- start_address),
-- ACPI_CAST_PTR(void,
-- range_info->
-- end_address),
-+ ACPI_FORMAT_UINT64(address),
-+ ACPI_FORMAT_UINT64(end_address),
-+ ACPI_FORMAT_UINT64(range_info->
-+ start_address),
-+ ACPI_FORMAT_UINT64(range_info->
-+ end_address),
- pathname));
- ACPI_FREE(pathname);
- }
-diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
-index 5589a6e2a023..8244f013f210 100644
---- a/drivers/acpi/resource.c
-+++ b/drivers/acpi/resource.c
-@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
- * @ares: Input ACPI resource object.
- * @types: Valid resource types of IORESOURCE_XXX
- *
-- * This is a hepler function to support acpi_dev_get_resources(), which filters
-+ * This is a helper function to support acpi_dev_get_resources(), which filters
- * ACPI resource objects according to resource types.
- */
- int acpi_dev_filter_resource_type(struct acpi_resource *ares,
-diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
-index 26e5b5060523..bf034f8b7c1a 100644
---- a/drivers/acpi/sbshc.c
-+++ b/drivers/acpi/sbshc.c
-@@ -14,6 +14,7 @@
- #include <linux/delay.h>
- #include <linux/module.h>
- #include <linux/interrupt.h>
-+#include <linux/dmi.h>
- #include "sbshc.h"
-
- #define PREFIX "ACPI: "
-@@ -87,6 +88,8 @@ enum acpi_smb_offset {
- ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
- };
-
-+static bool macbook;
-+
- static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
- {
- return ec_read(hc->offset + address, data);
-@@ -132,6 +135,8 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
- }
-
- mutex_lock(&hc->lock);
-+ if (macbook)
-+ udelay(5);
- if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
- goto end;
- if (temp) {
-@@ -257,12 +262,29 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
- acpi_handle handle, acpi_ec_query_func func,
- void *data);
-
-+static int macbook_dmi_match(const struct dmi_system_id *d)
-+{
-+ pr_debug("Detected MacBook, enabling workaround\n");
-+ macbook = true;
-+ return 0;
-+}
-+
-+static struct dmi_system_id acpi_smbus_dmi_table[] = {
-+ { macbook_dmi_match, "Apple MacBook", {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
-+ },
-+ { },
-+};
-+
- static int acpi_smbus_hc_add(struct acpi_device *device)
- {
- int status;
- unsigned long long val;
- struct acpi_smb_hc *hc;
-
-+ dmi_check_system(acpi_smbus_dmi_table);
-+
- if (!device)
- return -EINVAL;
-
-diff --git a/drivers/block/loop.c b/drivers/block/loop.c
-index d1f168b73634..773e964f14d9 100644
---- a/drivers/block/loop.c
-+++ b/drivers/block/loop.c
-@@ -1672,8 +1672,8 @@ out:
-
- static void loop_remove(struct loop_device *lo)
- {
-- del_gendisk(lo->lo_disk);
- blk_cleanup_queue(lo->lo_queue);
-+ del_gendisk(lo->lo_disk);
- blk_mq_free_tag_set(&lo->tag_set);
- put_disk(lo->lo_disk);
- kfree(lo);
-diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
-index 7722ed53bd65..af3bc7a8033b 100644
---- a/drivers/gpio/gpiolib-sysfs.c
-+++ b/drivers/gpio/gpiolib-sysfs.c
-@@ -551,6 +551,7 @@ static struct class gpio_class = {
- */
- int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
- {
-+ struct gpio_chip *chip;
- unsigned long flags;
- int status;
- const char *ioname = NULL;
-@@ -568,8 +569,16 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
- return -EINVAL;
- }
-
-+ chip = desc->chip;
-+
- mutex_lock(&sysfs_lock);
-
-+ /* check if chip is being removed */
-+ if (!chip || !chip->exported) {
-+ status = -ENODEV;
-+ goto fail_unlock;
-+ }
-+
- spin_lock_irqsave(&gpio_lock, flags);
- if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
- test_bit(FLAG_EXPORT, &desc->flags)) {
-@@ -783,12 +792,15 @@ void gpiochip_unexport(struct gpio_chip *chip)
- {
- int status;
- struct device *dev;
-+ struct gpio_desc *desc;
-+ unsigned int i;
-
- mutex_lock(&sysfs_lock);
- dev = class_find_device(&gpio_class, NULL, chip, match_export);
- if (dev) {
- put_device(dev);
- device_unregister(dev);
-+ /* prevent further gpiod exports */
- chip->exported = false;
- status = 0;
- } else
-@@ -797,6 +809,13 @@ void gpiochip_unexport(struct gpio_chip *chip)
-
- if (status)
- chip_dbg(chip, "%s: status %d\n", __func__, status);
-+
-+ /* unregister gpiod class devices owned by sysfs */
-+ for (i = 0; i < chip->ngpio; i++) {
-+ desc = &chip->desc[i];
-+ if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
-+ gpiod_free(desc);
-+ }
- }
-
- static int __init gpiolib_sysfs_init(void)
-diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
-index d8135adb2238..39762a7d2ec7 100644
---- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
-+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
-@@ -429,9 +429,10 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
-
- BUG_ON(!dqm || !qpd);
-
-- BUG_ON(!list_empty(&qpd->queues_list));
-+ pr_debug("In func %s\n", __func__);
-
-- pr_debug("kfd: In func %s\n", __func__);
-+ pr_debug("qpd->queues_list is %s\n",
-+ list_empty(&qpd->queues_list) ? "empty" : "not empty");
-
- retval = 0;
- mutex_lock(&dqm->lock);
-@@ -878,6 +879,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
- return -ENOMEM;
- }
-
-+ init_sdma_vm(dqm, q, qpd);
-+
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
- &q->gart_mqd_addr, &q->properties);
- if (retval != 0)
-diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
-index 10574a0c3a55..5769db4f51f3 100644
---- a/drivers/gpu/drm/drm_irq.c
-+++ b/drivers/gpu/drm/drm_irq.c
-@@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
-
- /* Reinitialize corresponding vblank timestamp if high-precision query
- * available. Skip this step if query unsupported or failed. Will
-- * reinitialize delayed at next vblank interrupt in that case.
-+ * reinitialize delayed at next vblank interrupt in that case and
-+ * assign 0 for now, to mark the vblanktimestamp as invalid.
- */
-- if (rc) {
-- tslot = atomic_read(&vblank->count) + diff;
-- vblanktimestamp(dev, crtc, tslot) = t_vblank;
-- }
-+ tslot = atomic_read(&vblank->count) + diff;
-+ vblanktimestamp(dev, crtc, tslot) = rc ? t_vblank : (struct timeval) {0, 0};
-
- smp_mb__before_atomic();
- atomic_add(diff, &vblank->count);
-diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
-index a74aaf9242b9..88b36a9173c9 100644
---- a/drivers/gpu/drm/i915/intel_dp.c
-+++ b/drivers/gpu/drm/i915/intel_dp.c
-@@ -1176,7 +1176,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
-
- pipe_config->has_dp_encoder = true;
- pipe_config->has_drrs = false;
-- pipe_config->has_audio = intel_dp->has_audio;
-+ pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
-
- if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
- intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
-@@ -2026,8 +2026,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
- int dotclock;
-
- tmp = I915_READ(intel_dp->output_reg);
-- if (tmp & DP_AUDIO_OUTPUT_ENABLE)
-- pipe_config->has_audio = true;
-+
-+ pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
-
- if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
- if (tmp & DP_SYNC_HS_HIGH)
-diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
-index 071b96d6e146..fbc2a83795fa 100644
---- a/drivers/gpu/drm/i915/intel_lvds.c
-+++ b/drivers/gpu/drm/i915/intel_lvds.c
-@@ -812,12 +812,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
- static const struct dmi_system_id intel_dual_link_lvds[] = {
- {
- .callback = intel_dual_link_lvds_callback,
-- .ident = "Apple MacBook Pro (Core i5/i7 Series)",
-+ .ident = "Apple MacBook Pro 15\" (2010)",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
-+ },
-+ },
-+ {
-+ .callback = intel_dual_link_lvds_callback,
-+ .ident = "Apple MacBook Pro 15\" (2011)",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
- },
- },
-+ {
-+ .callback = intel_dual_link_lvds_callback,
-+ .ident = "Apple MacBook Pro 15\" (2012)",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
-+ },
-+ },
- { } /* terminating entry */
- };
-
-@@ -847,6 +863,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
- if (i915.lvds_channel_mode > 0)
- return i915.lvds_channel_mode == 2;
-
-+ /* single channel LVDS is limited to 112 MHz */
-+ if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
-+ > 112999)
-+ return true;
-+
- if (dmi_check_system(intel_dual_link_lvds))
- return true;
-
-@@ -1104,6 +1125,8 @@ void intel_lvds_init(struct drm_device *dev)
- out:
- mutex_unlock(&dev->mode_config.mutex);
-
-+ intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
-+
- lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
- lvds_encoder->is_dual_link ? "dual" : "single");
-@@ -1118,7 +1141,6 @@ out:
- }
- drm_connector_register(connector);
-
-- intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
- intel_panel_setup_backlight(connector, INVALID_PIPE);
-
- return;
-diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
-index c0ecd128b14b..7348f222684d 100644
---- a/drivers/gpu/drm/radeon/radeon_asic.c
-+++ b/drivers/gpu/drm/radeon/radeon_asic.c
-@@ -1180,7 +1180,7 @@ static struct radeon_asic rs780_asic = {
- static struct radeon_asic_ring rv770_uvd_ring = {
- .ib_execute = &uvd_v1_0_ib_execute,
- .emit_fence = &uvd_v2_2_fence_emit,
-- .emit_semaphore = &uvd_v1_0_semaphore_emit,
-+ .emit_semaphore = &uvd_v2_2_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &uvd_v1_0_ring_test,
- .ib_test = &uvd_v1_0_ib_test,
-diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
-index 72bdd3bf0d8e..c2fd3a5e6c55 100644
---- a/drivers/gpu/drm/radeon/radeon_asic.h
-+++ b/drivers/gpu/drm/radeon/radeon_asic.h
-@@ -919,6 +919,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
- int uvd_v2_2_resume(struct radeon_device *rdev);
- void uvd_v2_2_fence_emit(struct radeon_device *rdev,
- struct radeon_fence *fence);
-+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
-+ struct radeon_ring *ring,
-+ struct radeon_semaphore *semaphore,
-+ bool emit_wait);
-
- /* uvd v3.1 */
- bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
-diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
-index b7d33a13db9f..b7c6bb69f3c7 100644
---- a/drivers/gpu/drm/radeon/radeon_audio.c
-+++ b/drivers/gpu/drm/radeon/radeon_audio.c
-@@ -464,6 +464,10 @@ void radeon_audio_detect(struct drm_connector *connector,
- return;
-
- rdev = connector->encoder->dev->dev_private;
-+
-+ if (!radeon_audio_chipset_supported(rdev))
-+ return;
-+
- radeon_encoder = to_radeon_encoder(connector->encoder);
- dig = radeon_encoder->enc_priv;
-
-diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
-index b292aca0f342..edafd3c2b170 100644
---- a/drivers/gpu/drm/radeon/radeon_ttm.c
-+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
-@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
- {
- struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
- struct radeon_ttm_tt *gtt = (void *)ttm;
-- struct scatterlist *sg;
-- int i;
-+ struct sg_page_iter sg_iter;
-
- int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
- enum dma_data_direction direction = write ?
-@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
- /* free the sg table and pages again */
- dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
-
-- for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
-- struct page *page = sg_page(sg);
--
-+ for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
-+ struct page *page = sg_page_iter_page(&sg_iter);
- if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
- set_page_dirty(page);
-
-diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
-index c10b2aec6450..cd630287cf0a 100644
---- a/drivers/gpu/drm/radeon/radeon_uvd.c
-+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
-@@ -396,6 +396,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
- return 0;
- }
-
-+static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
-+ unsigned stream_type)
-+{
-+ switch (stream_type) {
-+ case 0: /* H264 */
-+ case 1: /* VC1 */
-+ /* always supported */
-+ return 0;
-+
-+ case 3: /* MPEG2 */
-+ case 4: /* MPEG4 */
-+ /* only since UVD 3 */
-+ if (p->rdev->family >= CHIP_PALM)
-+ return 0;
-+
-+ /* fall through */
-+ default:
-+ DRM_ERROR("UVD codec not supported by hardware %d!\n",
-+ stream_type);
-+ return -EINVAL;
-+ }
-+}
-+
- static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
- unsigned offset, unsigned buf_sizes[])
- {
-@@ -436,50 +459,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
- return -EINVAL;
- }
-
-- if (msg_type == 1) {
-- /* it's a decode msg, calc buffer sizes */
-- r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
-- /* calc image size (width * height) */
-- img_size = msg[6] * msg[7];
-+ switch (msg_type) {
-+ case 0:
-+ /* it's a create msg, calc image size (width * height) */
-+ img_size = msg[7] * msg[8];
-+
-+ r = radeon_uvd_validate_codec(p, msg[4]);
-+ radeon_bo_kunmap(bo);
-+ if (r)
-+ return r;
-+
-+ /* try to alloc a new handle */
-+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
-+ DRM_ERROR("Handle 0x%x already in use!\n", handle);
-+ return -EINVAL;
-+ }
-+
-+ if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
-+ p->rdev->uvd.filp[i] = p->filp;
-+ p->rdev->uvd.img_size[i] = img_size;
-+ return 0;
-+ }
-+ }
-+
-+ DRM_ERROR("No more free UVD handles!\n");
-+ return -EINVAL;
-+
-+ case 1:
-+ /* it's a decode msg, validate codec and calc buffer sizes */
-+ r = radeon_uvd_validate_codec(p, msg[4]);
-+ if (!r)
-+ r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
- radeon_bo_kunmap(bo);
- if (r)
- return r;
-
-- } else if (msg_type == 2) {
-+ /* validate the handle */
-+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
-+ if (p->rdev->uvd.filp[i] != p->filp) {
-+ DRM_ERROR("UVD handle collision detected!\n");
-+ return -EINVAL;
-+ }
-+ return 0;
-+ }
-+ }
-+
-+ DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
-+ return -ENOENT;
-+
-+ case 2:
- /* it's a destroy msg, free the handle */
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
- atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
- radeon_bo_kunmap(bo);
- return 0;
-- } else {
-- /* it's a create msg, calc image size (width * height) */
-- img_size = msg[7] * msg[8];
-- radeon_bo_kunmap(bo);
-
-- if (msg_type != 0) {
-- DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
-- return -EINVAL;
-- }
--
-- /* it's a create msg, no special handling needed */
-- }
--
-- /* create or decode, validate the handle */
-- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-- if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
-- return 0;
-- }
-+ default:
-
-- /* handle not found try to alloc a new one */
-- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-- if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
-- p->rdev->uvd.filp[i] = p->filp;
-- p->rdev->uvd.img_size[i] = img_size;
-- return 0;
-- }
-+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
-+ return -EINVAL;
- }
-
-- DRM_ERROR("No more free UVD handles!\n");
-+ BUG();
- return -EINVAL;
- }
-
-diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
-index 976fe432f4e2..7ed561225007 100644
---- a/drivers/gpu/drm/radeon/radeon_vce.c
-+++ b/drivers/gpu/drm/radeon/radeon_vce.c
-@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
- *
- * @p: parser context
- * @handle: handle to validate
-+ * @allocated: allocated a new handle?
- *
- * Validates the handle and return the found session index or -EINVAL
- * we we don't have another free session index.
- */
--int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
-+static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
-+ uint32_t handle, bool *allocated)
- {
- unsigned i;
-
-+ *allocated = false;
-+
- /* validate the handle */
- for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-- if (atomic_read(&p->rdev->vce.handles[i]) == handle)
-+ if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
-+ if (p->rdev->vce.filp[i] != p->filp) {
-+ DRM_ERROR("VCE handle collision detected!\n");
-+ return -EINVAL;
-+ }
- return i;
-+ }
- }
-
- /* handle not found try to alloc a new one */
-@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
- if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
- p->rdev->vce.filp[i] = p->filp;
- p->rdev->vce.img_size[i] = 0;
-+ *allocated = true;
- return i;
- }
- }
-@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
- int radeon_vce_cs_parse(struct radeon_cs_parser *p)
- {
- int session_idx = -1;
-- bool destroyed = false;
-+ bool destroyed = false, created = false, allocated = false;
- uint32_t tmp, handle = 0;
- uint32_t *size = &tmp;
-- int i, r;
-+ int i, r = 0;
-
- while (p->idx < p->chunk_ib->length_dw) {
- uint32_t len = radeon_get_ib_value(p, p->idx);
-@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
-
- if ((len < 8) || (len & 3)) {
- DRM_ERROR("invalid VCE command length (%d)!\n", len);
-- return -EINVAL;
-+ r = -EINVAL;
-+ goto out;
- }
-
- if (destroyed) {
- DRM_ERROR("No other command allowed after destroy!\n");
-- return -EINVAL;
-+ r = -EINVAL;
-+ goto out;
- }
-
- switch (cmd) {
- case 0x00000001: // session
- handle = radeon_get_ib_value(p, p->idx + 2);
-- session_idx = radeon_vce_validate_handle(p, handle);
-+ session_idx = radeon_vce_validate_handle(p, handle,
-+ &allocated);
- if (session_idx < 0)
- return session_idx;
- size = &p->rdev->vce.img_size[session_idx];
-@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
- break;
-
- case 0x01000001: // create
-+ created = true;
-+ if (!allocated) {
-+ DRM_ERROR("Handle already in use!\n");
-+ r = -EINVAL;
-+ goto out;
-+ }
-+
- *size = radeon_get_ib_value(p, p->idx + 8) *
- radeon_get_ib_value(p, p->idx + 10) *
- 8 * 3 / 2;
-@@ -577,12 +597,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
- r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
- *size);
- if (r)
-- return r;
-+ goto out;
-
- r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
- *size / 3);
- if (r)
-- return r;
-+ goto out;
- break;
-
- case 0x02000001: // destroy
-@@ -593,7 +613,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
- r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
- *size * 2);
- if (r)
-- return r;
-+ goto out;
- break;
-
- case 0x05000004: // video bitstream buffer
-@@ -601,36 +621,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
- r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
- tmp);
- if (r)
-- return r;
-+ goto out;
- break;
-
- case 0x05000005: // feedback buffer
- r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
- 4096);
- if (r)
-- return r;
-+ goto out;
- break;
-
- default:
- DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
-- return -EINVAL;
-+ r = -EINVAL;
-+ goto out;
- }
-
- if (session_idx == -1) {
- DRM_ERROR("no session command at start of IB\n");
-- return -EINVAL;
-+ r = -EINVAL;
-+ goto out;
- }
-
- p->idx += len / 4;
- }
-
-- if (destroyed) {
-- /* IB contains a destroy msg, free the handle */
-+ if (allocated && !created) {
-+ DRM_ERROR("New session without create command!\n");
-+ r = -ENOENT;
-+ }
-+
-+out:
-+ if ((!r && destroyed) || (r && allocated)) {
-+ /*
-+		 * IB contains a destroy msg, or we allocated a
-+		 * handle and then hit an error; either way, free the handle
-+ */
- for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
- atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
- }
-
-- return 0;
-+ return r;
- }
-
- /**
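
The radeon_vce.c change above funnels every error path through a single goto-out exit, so a session handle allocated mid-parse is always released when a later command fails. A minimal, self-contained C sketch of that cleanup pattern; all names are invented stand-ins, not radeon code:

	#include <stdbool.h>

	static bool alloc_handle(void) { return true; }	/* stand-in: grab a session slot */
	static void free_handle(void) { }		/* stand-in: release the slot */

	static int parse(int ncmds)
	{
		bool allocated = false;
		int r = 0;

		for (int i = 0; i < ncmds; i++) {
			if (i == 0)
				allocated = alloc_handle();
			if (i == 3) {		/* simulated invalid command */
				r = -1;		/* -EINVAL in the real parser */
				goto out;	/* never return past the cleanup */
			}
		}
	out:
		if (r && allocated)
			free_handle();		/* error after allocation: undo it */
		return r;
	}
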
-diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
-index 3cf1e2921545..9ef2064b1c9c 100644
---- a/drivers/gpu/drm/radeon/rv770d.h
-+++ b/drivers/gpu/drm/radeon/rv770d.h
-@@ -989,6 +989,9 @@
- ((n) & 0x3FFF) << 16)
-
- /* UVD */
-+#define UVD_SEMA_ADDR_LOW 0xef00
-+#define UVD_SEMA_ADDR_HIGH 0xef04
-+#define UVD_SEMA_CMD 0xef08
- #define UVD_GPCOM_VCPU_CMD 0xef0c
- #define UVD_GPCOM_VCPU_DATA0 0xef10
- #define UVD_GPCOM_VCPU_DATA1 0xef14
-diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
-index e72b3cb59358..c6b1cbca47fc 100644
---- a/drivers/gpu/drm/radeon/uvd_v1_0.c
-+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
-@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
- {
-- uint64_t addr = semaphore->gpu_addr;
--
-- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
-- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
--
-- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
-- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
--
-- radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
-- radeon_ring_write(ring, emit_wait ? 1 : 0);
--
-- return true;
-+ /* disable semaphores for UVD V1 hardware */
-+ return false;
- }
-
- /**
-diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
-index 89193519f8a1..7ed778cec7c6 100644
---- a/drivers/gpu/drm/radeon/uvd_v2_2.c
-+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
-@@ -60,6 +60,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
- }
-
- /**
-+ * uvd_v2_2_semaphore_emit - emit semaphore command
-+ *
-+ * @rdev: radeon_device pointer
-+ * @ring: radeon_ring pointer
-+ * @semaphore: semaphore to emit commands for
-+ * @emit_wait: true if we should emit a wait command
-+ *
-+ * Emit a semaphore command (either wait or signal) to the UVD ring.
-+ */
-+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
-+ struct radeon_ring *ring,
-+ struct radeon_semaphore *semaphore,
-+ bool emit_wait)
-+{
-+ uint64_t addr = semaphore->gpu_addr;
-+
-+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
-+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-+
-+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
-+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-+
-+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
-+ radeon_ring_write(ring, emit_wait ? 1 : 0);
-+
-+ return true;
-+}
-+
-+/**
- * uvd_v2_2_resume - memory controller programming
- *
- * @rdev: radeon_device pointer
-diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
-index d570030d899c..06441a43c3aa 100644
---- a/drivers/infiniband/core/cma.c
-+++ b/drivers/infiniband/core/cma.c
-@@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
- memcpy(&ib->sib_addr, &path->dgid, 16);
- }
-
-+static __be16 ss_get_port(const struct sockaddr_storage *ss)
-+{
-+ if (ss->ss_family == AF_INET)
-+ return ((struct sockaddr_in *)ss)->sin_port;
-+ else if (ss->ss_family == AF_INET6)
-+ return ((struct sockaddr_in6 *)ss)->sin6_port;
-+ BUG();
-+}
-+
- static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
- struct cma_hdr *hdr)
- {
-- struct sockaddr_in *listen4, *ip4;
-+ struct sockaddr_in *ip4;
-
-- listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
- ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
-- ip4->sin_family = listen4->sin_family;
-+ ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
-- ip4->sin_port = listen4->sin_port;
-+ ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
-
- ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
-- ip4->sin_family = listen4->sin_family;
-+ ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
- ip4->sin_port = hdr->port;
- }
-@@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i
- static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
- struct cma_hdr *hdr)
- {
-- struct sockaddr_in6 *listen6, *ip6;
-+ struct sockaddr_in6 *ip6;
-
-- listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
- ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
-- ip6->sin6_family = listen6->sin6_family;
-+ ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->dst_addr.ip6;
-- ip6->sin6_port = listen6->sin6_port;
-+ ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
-
- ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
-- ip6->sin6_family = listen6->sin6_family;
-+ ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->src_addr.ip6;
- ip6->sin6_port = hdr->port;
- }
-diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
-index 414739295d04..713a96237a80 100644
---- a/drivers/md/dm-crypt.c
-+++ b/drivers/md/dm-crypt.c
-@@ -925,10 +925,11 @@ static int crypt_convert(struct crypt_config *cc,
-
- switch (r) {
- /* async */
-- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&ctx->restart);
- reinit_completion(&ctx->restart);
-+		/* fall through */
-+ case -EINPROGRESS:
- ctx->req = NULL;
- ctx->cc_sector++;
- continue;
-@@ -1345,8 +1346,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
- struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
- struct crypt_config *cc = io->cc;
-
-- if (error == -EINPROGRESS)
-+ if (error == -EINPROGRESS) {
-+ complete(&ctx->restart);
- return;
-+ }
-
- if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
- error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
-@@ -1357,15 +1360,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
- crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
-
- if (!atomic_dec_and_test(&ctx->cc_pending))
-- goto done;
-+ return;
-
- if (bio_data_dir(io->base_bio) == READ)
- kcryptd_crypt_read_done(io);
- else
- kcryptd_crypt_write_io_submit(io, 1);
--done:
-- if (!completion_done(&ctx->restart))
-- complete(&ctx->restart);
- }
-
- static void kcryptd_crypt(struct work_struct *work)
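
In the dm-crypt hunks above, -EBUSY (request pushed to the crypto backlog) and -EINPROGRESS (request accepted) are separated again: only the backlogged case waits on the restart completion, which the async callback now fires exactly when a backlogged request reports -EINPROGRESS. A toy, self-contained analogue of the submit loop's control flow; submit() and wait_for_slot() are invented stand-ins:

	#include <errno.h>

	static int submit(int i) { return (i % 3) ? -EINPROGRESS : -EBUSY; }
	static void wait_for_slot(void) { /* block on the restart completion */ }

	static int convert(int nsectors)
	{
		for (int i = 0; i < nsectors; ) {
			switch (submit(i)) {
			case -EBUSY:		/* backlogged: wait for a free slot */
				wait_for_slot();
				/* fall through: then advance like -EINPROGRESS */
			case -EINPROGRESS:
				i++;
				continue;
			default:
				return -EIO;
			}
		}
		return 0;
	}
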
-diff --git a/drivers/md/md.c b/drivers/md/md.c
-index e6178787ce3d..e47d1dd046da 100644
---- a/drivers/md/md.c
-+++ b/drivers/md/md.c
-@@ -4754,12 +4754,12 @@ static void md_free(struct kobject *ko)
- if (mddev->sysfs_state)
- sysfs_put(mddev->sysfs_state);
-
-+ if (mddev->queue)
-+ blk_cleanup_queue(mddev->queue);
- if (mddev->gendisk) {
- del_gendisk(mddev->gendisk);
- put_disk(mddev->gendisk);
- }
-- if (mddev->queue)
-- blk_cleanup_queue(mddev->queue);
-
- kfree(mddev);
- }
-diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
-index dd5b1415f974..f902eb4ee569 100644
---- a/drivers/media/platform/marvell-ccic/mcam-core.c
-+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
-@@ -116,8 +116,8 @@ static struct mcam_format_struct {
- .planar = false,
- },
- {
-- .desc = "UYVY 4:2:2",
-- .pixelformat = V4L2_PIX_FMT_UYVY,
-+ .desc = "YVYU 4:2:2",
-+ .pixelformat = V4L2_PIX_FMT_YVYU,
- .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
- .bpp = 2,
- .planar = false,
-@@ -748,7 +748,7 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
-
- switch (fmt->pixelformat) {
- case V4L2_PIX_FMT_YUYV:
-- case V4L2_PIX_FMT_UYVY:
-+ case V4L2_PIX_FMT_YVYU:
- widthy = fmt->width * 2;
- widthuv = 0;
- break;
-@@ -784,15 +784,15 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- mcam_reg_write_mask(cam, REG_CTRL0,
-- C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK);
-+ C0_DF_YUV | C0_YUV_420PL | C0_YUVE_VYUY, C0_DF_MASK);
- break;
- case V4L2_PIX_FMT_YUYV:
- mcam_reg_write_mask(cam, REG_CTRL0,
-- C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK);
-+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_NOSWAP, C0_DF_MASK);
- break;
-- case V4L2_PIX_FMT_UYVY:
-+ case V4L2_PIX_FMT_YVYU:
- mcam_reg_write_mask(cam, REG_CTRL0,
-- C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
-+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_SWAP24, C0_DF_MASK);
- break;
- case V4L2_PIX_FMT_JPEG:
- mcam_reg_write_mask(cam, REG_CTRL0,
-diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
-index aa0c6eac254a..7ffdf4dbaf8c 100644
---- a/drivers/media/platform/marvell-ccic/mcam-core.h
-+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
-@@ -330,10 +330,10 @@ int mccic_resume(struct mcam_camera *cam);
- #define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
- #define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
- #define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
--#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
--#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
--#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
--#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
-+#define C0_YUVE_NOSWAP 0x00000000 /* no bytes swapping */
-+#define C0_YUVE_SWAP13 0x00010000 /* swap byte 1 and 3 */
-+#define C0_YUVE_SWAP24 0x00020000 /* swap byte 2 and 4 */
-+#define C0_YUVE_SWAP1324 0x00030000 /* swap bytes 1&3 and 2&4 */
- /* Bayer bits 18,19 if needed */
- #define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */
- #define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */
-diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
-index c69afb5e264e..ed2e71a74a58 100644
---- a/drivers/mmc/card/block.c
-+++ b/drivers/mmc/card/block.c
-@@ -1029,6 +1029,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
- md->reset_done &= ~type;
- }
-
-+int mmc_access_rpmb(struct mmc_queue *mq)
-+{
-+ struct mmc_blk_data *md = mq->data;
-+ /*
-+ * If this is a RPMB partition access, return ture
-+	 * If this is an RPMB partition access, return true
-+ if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
-+ return true;
-+
-+ return false;
-+}
-+
- static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
- {
- struct mmc_blk_data *md = mq->data;
-diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
-index 236d194c2883..8efa3684aef8 100644
---- a/drivers/mmc/card/queue.c
-+++ b/drivers/mmc/card/queue.c
-@@ -38,7 +38,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
- return BLKPREP_KILL;
- }
-
-- if (mq && mmc_card_removed(mq->card))
-+ if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
- return BLKPREP_KILL;
-
- req->cmd_flags |= REQ_DONTPREP;
-diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
-index 5752d50049a3..99e6521e6169 100644
---- a/drivers/mmc/card/queue.h
-+++ b/drivers/mmc/card/queue.h
-@@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
- extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
- extern void mmc_packed_clean(struct mmc_queue *);
-
-+extern int mmc_access_rpmb(struct mmc_queue *);
-+
- #endif
-diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
-index 23f10f72e5f3..57a8d00672d3 100644
---- a/drivers/mmc/core/core.c
-+++ b/drivers/mmc/core/core.c
-@@ -2648,6 +2648,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
- switch (mode) {
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
-+ case PM_RESTORE_PREPARE:
- spin_lock_irqsave(&host->lock, flags);
- host->rescan_disable = 1;
- spin_unlock_irqrestore(&host->lock, flags);
-diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
-index 7d9d6a321521..5165ae75d540 100644
---- a/drivers/mmc/host/sh_mmcif.c
-+++ b/drivers/mmc/host/sh_mmcif.c
-@@ -1402,7 +1402,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
- host = mmc_priv(mmc);
- host->mmc = mmc;
- host->addr = reg;
-- host->timeout = msecs_to_jiffies(1000);
-+ host->timeout = msecs_to_jiffies(10000);
- host->ccs_enable = !pd || !pd->ccs_unsupported;
- host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
-
-diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
-index 89dca77ca038..18ee2089df4a 100644
---- a/drivers/pinctrl/core.c
-+++ b/drivers/pinctrl/core.c
-@@ -1110,7 +1110,7 @@ void devm_pinctrl_put(struct pinctrl *p)
- EXPORT_SYMBOL_GPL(devm_pinctrl_put);
-
- int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
-- bool dup, bool locked)
-+ bool dup)
- {
- int i, ret;
- struct pinctrl_maps *maps_node;
-@@ -1178,11 +1178,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
- maps_node->maps = maps;
- }
-
-- if (!locked)
-- mutex_lock(&pinctrl_maps_mutex);
-+ mutex_lock(&pinctrl_maps_mutex);
- list_add_tail(&maps_node->node, &pinctrl_maps);
-- if (!locked)
-- mutex_unlock(&pinctrl_maps_mutex);
-+ mutex_unlock(&pinctrl_maps_mutex);
-
- return 0;
- }
-@@ -1197,7 +1195,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
- int pinctrl_register_mappings(struct pinctrl_map const *maps,
- unsigned num_maps)
- {
-- return pinctrl_register_map(maps, num_maps, true, false);
-+ return pinctrl_register_map(maps, num_maps, true);
- }
-
- void pinctrl_unregister_map(struct pinctrl_map const *map)
-diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
-index 75476b3d87da..b24ea846c867 100644
---- a/drivers/pinctrl/core.h
-+++ b/drivers/pinctrl/core.h
-@@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev,
- }
-
- int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
-- bool dup, bool locked);
-+ bool dup);
- void pinctrl_unregister_map(struct pinctrl_map const *map);
-
- extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
-diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
-index eda13de2e7c0..0bbf7d71b281 100644
---- a/drivers/pinctrl/devicetree.c
-+++ b/drivers/pinctrl/devicetree.c
-@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
- dt_map->num_maps = num_maps;
- list_add_tail(&dt_map->node, &p->dt_maps);
-
-- return pinctrl_register_map(map, num_maps, false, true);
-+ return pinctrl_register_map(map, num_maps, false);
- }
-
- struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
-diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
-index 43e04af39e09..cb70ced7e0db 100644
---- a/drivers/rtc/rtc-armada38x.c
-+++ b/drivers/rtc/rtc-armada38x.c
-@@ -40,6 +40,13 @@ struct armada38x_rtc {
- void __iomem *regs;
- void __iomem *regs_soc;
- spinlock_t lock;
-+ /*
-+ * While setting the time, the RTC TIME register should not be
-+	 * accessed. Setting the RTC time involves sleeping for
-+	 * 100ms, so a mutex instead of a spinlock is used to protect
-+	 * it.
-+ */
-+ struct mutex mutex_time;
- int irq;
- };
-
-@@ -59,8 +66,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
- struct armada38x_rtc *rtc = dev_get_drvdata(dev);
- unsigned long time, time_check, flags;
-
-- spin_lock_irqsave(&rtc->lock, flags);
--
-+ mutex_lock(&rtc->mutex_time);
- time = readl(rtc->regs + RTC_TIME);
- /*
- * WA for failing time set attempts. As stated in HW ERRATA if
-@@ -71,7 +77,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
- if ((time_check - time) > 1)
- time_check = readl(rtc->regs + RTC_TIME);
-
-- spin_unlock_irqrestore(&rtc->lock, flags);
-+ mutex_unlock(&rtc->mutex_time);
-
- rtc_time_to_tm(time_check, tm);
-
-@@ -94,19 +100,12 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
- * then wait for 100ms before writing to the time register to be
- * sure that the data will be taken into account.
- */
-- spin_lock_irqsave(&rtc->lock, flags);
--
-+ mutex_lock(&rtc->mutex_time);
- rtc_delayed_write(0, rtc, RTC_STATUS);
--
-- spin_unlock_irqrestore(&rtc->lock, flags);
--
- msleep(100);
--
-- spin_lock_irqsave(&rtc->lock, flags);
--
- rtc_delayed_write(time, rtc, RTC_TIME);
-+ mutex_unlock(&rtc->mutex_time);
-
-- spin_unlock_irqrestore(&rtc->lock, flags);
- out:
- return ret;
- }
-@@ -230,6 +229,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
- return -ENOMEM;
-
- spin_lock_init(&rtc->lock);
-+ mutex_init(&rtc->mutex_time);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc");
- rtc->regs = devm_ioremap_resource(&pdev->dev, res);
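
The rtc-armada38x hunks swap the spinlock for a mutex on the time paths because set_time sleeps for 100ms with the lock held, and sleeping in atomic context (under a spinlock) is forbidden; a mutex may be held across a sleep. A rough sketch of the resulting shape, using the real mutex/delay primitives but hypothetical write_status()/write_time() register helpers:

	#include <linux/mutex.h>
	#include <linux/delay.h>

	static DEFINE_MUTEX(time_lock);		/* safe to hold across msleep() */

	static void set_time_sketch(unsigned long t)
	{
		mutex_lock(&time_lock);
		write_status(0);		/* hypothetical: reset RTC status */
		msleep(100);			/* sleeping here rules out a spinlock */
		write_time(t);			/* hypothetical: latch the new time */
		mutex_unlock(&time_lock);
	}
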
-diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
-index f1e57425e39f..5bab1c684bb1 100644
---- a/drivers/tty/hvc/hvc_xen.c
-+++ b/drivers/tty/hvc/hvc_xen.c
-@@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void)
- return 0;
- }
-
-+static void xen_console_update_evtchn(struct xencons_info *info)
-+{
-+ if (xen_hvm_domain()) {
-+ uint64_t v;
-+ int err;
-+
-+ err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
-+ if (!err && v)
-+ info->evtchn = v;
-+ } else
-+ info->evtchn = xen_start_info->console.domU.evtchn;
-+}
-+
- void xen_console_resume(void)
- {
- struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
-- if (info != NULL && info->irq)
-+ if (info != NULL && info->irq) {
-+ if (!xen_initial_domain())
-+ xen_console_update_evtchn(info);
- rebind_evtchn_irq(info->evtchn, info->irq);
-+ }
- }
-
- static void xencons_disconnect_backend(struct xencons_info *info)
-diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
-index 4cde85501444..837d1778970b 100644
---- a/drivers/vfio/vfio.c
-+++ b/drivers/vfio/vfio.c
-@@ -711,6 +711,8 @@ void *vfio_del_group_dev(struct device *dev)
- void *device_data = device->device_data;
- struct vfio_unbound_dev *unbound;
- unsigned int i = 0;
-+ long ret;
-+ bool interrupted = false;
-
- /*
- * The group exists so long as we have a device reference. Get
-@@ -756,9 +758,22 @@ void *vfio_del_group_dev(struct device *dev)
-
- vfio_device_put(device);
-
-- } while (wait_event_interruptible_timeout(vfio.release_q,
-- !vfio_dev_present(group, dev),
-- HZ * 10) <= 0);
-+ if (interrupted) {
-+ ret = wait_event_timeout(vfio.release_q,
-+ !vfio_dev_present(group, dev), HZ * 10);
-+ } else {
-+ ret = wait_event_interruptible_timeout(vfio.release_q,
-+ !vfio_dev_present(group, dev), HZ * 10);
-+ if (ret == -ERESTARTSYS) {
-+ interrupted = true;
-+ dev_warn(dev,
-+ "Device is currently in use, task"
-+ " \"%s\" (%d) "
-+ "blocked until device is released",
-+ current->comm, task_pid_nr(current));
-+ }
-+ }
-+ } while (ret <= 0);
-
- vfio_group_put(group);
-
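
For the vfio wait loop above: wait_event_interruptible_timeout() returns the remaining jiffies (>= 1) once the condition turns true, 0 on timeout, and -ERESTARTSYS when a signal arrives, so the patch switches to the uninterruptible variant after the first signal instead of looping on -ERESTARTSYS forever. A compressed sketch of that state machine, with wq and done() standing in for vfio.release_q and the !vfio_dev_present() check:

	long ret;
	bool interrupted = false;

	do {
		if (interrupted)
			ret = wait_event_timeout(wq, done(), HZ * 10);
		else
			ret = wait_event_interruptible_timeout(wq, done(), HZ * 10);
		if (ret == -ERESTARTSYS)
			interrupted = true;	/* signal seen: warn once, keep waiting */
	} while (ret <= 0);			/* <= 0: timed out or interrupted */
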
-diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
-index 5db43fc100a4..7dd46312c180 100644
---- a/drivers/xen/events/events_2l.c
-+++ b/drivers/xen/events/events_2l.c
-@@ -345,6 +345,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
- return IRQ_HANDLED;
- }
-
-+static void evtchn_2l_resume(void)
-+{
-+ int i;
-+
-+ for_each_online_cpu(i)
-+ memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
-+ EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
-+}
-+
- static const struct evtchn_ops evtchn_ops_2l = {
- .max_channels = evtchn_2l_max_channels,
- .nr_channels = evtchn_2l_max_channels,
-@@ -356,6 +365,7 @@ static const struct evtchn_ops evtchn_ops_2l = {
- .mask = evtchn_2l_mask,
- .unmask = evtchn_2l_unmask,
- .handle_events = evtchn_2l_handle_events,
-+ .resume = evtchn_2l_resume,
- };
-
- void __init xen_evtchn_2l_init(void)
-diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
-index 70fba973a107..2b8553bd8715 100644
---- a/drivers/xen/events/events_base.c
-+++ b/drivers/xen/events/events_base.c
-@@ -529,8 +529,8 @@ static unsigned int __startup_pirq(unsigned int irq)
- if (rc)
- goto err;
-
-- bind_evtchn_to_cpu(evtchn, 0);
- info->evtchn = evtchn;
-+ bind_evtchn_to_cpu(evtchn, 0);
-
- rc = xen_evtchn_port_setup(info);
- if (rc)
-@@ -1279,8 +1279,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
-
- mutex_unlock(&irq_mapping_update_lock);
-
-- /* new event channels are always bound to cpu 0 */
-- irq_set_affinity(irq, cpumask_of(0));
-+ bind_evtchn_to_cpu(evtchn, info->cpu);
-+ /* This will be deferred until interrupt is processed */
-+ irq_set_affinity(irq, cpumask_of(info->cpu));
-
- /* Unmask the event channel. */
- enable_irq(irq);
-diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
-index 75fe3d466515..9c234209d8b5 100644
---- a/drivers/xen/xen-pciback/conf_space.c
-+++ b/drivers/xen/xen-pciback/conf_space.c
-@@ -16,8 +16,8 @@
- #include "conf_space.h"
- #include "conf_space_quirks.h"
-
--bool permissive;
--module_param(permissive, bool, 0644);
-+bool xen_pcibk_permissive;
-+module_param_named(permissive, xen_pcibk_permissive, bool, 0644);
-
- /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
- * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
-@@ -262,7 +262,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
- * This means that some fields may still be read-only because
- * they have entries in the config_field list that intercept
- * the write and do nothing. */
-- if (dev_data->permissive || permissive) {
-+ if (dev_data->permissive || xen_pcibk_permissive) {
- switch (size) {
- case 1:
- err = pci_write_config_byte(dev, offset,
-diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
-index 2e1d73d1d5d0..62461a8ba1d6 100644
---- a/drivers/xen/xen-pciback/conf_space.h
-+++ b/drivers/xen/xen-pciback/conf_space.h
-@@ -64,7 +64,7 @@ struct config_field_entry {
- void *data;
- };
-
--extern bool permissive;
-+extern bool xen_pcibk_permissive;
-
- #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
-
-diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
-index 2d7369391472..f8baf463dd35 100644
---- a/drivers/xen/xen-pciback/conf_space_header.c
-+++ b/drivers/xen/xen-pciback/conf_space_header.c
-@@ -105,7 +105,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
-
- cmd->val = value;
-
-- if (!permissive && (!dev_data || !dev_data->permissive))
-+ if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
- return 0;
-
- /* Only allow the guest to control certain bits. */
-diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
-index 564b31584860..5390a674b5e3 100644
---- a/drivers/xen/xenbus/xenbus_probe.c
-+++ b/drivers/xen/xenbus/xenbus_probe.c
-@@ -57,6 +57,7 @@
- #include <xen/xen.h>
- #include <xen/xenbus.h>
- #include <xen/events.h>
-+#include <xen/xen-ops.h>
- #include <xen/page.h>
-
- #include <xen/hvm.h>
-@@ -735,6 +736,30 @@ static int __init xenstored_local_init(void)
- return err;
- }
-
-+static int xenbus_resume_cb(struct notifier_block *nb,
-+ unsigned long action, void *data)
-+{
-+ int err = 0;
-+
-+ if (xen_hvm_domain()) {
-+ uint64_t v;
-+
-+ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
-+ if (!err && v)
-+ xen_store_evtchn = v;
-+ else
-+ pr_warn("Cannot update xenstore event channel: %d\n",
-+ err);
-+ } else
-+ xen_store_evtchn = xen_start_info->store_evtchn;
-+
-+ return err;
-+}
-+
-+static struct notifier_block xenbus_resume_nb = {
-+ .notifier_call = xenbus_resume_cb,
-+};
-+
- static int __init xenbus_init(void)
- {
- int err = 0;
-@@ -793,6 +818,10 @@ static int __init xenbus_init(void)
- goto out_error;
- }
-
-+ if ((xen_store_domain_type != XS_LOCAL) &&
-+ (xen_store_domain_type != XS_UNKNOWN))
-+ xen_resume_notifier_register(&xenbus_resume_nb);
-+
- #ifdef CONFIG_XEN_COMPAT_XENFS
- /*
- * Create xenfs mountpoint in /proc for compatibility with
-diff --git a/fs/coredump.c b/fs/coredump.c
-index f319926ddf8c..bbbe139ab280 100644
---- a/fs/coredump.c
-+++ b/fs/coredump.c
-@@ -657,7 +657,7 @@ void do_coredump(const siginfo_t *siginfo)
- */
- if (!uid_eq(inode->i_uid, current_fsuid()))
- goto close_fail;
-- if (!cprm.file->f_op->write)
-+ if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
- goto close_fail;
- if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
- goto close_fail;
-diff --git a/fs/namei.c b/fs/namei.c
-index caa38a24e1f7..50a8583e8156 100644
---- a/fs/namei.c
-+++ b/fs/namei.c
-@@ -3228,7 +3228,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
-
- if (unlikely(file->f_flags & __O_TMPFILE)) {
- error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
-- goto out;
-+ goto out2;
- }
-
- error = path_init(dfd, pathname->name, flags, nd);
-@@ -3258,6 +3258,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
- }
- out:
- path_cleanup(nd);
-+out2:
- if (!(opened & FILE_OPENED)) {
- BUG_ON(!error);
- put_filp(file);
-diff --git a/fs/namespace.c b/fs/namespace.c
-index 4622ee32a5e2..38ed1e1bed41 100644
---- a/fs/namespace.c
-+++ b/fs/namespace.c
-@@ -3178,6 +3178,12 @@ bool fs_fully_visible(struct file_system_type *type)
- if (mnt->mnt.mnt_sb->s_type != type)
- continue;
-
-+	/* This mount is not fully visible if its root directory
-+ * is not the root directory of the filesystem.
-+ */
-+ if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
-+ continue;
-+
- /* This mount is not fully visible if there are any child mounts
- * that cover anything except for empty directories.
- */
-diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
-index ecdbae19a766..090d8ce25bd1 100644
---- a/fs/nilfs2/btree.c
-+++ b/fs/nilfs2/btree.c
-@@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
- nchildren = nilfs_btree_node_get_nchildren(node);
-
- if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
-- level > NILFS_BTREE_LEVEL_MAX ||
-+ level >= NILFS_BTREE_LEVEL_MAX ||
- nchildren < 0 ||
- nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
- pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
-diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
-index a6944b25fd5b..fdf4b41d0609 100644
---- a/fs/ocfs2/dlm/dlmmaster.c
-+++ b/fs/ocfs2/dlm/dlmmaster.c
-@@ -757,6 +757,19 @@ lookup:
- if (tmpres) {
- spin_unlock(&dlm->spinlock);
- spin_lock(&tmpres->spinlock);
-+
-+ /*
-+	 * Right after the dlm spinlock was released, dlm_thread could
-+	 * have purged the lockres. Check if the lockres got unhashed.
-+	 * If so, start over.
-+ */
-+ if (hlist_unhashed(&tmpres->hash_node)) {
-+ spin_unlock(&tmpres->spinlock);
-+ dlm_lockres_put(tmpres);
-+ tmpres = NULL;
-+ goto lookup;
-+ }
-+
- /* Wait on the thread that is mastering the resource */
- if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
- __dlm_wait_on_lockres(tmpres);
-diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
-index d56f5d722138..65aa4fa0ae4e 100644
---- a/include/acpi/acpixf.h
-+++ b/include/acpi/acpixf.h
-@@ -431,13 +431,13 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void))
- ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void))
-
- ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
-- acpi_find_root_pointer(acpi_size * rsdp_address))
--
-+ acpi_find_root_pointer(acpi_physical_address *
-+ rsdp_address))
- ACPI_EXTERNAL_RETURN_STATUS(acpi_status
-- acpi_get_table_header(acpi_string signature,
-- u32 instance,
-- struct acpi_table_header
-- *out_table_header))
-+ acpi_get_table_header(acpi_string signature,
-+ u32 instance,
-+ struct acpi_table_header
-+ *out_table_header))
- ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_get_table(acpi_string signature, u32 instance,
- struct acpi_table_header
-diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
-index ff3fea3194c6..9abb763e4b86 100644
---- a/include/linux/nilfs2_fs.h
-+++ b/include/linux/nilfs2_fs.h
-@@ -460,7 +460,7 @@ struct nilfs_btree_node {
- /* level */
- #define NILFS_BTREE_LEVEL_DATA 0
- #define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
--#define NILFS_BTREE_LEVEL_MAX 14
-+#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */
-
- /**
- * struct nilfs_palloc_group_desc - block group descriptor
-diff --git a/mm/memory-failure.c b/mm/memory-failure.c
-index d487f8dc6d39..72a5224c8084 100644
---- a/mm/memory-failure.c
-+++ b/mm/memory-failure.c
-@@ -1141,10 +1141,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
- * The check (unnecessarily) ignores LRU pages being isolated and
- * walked by the page reclaim code, however that's not a big loss.
- */
-- if (!PageHuge(p) && !PageTransTail(p)) {
-- if (!PageLRU(p))
-- shake_page(p, 0);
-- if (!PageLRU(p)) {
-+ if (!PageHuge(p)) {
-+ if (!PageLRU(hpage))
-+ shake_page(hpage, 0);
-+ if (!PageLRU(hpage)) {
- /*
- * shake_page could have turned it free.
- */
-@@ -1721,12 +1721,12 @@ int soft_offline_page(struct page *page, int flags)
- } else if (ret == 0) { /* for free pages */
- if (PageHuge(page)) {
- set_page_hwpoison_huge_page(hpage);
-- dequeue_hwpoisoned_huge_page(hpage);
-- atomic_long_add(1 << compound_order(hpage),
-+ if (!dequeue_hwpoisoned_huge_page(hpage))
-+ atomic_long_add(1 << compound_order(hpage),
- &num_poisoned_pages);
- } else {
-- SetPageHWPoison(page);
-- atomic_long_inc(&num_poisoned_pages);
-+ if (!TestSetPageHWPoison(page))
-+ atomic_long_inc(&num_poisoned_pages);
- }
- }
- unset_migratetype_isolate(page, MIGRATE_MOVABLE);
-diff --git a/mm/page-writeback.c b/mm/page-writeback.c
-index 644bcb665773..ad05f2f7bb65 100644
---- a/mm/page-writeback.c
-+++ b/mm/page-writeback.c
-@@ -580,7 +580,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
- long x;
-
- x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
-- limit - setpoint + 1);
-+ (limit - setpoint) | 1);
- pos_ratio = x;
- pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
- pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
-@@ -807,7 +807,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
- * scale global setpoint to bdi's:
- * bdi_setpoint = setpoint * bdi_thresh / thresh
- */
-- x = div_u64((u64)bdi_thresh << 16, thresh + 1);
-+ x = div_u64((u64)bdi_thresh << 16, thresh | 1);
- bdi_setpoint = setpoint * (u64)x >> 16;
- /*
- * Use span=(8*write_bw) in single bdi case as indicated by
-@@ -822,7 +822,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
-
- if (bdi_dirty < x_intercept - span / 4) {
- pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
-- x_intercept - bdi_setpoint + 1);
-+ (x_intercept - bdi_setpoint) | 1);
- } else
- pos_ratio /= 4;
-
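
On the page-writeback hunks: the nonzero-divisor guard changes from d + 1 to d | 1. OR-ing in the low bit can never produce 0 and perturbs the value by at most 1, whereas d + 1 wraps to 0 when the unsigned difference is already the type's maximum value, which is presumably the division-by-zero these hunks close. A tiny self-contained demonstration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t d[] = { 0, 1, 7, UINT64_MAX };

		for (int i = 0; i < 4; i++)
			printf("d=%llu  d|1=%llu  d+1=%llu\n",
			       (unsigned long long)d[i],
			       (unsigned long long)(d[i] | 1),	/* never 0 */
			       (unsigned long long)(d[i] + 1));	/* wraps to 0 at max */
		return 0;
	}
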
-diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c
-index c0eea1dfe90f..f19da4b47c1d 100644
---- a/sound/oss/sequencer.c
-+++ b/sound/oss/sequencer.c
-@@ -681,13 +681,8 @@ static int seq_timing_event(unsigned char *event_rec)
- break;
-
- case TMR_ECHO:
-- if (seq_mode == SEQ_2)
-- seq_copy_to_input(event_rec, 8);
-- else
-- {
-- parm = (parm << 8 | SEQ_ECHO);
-- seq_copy_to_input((unsigned char *) &parm, 4);
-- }
-+ parm = (parm << 8 | SEQ_ECHO);
-+ seq_copy_to_input((unsigned char *) &parm, 4);
- break;
-
- default:;
-@@ -1324,7 +1319,6 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a
- int mode = translate_mode(file);
- struct synth_info inf;
- struct seq_event_rec event_rec;
-- unsigned long flags;
- int __user *p = arg;
-
- orig_dev = dev = dev >> 4;
-@@ -1479,9 +1473,7 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a
- case SNDCTL_SEQ_OUTOFBAND:
- if (copy_from_user(&event_rec, arg, sizeof(event_rec)))
- return -EFAULT;
-- spin_lock_irqsave(&lock,flags);
- play_event(event_rec.arr);
-- spin_unlock_irqrestore(&lock,flags);
- return 0;
-
- case SNDCTL_MIDI_INFO:
diff --git a/1004_linux-4.0.5.patch b/1004_linux-4.0.5.patch
deleted file mode 100644
index 84509c00..00000000
--- a/1004_linux-4.0.5.patch
+++ /dev/null
@@ -1,4937 +0,0 @@
-diff --git a/Documentation/hwmon/tmp401 b/Documentation/hwmon/tmp401
-index 8eb88e974055..711f75e189eb 100644
---- a/Documentation/hwmon/tmp401
-+++ b/Documentation/hwmon/tmp401
-@@ -20,7 +20,7 @@ Supported chips:
- Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html
- * Texas Instruments TMP435
- Prefix: 'tmp435'
-- Addresses scanned: I2C 0x37, 0x48 - 0x4f
-+ Addresses scanned: I2C 0x48 - 0x4f
- Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html
-
- Authors:
-diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
-index 1e52d67d0abf..dbe6623fed1c 100644
---- a/Documentation/serial/tty.txt
-+++ b/Documentation/serial/tty.txt
-@@ -198,6 +198,9 @@ TTY_IO_ERROR If set, causes all subsequent userspace read/write
-
- TTY_OTHER_CLOSED Device is a pty and the other side has closed.
-
-+TTY_OTHER_DONE Device is a pty and the other side has closed and
-+ all pending input processing has been completed.
-+
- TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into
- smaller chunks.
-
-diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
-index 53838d9c6295..c59bd9bc41ef 100644
---- a/Documentation/virtual/kvm/mmu.txt
-+++ b/Documentation/virtual/kvm/mmu.txt
-@@ -169,6 +169,10 @@ Shadow pages contain the following information:
- Contains the value of cr4.smep && !cr0.wp for which the page is valid
- (pages for which this is true are different from other pages; see the
- treatment of cr0.wp=0 below).
-+ role.smap_andnot_wp:
-+ Contains the value of cr4.smap && !cr0.wp for which the page is valid
-+ (pages for which this is true are different from other pages; see the
-+ treatment of cr0.wp=0 below).
- gfn:
- Either the guest page table containing the translations shadowed by this
- page, or the base page frame for linear translations. See role.direct.
-@@ -344,10 +348,16 @@ on fault type:
-
- (user write faults generate a #PF)
-
--In the first case there is an additional complication if CR4.SMEP is
--enabled: since we've turned the page into a kernel page, the kernel may now
--execute it. We handle this by also setting spte.nx. If we get a user
--fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back.
-+In the first case there are two additional complications:
-+- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
-+ the kernel may now execute it. We handle this by also setting spte.nx.
-+ If we get a user fetch or read fault, we'll change spte.u=1 and
-+ spte.nx=gpte.nx back.
-+- if CR4.SMAP is disabled: since the page has been changed to a kernel
-+  page, it cannot be reused while CR4.SMAP is enabled. We record
-+  CR4.SMAP && !CR0.WP in the shadow page's role to avoid this case. Note
-+  that we need not handle the case where CR4.SMAP is enabled, since KVM
-+  directly injects a #PF into the guest on the failed permission check.
-
- To prevent an spte that was converted into a kernel page with cr0.wp=0
- from being written by the kernel after cr0.wp has changed to 1, we make
-diff --git a/Makefile b/Makefile
-index 3d16bcc87585..1880cf77059b 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 0
--SUBLEVEL = 4
-+SUBLEVEL = 5
- EXTRAVERSION =
- NAME = Hurr durr I'ma sheep
-
-diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
-index 067551b6920a..9917a45fc430 100644
---- a/arch/arc/include/asm/atomic.h
-+++ b/arch/arc/include/asm/atomic.h
-@@ -99,7 +99,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
- atomic_ops_unlock(flags); \
- }
-
--#define ATOMIC_OP_RETURN(op, c_op) \
-+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
- static inline int atomic_##op##_return(int i, atomic_t *v) \
- { \
- unsigned long flags; \
-diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
-index a1c776b8dcec..992ea0b063d5 100644
---- a/arch/arm/boot/dts/Makefile
-+++ b/arch/arm/boot/dts/Makefile
-@@ -215,7 +215,7 @@ dtb-$(CONFIG_SOC_IMX25) += \
- imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \
- imx25-karo-tx25.dtb \
- imx25-pdk.dtb
--dtb-$(CONFIG_SOC_IMX31) += \
-+dtb-$(CONFIG_SOC_IMX27) += \
- imx27-apf27.dtb \
- imx27-apf27dev.dtb \
- imx27-eukrea-mbimxsd27-baseboard.dtb \
-diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
-index 173ffa479ad3..792394dd0f2a 100644
---- a/arch/arm/boot/dts/exynos4412-trats2.dts
-+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
-@@ -736,7 +736,7 @@
-
- display-timings {
- timing-0 {
-- clock-frequency = <0>;
-+ clock-frequency = <57153600>;
- hactive = <720>;
- vactive = <1280>;
- hfront-porch = <5>;
-diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
-index 4b063b68db44..9ce1d2128749 100644
---- a/arch/arm/boot/dts/imx27.dtsi
-+++ b/arch/arm/boot/dts/imx27.dtsi
-@@ -531,7 +531,7 @@
-
- fec: ethernet@1002b000 {
- compatible = "fsl,imx27-fec";
-- reg = <0x1002b000 0x4000>;
-+ reg = <0x1002b000 0x1000>;
- interrupts = <50>;
- clocks = <&clks IMX27_CLK_FEC_IPG_GATE>,
- <&clks IMX27_CLK_FEC_AHB_GATE>;
-diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
-index f8ccc21fa032..4e7f40c577e6 100644
---- a/arch/arm/kernel/entry-common.S
-+++ b/arch/arm/kernel/entry-common.S
-@@ -33,7 +33,9 @@ ret_fast_syscall:
- UNWIND(.fnstart )
- UNWIND(.cantunwind )
- disable_irq @ disable interrupts
-- ldr r1, [tsk, #TI_FLAGS]
-+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
-+ tst r1, #_TIF_SYSCALL_WORK
-+ bne __sys_trace_return
- tst r1, #_TIF_WORK_MASK
- bne fast_work_pending
- asm_trace_hardirqs_on
-diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
-index 37266a826437..1f02bcb350e5 100644
---- a/arch/arm/mach-exynos/pm_domains.c
-+++ b/arch/arm/mach-exynos/pm_domains.c
-@@ -169,7 +169,7 @@ no_clk:
- args.np = np;
- args.args_count = 0;
- child_domain = of_genpd_get_from_provider(&args);
-- if (!child_domain)
-+ if (IS_ERR(child_domain))
- continue;
-
- if (of_parse_phandle_with_args(np, "power-domains",
-@@ -177,7 +177,7 @@ no_clk:
- continue;
-
- parent_domain = of_genpd_get_from_provider(&args);
-- if (!parent_domain)
-+ if (IS_ERR(parent_domain))
- continue;
-
- if (pm_genpd_add_subdomain(parent_domain, child_domain))
-diff --git a/arch/arm/mach-exynos/sleep.S b/arch/arm/mach-exynos/sleep.S
-index 31d25834b9c4..cf950790fbdc 100644
---- a/arch/arm/mach-exynos/sleep.S
-+++ b/arch/arm/mach-exynos/sleep.S
-@@ -23,14 +23,7 @@
- #define CPU_MASK 0xff0ffff0
- #define CPU_CORTEX_A9 0x410fc090
-
-- /*
-- * The following code is located into the .data section. This is to
-- * allow l2x0_regs_phys to be accessed with a relative load while we
-- * can't rely on any MMU translation. We could have put l2x0_regs_phys
-- * in the .text section as well, but some setups might insist on it to
-- * be truly read-only. (Reference from: arch/arm/kernel/sleep.S)
-- */
-- .data
-+ .text
- .align
-
- /*
-@@ -69,10 +62,12 @@ ENTRY(exynos_cpu_resume_ns)
- cmp r0, r1
- bne skip_cp15
-
-- adr r0, cp15_save_power
-+ adr r0, _cp15_save_power
- ldr r1, [r0]
-- adr r0, cp15_save_diag
-+ ldr r1, [r0, r1]
-+ adr r0, _cp15_save_diag
- ldr r2, [r0]
-+ ldr r2, [r0, r2]
- mov r0, #SMC_CMD_C15RESUME
- dsb
- smc #0
-@@ -118,14 +113,20 @@ skip_l2x0:
- skip_cp15:
- b cpu_resume
- ENDPROC(exynos_cpu_resume_ns)
-+
-+ .align
-+_cp15_save_power:
-+ .long cp15_save_power - .
-+_cp15_save_diag:
-+ .long cp15_save_diag - .
-+#ifdef CONFIG_CACHE_L2X0
-+1: .long l2x0_saved_regs - .
-+#endif /* CONFIG_CACHE_L2X0 */
-+
-+ .data
- .globl cp15_save_diag
- cp15_save_diag:
- .long 0 @ cp15 diagnostic
- .globl cp15_save_power
- cp15_save_power:
- .long 0 @ cp15 power control
--
--#ifdef CONFIG_CACHE_L2X0
-- .align
--1: .long l2x0_saved_regs - .
--#endif /* CONFIG_CACHE_L2X0 */
-diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
-index 4e6ef896c619..7186382672b5 100644
---- a/arch/arm/mm/mmu.c
-+++ b/arch/arm/mm/mmu.c
-@@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
- }
-
- /*
-- * Find the first non-section-aligned page, and point
-+ * Find the first non-pmd-aligned page, and point
- * memblock_limit at it. This relies on rounding the
-- * limit down to be section-aligned, which happens at
-- * the end of this function.
-+ * limit down to be pmd-aligned, which happens at the
-+ * end of this function.
- *
- * With this algorithm, the start or end of almost any
-- * bank can be non-section-aligned. The only exception
-- * is that the start of the bank 0 must be section-
-+ * bank can be non-pmd-aligned. The only exception is
-+ * that the start of the bank 0 must be section-
- * aligned, since otherwise memory would need to be
- * allocated when mapping the start of bank 0, which
- * occurs before any free memory is mapped.
- */
- if (!memblock_limit) {
-- if (!IS_ALIGNED(block_start, SECTION_SIZE))
-+ if (!IS_ALIGNED(block_start, PMD_SIZE))
- memblock_limit = block_start;
-- else if (!IS_ALIGNED(block_end, SECTION_SIZE))
-+ else if (!IS_ALIGNED(block_end, PMD_SIZE))
- memblock_limit = arm_lowmem_limit;
- }
-
-@@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
- high_memory = __va(arm_lowmem_limit - 1) + 1;
-
- /*
-- * Round the memblock limit down to a section size. This
-+ * Round the memblock limit down to a pmd size. This
- * helps to ensure that we will allocate memory from the
-- * last full section, which should be mapped.
-+ * last full pmd, which should be mapped.
- */
- if (memblock_limit)
-- memblock_limit = round_down(memblock_limit, SECTION_SIZE);
-+ memblock_limit = round_down(memblock_limit, PMD_SIZE);
- if (!memblock_limit)
- memblock_limit = arm_lowmem_limit;
-
-diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
-index edba042b2325..dc6a4842683a 100644
---- a/arch/arm64/net/bpf_jit_comp.c
-+++ b/arch/arm64/net/bpf_jit_comp.c
-@@ -487,7 +487,7 @@ emit_cond_jmp:
- return -EINVAL;
- }
-
-- imm64 = (u64)insn1.imm << 32 | imm;
-+ imm64 = (u64)insn1.imm << 32 | (u32)imm;
- emit_a64_mov_i64(dst, imm64, ctx);
-
- return 1;
-diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
-index d2c09f6475c5..f20cedcb50f1 100644
---- a/arch/mips/kernel/elf.c
-+++ b/arch/mips/kernel/elf.c
-@@ -76,14 +76,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
-
- /* Lets see if this is an O32 ELF */
- if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
-- /* FR = 1 for N32 */
-- if (ehdr32->e_flags & EF_MIPS_ABI2)
-- state->overall_fp_mode = FP_FR1;
-- else
-- /* Set a good default FPU mode for O32 */
-- state->overall_fp_mode = cpu_has_mips_r6 ?
-- FP_FRE : FP_FR0;
--
- if (ehdr32->e_flags & EF_MIPS_FP64) {
- /*
- * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
-@@ -104,9 +96,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
- (char *)&abiflags,
- sizeof(abiflags));
- } else {
-- /* FR=1 is really the only option for 64-bit */
-- state->overall_fp_mode = FP_FR1;
--
- if (phdr64->p_type != PT_MIPS_ABIFLAGS)
- return 0;
- if (phdr64->p_filesz < sizeof(abiflags))
-@@ -147,6 +136,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
- struct elf32_hdr *ehdr = _ehdr;
- struct mode_req prog_req, interp_req;
- int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
-+ bool is_mips64;
-
- if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
- return 0;
-@@ -162,10 +152,22 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
- abi0 = abi1 = fp_abi;
- }
-
-- /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
-- max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
-- (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
-- MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
-+ is_mips64 = (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ||
-+ (ehdr->e_flags & EF_MIPS_ABI2);
-+
-+ if (is_mips64) {
-+ /* MIPS64 code always uses FR=1, thus the default is easy */
-+ state->overall_fp_mode = FP_FR1;
-+
-+ /* Disallow access to the various FPXX & FP64 ABIs */
-+ max_abi = MIPS_ABI_FP_SOFT;
-+ } else {
-+ /* Default to a mode capable of running code expecting FR=0 */
-+ state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
-+
-+ /* Allow all ABIs we know about */
-+ max_abi = MIPS_ABI_FP_64A;
-+ }
-
- if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
- (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
-diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
-index 3391d061eccc..78c9fd32c554 100644
---- a/arch/parisc/include/asm/elf.h
-+++ b/arch/parisc/include/asm/elf.h
-@@ -348,6 +348,10 @@ struct pt_regs; /* forward declaration... */
-
- #define ELF_HWCAP 0
-
-+#define STACK_RND_MASK (is_32bit_task() ? \
-+ 0x7ff >> (PAGE_SHIFT - 12) : \
-+ 0x3ffff >> (PAGE_SHIFT - 12))
-+
- struct mm_struct;
- extern unsigned long arch_randomize_brk(struct mm_struct *);
- #define arch_randomize_brk arch_randomize_brk
-diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
-index e1ffea2f9a0b..5aba01ac457f 100644
---- a/arch/parisc/kernel/sys_parisc.c
-+++ b/arch/parisc/kernel/sys_parisc.c
-@@ -77,6 +77,9 @@ static unsigned long mmap_upper_limit(void)
- if (stack_base > STACK_SIZE_MAX)
- stack_base = STACK_SIZE_MAX;
-
-+ /* Add space for stack randomization. */
-+ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
-+
- return PAGE_ALIGN(STACK_TOP - stack_base);
- }
-
-diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
-index 15c99b649b04..b2eb4686bd8f 100644
---- a/arch/powerpc/kernel/mce.c
-+++ b/arch/powerpc/kernel/mce.c
-@@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
- uint64_t nip, uint64_t addr)
- {
- uint64_t srr1;
-- int index = __this_cpu_inc_return(mce_nest_count);
-+ int index = __this_cpu_inc_return(mce_nest_count) - 1;
- struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
-
- /*
-@@ -184,7 +184,7 @@ void machine_check_queue_event(void)
- if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
- return;
-
-- index = __this_cpu_inc_return(mce_queue_count);
-+ index = __this_cpu_inc_return(mce_queue_count) - 1;
- /* If queue is full, just return for now. */
- if (index >= MAX_MC_EVT) {
- __this_cpu_dec(mce_queue_count);
-diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
-index f096e72262f4..1db685104ffc 100644
---- a/arch/powerpc/kernel/vmlinux.lds.S
-+++ b/arch/powerpc/kernel/vmlinux.lds.S
-@@ -213,6 +213,7 @@ SECTIONS
- *(.opd)
- }
-
-+ . = ALIGN(256);
- .got : AT(ADDR(.got) - LOAD_OFFSET) {
- __toc_start = .;
- #ifndef CONFIG_RELOCATABLE
-diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
-index 7940dc90e80b..b258110da952 100644
---- a/arch/s390/crypto/ghash_s390.c
-+++ b/arch/s390/crypto/ghash_s390.c
-@@ -16,11 +16,12 @@
- #define GHASH_DIGEST_SIZE 16
-
- struct ghash_ctx {
-- u8 icv[16];
-- u8 key[16];
-+ u8 key[GHASH_BLOCK_SIZE];
- };
-
- struct ghash_desc_ctx {
-+ u8 icv[GHASH_BLOCK_SIZE];
-+ u8 key[GHASH_BLOCK_SIZE];
- u8 buffer[GHASH_BLOCK_SIZE];
- u32 bytes;
- };
-@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
- static int ghash_init(struct shash_desc *desc)
- {
- struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
-
- memset(dctx, 0, sizeof(*dctx));
-+ memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
-
- return 0;
- }
-@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
- }
-
- memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
-- memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
-
- return 0;
- }
-@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
- {
- struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
- unsigned int n;
- u8 *buf = dctx->buffer;
- int ret;
-@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
- src += n;
-
- if (!dctx->bytes) {
-- ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
-+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
- GHASH_BLOCK_SIZE);
- if (ret != GHASH_BLOCK_SIZE)
- return -EIO;
-@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
-
- n = srclen & ~(GHASH_BLOCK_SIZE - 1);
- if (n) {
-- ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
-+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
- if (ret != n)
- return -EIO;
- src += n;
-@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
- return 0;
- }
-
--static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
-+static int ghash_flush(struct ghash_desc_ctx *dctx)
- {
- u8 *buf = dctx->buffer;
- int ret;
-@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
-
- memset(pos, 0, dctx->bytes);
-
-- ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
-+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
- if (ret != GHASH_BLOCK_SIZE)
- return -EIO;
-+
-+ dctx->bytes = 0;
- }
-
-- dctx->bytes = 0;
- return 0;
- }
-
- static int ghash_final(struct shash_desc *desc, u8 *dst)
- {
- struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
- int ret;
-
-- ret = ghash_flush(ctx, dctx);
-+ ret = ghash_flush(dctx);
- if (!ret)
-- memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
-+ memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
- return ret;
- }
-
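
The ghash_s390 hunks move the key and chaining value (icv) out of the shared per-tfm context into the per-request descriptor context, so two requests hashing on the same transform no longer clobber each other's state; ghash_init() copies the key into the fresh descriptor. A minimal sketch of that shared-config/private-state split (struct names invented):

	#include <string.h>

	struct tfm { unsigned char key[16]; };			/* shared, set once */
	struct req { unsigned char key[16], icv[16]; };		/* private per request */

	static void req_init(struct req *r, const struct tfm *t)
	{
		memcpy(r->key, t->key, sizeof(r->key));	/* snapshot the shared key */
		memset(r->icv, 0, sizeof(r->icv));	/* fresh chaining value */
	}
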
-diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
-index e08ec38f8c6e..e10112da008d 100644
---- a/arch/s390/include/asm/pgtable.h
-+++ b/arch/s390/include/asm/pgtable.h
-@@ -600,7 +600,7 @@ static inline int pmd_large(pmd_t pmd)
- return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
- }
-
--static inline int pmd_pfn(pmd_t pmd)
-+static inline unsigned long pmd_pfn(pmd_t pmd)
- {
- unsigned long origin_mask;
-
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index a236e39cc385..1c0fb570b5c2 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -212,6 +212,7 @@ union kvm_mmu_page_role {
- unsigned nxe:1;
- unsigned cr0_wp:1;
- unsigned smep_andnot_wp:1;
-+ unsigned smap_andnot_wp:1;
- };
- };
-
-@@ -404,6 +405,7 @@ struct kvm_vcpu_arch {
- struct kvm_mmu_memory_cache mmu_page_header_cache;
-
- struct fpu guest_fpu;
-+ bool eager_fpu;
- u64 xcr0;
- u64 guest_supported_xcr0;
- u32 guest_xstate_size;
-@@ -735,6 +737,7 @@ struct kvm_x86_ops {
- void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
- unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
- void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
-+ void (*fpu_activate)(struct kvm_vcpu *vcpu);
- void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
-
- void (*tlb_flush)(struct kvm_vcpu *vcpu);
-diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
-index 3c036cb4a370..11dd8f23fcea 100644
---- a/arch/x86/kernel/cpu/mcheck/mce.c
-+++ b/arch/x86/kernel/cpu/mcheck/mce.c
-@@ -705,6 +705,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
- struct pt_regs *regs)
- {
- int i, ret = 0;
-+ char *tmp;
-
- for (i = 0; i < mca_cfg.banks; i++) {
- m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
-@@ -713,9 +714,11 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
- if (quirk_no_way_out)
- quirk_no_way_out(i, m, regs);
- }
-- if (mce_severity(m, mca_cfg.tolerant, msg, true) >=
-- MCE_PANIC_SEVERITY)
-+
-+ if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
-+ *msg = tmp;
- ret = 1;
-+ }
- }
- return ret;
- }
-diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
-index c4bb8b8e5017..76d8cbe5a10f 100644
---- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
-+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
-@@ -680,6 +680,7 @@ static int __init rapl_pmu_init(void)
- break;
- case 60: /* Haswell */
- case 69: /* Haswell-Celeron */
-+ case 61: /* Broadwell */
- rapl_cntr_mask = RAPL_IDX_HSW;
- rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
- break;
-diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
-index d5651fce0b71..f341d56b7883 100644
---- a/arch/x86/kernel/i387.c
-+++ b/arch/x86/kernel/i387.c
-@@ -169,6 +169,21 @@ static void init_thread_xstate(void)
- xstate_size = sizeof(struct i387_fxsave_struct);
- else
- xstate_size = sizeof(struct i387_fsave_struct);
-+
-+ /*
-+ * Quirk: we don't yet handle the XSAVES* instructions
-+ * correctly, as we don't correctly convert between
-+ * standard and compacted format when interfacing
-+ * with user-space - so disable it for now.
-+ *
-+ * The difference is small: with recent CPUs the
-+ * compacted format is only marginally smaller than
-+ * the standard FPU state format.
-+ *
-+ * ( This is easy to backport while we are fixing
-+ * XSAVES* support. )
-+ */
-+ setup_clear_cpu_cap(X86_FEATURE_XSAVES);
- }
-
- /*
-diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
-index 8a80737ee6e6..307f9ec28e08 100644
---- a/arch/x86/kvm/cpuid.c
-+++ b/arch/x86/kvm/cpuid.c
-@@ -16,6 +16,8 @@
- #include <linux/module.h>
- #include <linux/vmalloc.h>
- #include <linux/uaccess.h>
-+#include <asm/i387.h> /* For use_eager_fpu. Ugh! */
-+#include <asm/fpu-internal.h> /* For use_eager_fpu. Ugh! */
- #include <asm/user.h>
- #include <asm/xsave.h>
- #include "cpuid.h"
-@@ -95,6 +97,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
- if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
- best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
-
-+ vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);
-+
- /*
- * The existing code assumes virtual address is 48-bit in the canonical
- * address checks; exit if it is ever changed.
-diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
-index 4452eedfaedd..9bec2b8cdced 100644
---- a/arch/x86/kvm/cpuid.h
-+++ b/arch/x86/kvm/cpuid.h
-@@ -111,4 +111,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_RTM));
- }
-+
-+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
-+{
-+ struct kvm_cpuid_entry2 *best;
-+
-+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
-+ return best && (best->ebx & bit(X86_FEATURE_MPX));
-+}
- #endif
-diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
-index cee759299a35..88ee9282a57e 100644
---- a/arch/x86/kvm/mmu.c
-+++ b/arch/x86/kvm/mmu.c
-@@ -3736,8 +3736,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
- }
- }
-
--void update_permission_bitmask(struct kvm_vcpu *vcpu,
-- struct kvm_mmu *mmu, bool ept)
-+static void update_permission_bitmask(struct kvm_vcpu *vcpu,
-+ struct kvm_mmu *mmu, bool ept)
- {
- unsigned bit, byte, pfec;
- u8 map;
-@@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
- void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
- {
- bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
-+ bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
- struct kvm_mmu *context = &vcpu->arch.mmu;
-
- MMU_WARN_ON(VALID_PAGE(context->root_hpa));
-@@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
- context->base_role.cr0_wp = is_write_protection(vcpu);
- context->base_role.smep_andnot_wp
- = smep && !is_write_protection(vcpu);
-+ context->base_role.smap_andnot_wp
-+ = smap && !is_write_protection(vcpu);
- }
- EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
-
-@@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *new, int bytes)
- {
- gfn_t gfn = gpa >> PAGE_SHIFT;
-- union kvm_mmu_page_role mask = { .word = 0 };
- struct kvm_mmu_page *sp;
- LIST_HEAD(invalid_list);
- u64 entry, gentry, *spte;
- int npte;
- bool remote_flush, local_flush, zap_page;
-+ union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
-+ .cr0_wp = 1,
-+ .cr4_pae = 1,
-+ .nxe = 1,
-+ .smep_andnot_wp = 1,
-+ .smap_andnot_wp = 1,
-+ };
-
- /*
- * If we don't have indirect shadow pages, it means no page is
-@@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- ++vcpu->kvm->stat.mmu_pte_write;
- kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
-
-- mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
- for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
- if (detect_write_misaligned(sp, gpa, bytes) ||
- detect_write_flooding(sp)) {
-diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
-index c7d65637c851..0ada65ecddcf 100644
---- a/arch/x86/kvm/mmu.h
-+++ b/arch/x86/kvm/mmu.h
-@@ -71,8 +71,6 @@ enum {
- int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
- void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
- void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
--void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-- bool ept);
-
- static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
- {
-@@ -166,6 +164,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- int index = (pfec >> 1) +
- (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
-
-+ WARN_ON(pfec & PFERR_RSVD_MASK);
-+
- return (mmu->permissions[index] >> pte_access) & 1;
- }
-
-diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
-index fd49c867b25a..6e6d115fe9b5 100644
---- a/arch/x86/kvm/paging_tmpl.h
-+++ b/arch/x86/kvm/paging_tmpl.h
-@@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
- mmu_is_nested(vcpu));
- if (likely(r != RET_MMIO_PF_INVALID))
- return r;
-+
-+ /*
-+	 * A page fault with PFEC.RSVD = 1 is caused by a shadow
-+	 * page fault and should not be used to walk the guest
-+	 * page table.
-+ */
-+ error_code &= ~PFERR_RSVD_MASK;
- };
-
- r = mmu_topup_memory_caches(vcpu);
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index cc618c882f90..a4e62fcfabcb 100644
---- a/arch/x86/kvm/svm.c
-+++ b/arch/x86/kvm/svm.c
-@@ -4374,6 +4374,7 @@ static struct kvm_x86_ops svm_x86_ops = {
- .cache_reg = svm_cache_reg,
- .get_rflags = svm_get_rflags,
- .set_rflags = svm_set_rflags,
-+ .fpu_activate = svm_fpu_activate,
- .fpu_deactivate = svm_fpu_deactivate,
-
- .tlb_flush = svm_flush_tlb,
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index a60bd3aa0965..5318d64674b0 100644
---- a/arch/x86/kvm/vmx.c
-+++ b/arch/x86/kvm/vmx.c
-@@ -10179,6 +10179,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
- .cache_reg = vmx_cache_reg,
- .get_rflags = vmx_get_rflags,
- .set_rflags = vmx_set_rflags,
-+ .fpu_activate = vmx_fpu_activate,
- .fpu_deactivate = vmx_fpu_deactivate,
-
- .tlb_flush = vmx_flush_tlb,
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index e222ba5d2beb..8838057da9c3 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
- int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
- {
- unsigned long old_cr4 = kvm_read_cr4(vcpu);
-- unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
-- X86_CR4_PAE | X86_CR4_SMEP;
-+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
-+ X86_CR4_SMEP | X86_CR4_SMAP;
-+
- if (cr4 & CR4_RESERVED_BITS)
- return 1;
-
-@@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
- (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
- kvm_mmu_reset_context(vcpu);
-
-- if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
-- update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
--
- if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
- kvm_update_cpuid(vcpu);
-
-@@ -6141,6 +6139,8 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
- return;
-
- page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
-+ if (is_error_page(page))
-+ return;
- kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
-
- /*
-@@ -6996,7 +6996,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
- fpu_save_init(&vcpu->arch.guest_fpu);
- __kernel_fpu_end();
- ++vcpu->stat.fpu_reload;
-- kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
-+ if (!vcpu->arch.eager_fpu)
-+ kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
-+
- trace_kvm_fpu(0);
- }
-
-@@ -7012,11 +7014,21 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
- struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
- unsigned int id)
- {
-+ struct kvm_vcpu *vcpu;
-+
- if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
- printk_once(KERN_WARNING
- "kvm: SMP vm created on host with unstable TSC; "
- "guest TSC will not be reliable\n");
-- return kvm_x86_ops->vcpu_create(kvm, id);
-+
-+ vcpu = kvm_x86_ops->vcpu_create(kvm, id);
-+
-+ /*
-+ * Activate fpu unconditionally in case the guest needs eager FPU. It will be
-+ * deactivated soon if it doesn't.
-+ */
-+ kvm_x86_ops->fpu_activate(vcpu);
-+ return vcpu;
- }
-
- int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
-index f9eeae871593..5aa1f6e281d2 100644
---- a/drivers/acpi/osl.c
-+++ b/drivers/acpi/osl.c
-@@ -182,7 +182,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
- request_mem_region(addr, length, desc);
- }
-
--static int __init acpi_reserve_resources(void)
-+static void __init acpi_reserve_resources(void)
- {
- acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
- "ACPI PM1a_EVT_BLK");
-@@ -211,10 +211,7 @@ static int __init acpi_reserve_resources(void)
- if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
- acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
- acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
--
-- return 0;
- }
--device_initcall(acpi_reserve_resources);
-
- void acpi_os_printf(const char *fmt, ...)
- {
-@@ -1845,6 +1842,7 @@ acpi_status __init acpi_os_initialize(void)
-
- acpi_status __init acpi_os_initialize1(void)
- {
-+ acpi_reserve_resources();
- kacpid_wq = alloc_workqueue("kacpid", 0, 1);
- kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
- kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
-diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
-index 33bb06e006c9..adce56fa9cef 100644
---- a/drivers/ata/ahci.c
-+++ b/drivers/ata/ahci.c
-@@ -66,6 +66,7 @@ enum board_ids {
- board_ahci_yes_fbs,
-
- /* board IDs for specific chipsets in alphabetical order */
-+ board_ahci_avn,
- board_ahci_mcp65,
- board_ahci_mcp77,
- board_ahci_mcp89,
-@@ -84,6 +85,8 @@ enum board_ids {
- static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
- static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
-+static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
-+ unsigned long deadline);
- static void ahci_mcp89_apple_enable(struct pci_dev *pdev);
- static bool is_mcp89_apple(struct pci_dev *pdev);
- static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
-@@ -107,6 +110,11 @@ static struct ata_port_operations ahci_p5wdh_ops = {
- .hardreset = ahci_p5wdh_hardreset,
- };
-
-+static struct ata_port_operations ahci_avn_ops = {
-+ .inherits = &ahci_ops,
-+ .hardreset = ahci_avn_hardreset,
-+};
-+
- static const struct ata_port_info ahci_port_info[] = {
- /* by features */
- [board_ahci] = {
-@@ -151,6 +159,12 @@ static const struct ata_port_info ahci_port_info[] = {
- .port_ops = &ahci_ops,
- },
- /* by chipsets */
-+ [board_ahci_avn] = {
-+ .flags = AHCI_FLAG_COMMON,
-+ .pio_mask = ATA_PIO4,
-+ .udma_mask = ATA_UDMA6,
-+ .port_ops = &ahci_avn_ops,
-+ },
- [board_ahci_mcp65] = {
- AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
- AHCI_HFLAG_YES_NCQ),
-@@ -290,14 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
-- { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
-- { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
-- { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
-- { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
-- { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
-- { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
-- { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
-- { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
-+ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
-+ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
-+ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
-+ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */
-+ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */
-+ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
-+ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
-+ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
-@@ -670,6 +684,79 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
- return rc;
- }
-
-+/*
-+ * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports.
-+ *
-+ * It has been observed with some SSDs that the timing of events in the
-+ * link synchronization phase can leave the port in a state that can not
-+ * be recovered by a SATA-hard-reset alone. The failing signature is
-+ * SStatus.DET stuck at 1 ("Device presence detected but Phy
-+ * communication not established"). It was found that unloading and
-+ * reloading the driver when this problem occurs allows the drive
-+ * connection to be recovered (DET advanced to 0x3). The critical
-+ * component of reloading the driver is that the port state machines are
-+ * reset by bouncing "port enable" in the AHCI PCS configuration
-+ * register. So, reproduce that effect by bouncing a port whenever we
-+ * see DET==1 after a reset.
-+ */
-+static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
-+ unsigned long deadline)
-+{
-+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
-+ struct ata_port *ap = link->ap;
-+ struct ahci_port_priv *pp = ap->private_data;
-+ struct ahci_host_priv *hpriv = ap->host->private_data;
-+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
-+ unsigned long tmo = deadline - jiffies;
-+ struct ata_taskfile tf;
-+ bool online;
-+ int rc, i;
-+
-+ DPRINTK("ENTER\n");
-+
-+ ahci_stop_engine(ap);
-+
-+ for (i = 0; i < 2; i++) {
-+ u16 val;
-+ u32 sstatus;
-+ int port = ap->port_no;
-+ struct ata_host *host = ap->host;
-+ struct pci_dev *pdev = to_pci_dev(host->dev);
-+
-+ /* clear D2H reception area to properly wait for D2H FIS */
-+ ata_tf_init(link->device, &tf);
-+ tf.command = ATA_BUSY;
-+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
-+
-+ rc = sata_link_hardreset(link, timing, deadline, &online,
-+ ahci_check_ready);
-+
-+ if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 ||
-+ (sstatus & 0xf) != 1)
-+ break;
-+
-+ ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
-+ port);
-+
-+ pci_read_config_word(pdev, 0x92, &val);
-+ val &= ~(1 << port);
-+ pci_write_config_word(pdev, 0x92, val);
-+ ata_msleep(ap, 1000);
-+ val |= 1 << port;
-+ pci_write_config_word(pdev, 0x92, val);
-+ deadline += tmo;
-+ }
-+
-+ hpriv->start_engine(ap);
-+
-+ if (online)
-+ *class = ahci_dev_classify(ap);
-+
-+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
-+ return rc;
-+}
-+
-+
- #ifdef CONFIG_PM
- static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
- {
-diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
-index 61a9c07e0dff..287c4ba0219f 100644
---- a/drivers/ata/libahci.c
-+++ b/drivers/ata/libahci.c
-@@ -1707,8 +1707,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
- if (unlikely(resetting))
- status &= ~PORT_IRQ_BAD_PMP;
-
-- /* if LPM is enabled, PHYRDY doesn't mean anything */
-- if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
-+ if (sata_lpm_ignore_phy_events(&ap->link)) {
- status &= ~PORT_IRQ_PHYRDY;
- ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
- }
-diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index 23dac3babfe3..87b4b7f9fdc6 100644
---- a/drivers/ata/libata-core.c
-+++ b/drivers/ata/libata-core.c
-@@ -4214,7 +4214,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
-- { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
-+ { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
-
- /*
-@@ -6728,6 +6728,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
- return tmp;
- }
-
-+/**
-+ * sata_lpm_ignore_phy_events - test if PHY event should be ignored
-+ * @link: Link receiving the event
-+ *
-+ * Test whether the received PHY event has to be ignored or not.
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * RETURNS:
-+ * True if the event has to be ignored.
-+ */
-+bool sata_lpm_ignore_phy_events(struct ata_link *link)
-+{
-+ unsigned long lpm_timeout = link->last_lpm_change +
-+ msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
-+
-+ /* if LPM is enabled, PHYRDY doesn't mean anything */
-+ if (link->lpm_policy > ATA_LPM_MAX_POWER)
-+ return true;
-+
-+ /* ignore the first PHY event after the LPM policy changed
-+	 * as it might be spurious
-+ */
-+ if ((link->flags & ATA_LFLAG_CHANGED) &&
-+ time_before(jiffies, lpm_timeout))
-+ return true;
-+
-+ return false;
-+}
-+EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
-+
- /*
- * Dummy port_ops
- */
-diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
-index d2029a462e2c..89c3d83e1ca7 100644
---- a/drivers/ata/libata-eh.c
-+++ b/drivers/ata/libata-eh.c
-@@ -3489,6 +3489,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- }
- }
-
-+ link->last_lpm_change = jiffies;
-+ link->flags |= ATA_LFLAG_CHANGED;
-+
- return 0;
-
- fail:
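
The three libata hunks above are one feature: ata_eh_set_lpm() stamps the link with last_lpm_change and ATA_LFLAG_CHANGED, and sata_lpm_ignore_phy_events() (now also used by libahci's interrupt path) drops PHY events that land within ATA_TMOUT_SPURIOUS_PHY of that stamp, since a policy switch can itself generate a spurious one. A toy userspace model of the window check, with the jiffies machinery reduced to plain integers and an assumed window length:

    #include <stdbool.h>
    #include <stdio.h>

    #define SPURIOUS_PHY_WINDOW 10000   /* window length; demo value only */

    static bool time_before(unsigned long a, unsigned long b)
    {
        return (long)(a - b) < 0;       /* wrap-safe, like the kernel's */
    }

    struct link_state {
        bool lpm_changed;               /* models ATA_LFLAG_CHANGED */
        unsigned long last_lpm_change;  /* "jiffies" of the policy switch */
    };

    static bool ignore_phy_event(const struct link_state *l, unsigned long now)
    {
        return l->lpm_changed &&
               time_before(now, l->last_lpm_change + SPURIOUS_PHY_WINDOW);
    }

    int main(void)
    {
        struct link_state l = { .lpm_changed = true, .last_lpm_change = 1000 };

        printf("event at 2000:  ignore=%d\n", ignore_phy_event(&l, 2000));
        printf("event at 20000: ignore=%d\n", ignore_phy_event(&l, 20000));
        return 0;
    }
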
-diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
-index 237f23f68bfc..1daa0ea2f1ac 100644
---- a/drivers/clk/clk.c
-+++ b/drivers/clk/clk.c
-@@ -1443,8 +1443,10 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
- */
- if (clk->prepare_count) {
- clk_core_prepare(parent);
-+ flags = clk_enable_lock();
- clk_core_enable(parent);
- clk_core_enable(clk);
-+ clk_enable_unlock(flags);
- }
-
- /* update the clk tree topology */
-@@ -1459,13 +1461,17 @@ static void __clk_set_parent_after(struct clk_core *core,
- struct clk_core *parent,
- struct clk_core *old_parent)
- {
-+ unsigned long flags;
-+
- /*
- * Finish the migration of prepare state and undo the changes done
- * for preventing a race with clk_enable().
- */
- if (core->prepare_count) {
-+ flags = clk_enable_lock();
- clk_core_disable(core);
- clk_core_disable(old_parent);
-+ clk_enable_unlock(flags);
- clk_core_unprepare(old_parent);
- }
- }
-@@ -1489,8 +1495,10 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
- clk_enable_unlock(flags);
-
- if (clk->prepare_count) {
-+ flags = clk_enable_lock();
- clk_core_disable(clk);
- clk_core_disable(parent);
-+ clk_enable_unlock(flags);
- clk_core_unprepare(parent);
- }
- return ret;
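
All three clk.c hunks restore the same invariant: clk_core_enable()/clk_core_disable() adjust enable counts that are only consistent under the enable-lock spinlock, and the reparent paths were calling the pair unlocked. A schematic of the locked-pair pattern, with a pthread mutex and toy counters standing in for the kernel's spinlock and clk_core:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t enable_lock = PTHREAD_MUTEX_INITIALIZER;

    struct toy_clk {
        const char *name;
        int enable_count;   /* must only change under enable_lock */
    };

    static void toy_enable(struct toy_clk *clk)
    {
        clk->enable_count++;
    }

    static void reparent_enable(struct toy_clk *clk, struct toy_clk *parent)
    {
        pthread_mutex_lock(&enable_lock);   /* the fix: lock around the pair */
        toy_enable(parent);
        toy_enable(clk);
        pthread_mutex_unlock(&enable_lock);
    }

    int main(void)
    {
        struct toy_clk pll = { "pll", 0 }, uart = { "uart", 0 };

        reparent_enable(&uart, &pll);
        printf("%s=%d %s=%d\n", pll.name, pll.enable_count,
               uart.name, uart.enable_count);
        return 0;
    }
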
-diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
-index 07d666cc6a29..bea4a173eef5 100644
---- a/drivers/clk/samsung/clk-exynos5420.c
-+++ b/drivers/clk/samsung/clk-exynos5420.c
-@@ -271,6 +271,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
- { .offset = SRC_MASK_PERIC0, .value = 0x11111110, },
- { .offset = SRC_MASK_PERIC1, .value = 0x11111100, },
- { .offset = SRC_MASK_ISP, .value = 0x11111000, },
-+ { .offset = GATE_BUS_TOP, .value = 0xffffffff, },
- { .offset = GATE_BUS_DISP1, .value = 0xffffffff, },
- { .offset = GATE_IP_PERIC, .value = 0xffffffff, },
- };
-diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
-index 2eebd28b4c40..ccc20188f00c 100644
---- a/drivers/firmware/dmi_scan.c
-+++ b/drivers/firmware/dmi_scan.c
-@@ -499,18 +499,19 @@ static int __init dmi_present(const u8 *buf)
- buf += 16;
-
- if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
-+ if (smbios_ver)
-+ dmi_ver = smbios_ver;
-+ else
-+ dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
- dmi_num = get_unaligned_le16(buf + 12);
- dmi_len = get_unaligned_le16(buf + 6);
- dmi_base = get_unaligned_le32(buf + 8);
-
- if (dmi_walk_early(dmi_decode) == 0) {
- if (smbios_ver) {
-- dmi_ver = smbios_ver;
- pr_info("SMBIOS %d.%d present.\n",
- dmi_ver >> 8, dmi_ver & 0xFF);
- } else {
-- dmi_ver = (buf[14] & 0xF0) << 4 |
-- (buf[14] & 0x0F);
- pr_info("Legacy DMI %d.%d present.\n",
- dmi_ver >> 8, dmi_ver & 0xFF);
- }
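
The dmi_scan.c hunk hoists the version computation so dmi_ver is already valid when dmi_walk_early() decodes the table (the UUID byte order, for one, is chosen by version). In the legacy branch the version comes from the BCD-style byte at offset 14 of the _DMI_ header; a worked example of the unpacking:

    #include <stdio.h>

    int main(void)
    {
        unsigned char buf14 = 0x24;     /* sample _DMI_ byte 14: DMI 2.4 */
        int dmi_ver = (buf14 & 0xF0) << 4 | (buf14 & 0x0F);

        /* 0x24 -> 0x204: major in the high byte, minor in the low byte */
        printf("Legacy DMI %d.%d present.\n", dmi_ver >> 8, dmi_ver & 0xFF);
        return 0;
    }
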
-diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
-index 443518f63f15..a6b0def4bd7b 100644
---- a/drivers/gpio/gpio-kempld.c
-+++ b/drivers/gpio/gpio-kempld.c
-@@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
- = container_of(chip, struct kempld_gpio_data, chip);
- struct kempld_device_data *pld = gpio->pld;
-
-- return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
-+ return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
- }
-
- static int kempld_gpio_pincount(struct kempld_device_data *pld)
-diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
-index 498399323a8c..406624a0b201 100644
---- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
-+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
-@@ -729,7 +729,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
- kfd2kgd->get_max_engine_clock_in_mhz(
- dev->gpu->kgd));
- sysfs_show_64bit_prop(buffer, "local_mem_size",
-- kfd2kgd->get_vmem_size(dev->gpu->kgd));
-+ (unsigned long long int) 0);
-
- sysfs_show_32bit_prop(buffer, "fw_version",
- kfd2kgd->get_fw_version(
-diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
-index 5ba5792bfdba..98b125763ecd 100644
---- a/drivers/gpu/drm/drm_plane_helper.c
-+++ b/drivers/gpu/drm/drm_plane_helper.c
-@@ -476,6 +476,9 @@ int drm_plane_helper_commit(struct drm_plane *plane,
- if (!crtc[i])
- continue;
-
-+ if (crtc[i]->cursor == plane)
-+ continue;
-+
- /* There's no other way to figure out whether the crtc is running. */
- ret = drm_crtc_vblank_get(crtc[i]);
- if (ret == 0) {
-diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
-index 1afc0b419da2..965a45619f6b 100644
---- a/drivers/gpu/drm/radeon/atombios_crtc.c
-+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
-@@ -1789,7 +1789,9 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
- if ((crtc->mode.clock == test_crtc->mode.clock) &&
- (adjusted_clock == test_adjusted_clock) &&
- (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
-- (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
-+ (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
-+ (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
-+ drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
- return test_radeon_crtc->pll_id;
- }
- }
-diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
-index 8d74de82456e..8b2c4c890507 100644
---- a/drivers/gpu/drm/radeon/atombios_dp.c
-+++ b/drivers/gpu/drm/radeon/atombios_dp.c
-@@ -412,19 +412,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
- {
- struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
- u8 msg[DP_DPCD_SIZE];
-- int ret;
-+ int ret, i;
-
-- ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
-- DP_DPCD_SIZE);
-- if (ret > 0) {
-- memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
-+ for (i = 0; i < 7; i++) {
-+ ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
-+ DP_DPCD_SIZE);
-+ if (ret == DP_DPCD_SIZE) {
-+ memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
-
-- DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
-- dig_connector->dpcd);
-+ DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
-+ dig_connector->dpcd);
-
-- radeon_dp_probe_oui(radeon_connector);
-+ radeon_dp_probe_oui(radeon_connector);
-
-- return true;
-+ return true;
-+ }
- }
- dig_connector->dpcd[0] = 0;
- return false;
-diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
-index 3e670d344a20..19aafb71fd8e 100644
---- a/drivers/gpu/drm/radeon/cik.c
-+++ b/drivers/gpu/drm/radeon/cik.c
-@@ -5804,7 +5804,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
- /* restore context1-15 */
- /* set vm size, must be a multiple of 4 */
- WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
-+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
- for (i = 1; i < 16; i++) {
- if (i < 8)
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
-diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
-index 0926739c9fa7..9953356fe263 100644
---- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
-+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
-@@ -400,7 +400,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
- if (enable) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
-- if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-+ if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
- WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
- HDMI_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
-@@ -438,7 +438,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
- if (!dig || !dig->afmt)
- return;
-
-- if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-+ if (enable && connector &&
-+ drm_detect_monitor_audio(radeon_connector_edid(connector))) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector;
-diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
-index dab00812abaa..02d585455f49 100644
---- a/drivers/gpu/drm/radeon/ni.c
-+++ b/drivers/gpu/drm/radeon/ni.c
-@@ -1272,7 +1272,8 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
- */
- for (i = 1; i < 8; i++) {
- WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
-- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
-+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
-+ rdev->vm_manager.max_pfn - 1);
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
- rdev->vm_manager.saved_table_addr[i]);
- }
-diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
-index b7c6bb69f3c7..88c04bc0a7f6 100644
---- a/drivers/gpu/drm/radeon/radeon_audio.c
-+++ b/drivers/gpu/drm/radeon/radeon_audio.c
-@@ -460,9 +460,6 @@ void radeon_audio_detect(struct drm_connector *connector,
- if (!connector || !connector->encoder)
- return;
-
-- if (!radeon_encoder_is_digital(connector->encoder))
-- return;
--
- rdev = connector->encoder->dev->dev_private;
-
- if (!radeon_audio_chipset_supported(rdev))
-@@ -471,26 +468,26 @@ void radeon_audio_detect(struct drm_connector *connector,
- radeon_encoder = to_radeon_encoder(connector->encoder);
- dig = radeon_encoder->enc_priv;
-
-- if (!dig->afmt)
-- return;
--
- if (status == connector_status_connected) {
-- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-+ struct radeon_connector *radeon_connector;
-+ int sink_type;
-+
-+ if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-+ radeon_encoder->audio = NULL;
-+ return;
-+ }
-+
-+ radeon_connector = to_radeon_connector(connector);
-+ sink_type = radeon_dp_getsinktype(radeon_connector);
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
-- radeon_dp_getsinktype(radeon_connector) ==
-- CONNECTOR_OBJECT_ID_DISPLAYPORT)
-+ sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
- radeon_encoder->audio = rdev->audio.dp_funcs;
- else
- radeon_encoder->audio = rdev->audio.hdmi_funcs;
-
- dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
-- if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
-- } else {
-- radeon_audio_enable(rdev, dig->afmt->pin, 0);
-- dig->afmt->pin = NULL;
-- }
-+ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
- } else {
- radeon_audio_enable(rdev, dig->afmt->pin, 0);
- dig->afmt->pin = NULL;
-diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
-index 27973e3faf0e..27def67cb6be 100644
---- a/drivers/gpu/drm/radeon/radeon_connectors.c
-+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
-@@ -1333,10 +1333,8 @@ out:
- /* updated in get modes as well since we need to know if it's analog or digital */
- radeon_connector_update_scratch_regs(connector, ret);
-
-- if (radeon_audio != 0) {
-- radeon_connector_get_edid(connector);
-+ if (radeon_audio != 0)
- radeon_audio_detect(connector, ret);
-- }
-
- exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
-@@ -1661,10 +1659,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
-
- radeon_connector_update_scratch_regs(connector, ret);
-
-- if (radeon_audio != 0) {
-- radeon_connector_get_edid(connector);
-+ if (radeon_audio != 0)
- radeon_audio_detect(connector, ret);
-- }
-
- out:
- pm_runtime_mark_last_busy(connector->dev->dev);
-diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
-index a7fb2735d4a9..f433491fab6f 100644
---- a/drivers/gpu/drm/radeon/si.c
-+++ b/drivers/gpu/drm/radeon/si.c
-@@ -4288,7 +4288,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
- /* empty context1-15 */
- /* set vm size, must be a multiple of 4 */
- WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
-+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
- /* Assign the pt base to something valid for now; the pts used for
- * the VMs are determined by the application and setup and assigned
- * on the fly in the vm part of radeon_gart.c
-diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
-index e77658cd037c..2caf5b2f3446 100644
---- a/drivers/hid/hid-logitech-hidpp.c
-+++ b/drivers/hid/hid-logitech-hidpp.c
-@@ -39,7 +39,6 @@ MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>");
- /* bits 1..20 are reserved for classes */
- #define HIDPP_QUIRK_DELAYED_INIT BIT(21)
- #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22)
--#define HIDPP_QUIRK_MULTI_INPUT BIT(23)
-
- /*
- * There are two hidpp protocols in use, the first version hidpp10 is known
-@@ -701,12 +700,6 @@ static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
- {
-- struct hidpp_device *hidpp = hid_get_drvdata(hdev);
--
-- if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) &&
-- (field->application == HID_GD_KEYBOARD))
-- return 0;
--
- return -1;
- }
-
-@@ -715,10 +708,6 @@ static void wtp_populate_input(struct hidpp_device *hidpp,
- {
- struct wtp_data *wd = hidpp->private_data;
-
-- if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core)
-- /* this is the generic hid-input call */
-- return;
--
- __set_bit(EV_ABS, input_dev->evbit);
- __set_bit(EV_KEY, input_dev->evbit);
- __clear_bit(EV_REL, input_dev->evbit);
-@@ -1234,10 +1223,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
- if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
- connect_mask &= ~HID_CONNECT_HIDINPUT;
-
-- /* Re-enable hidinput for multi-input devices */
-- if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT)
-- connect_mask |= HID_CONNECT_HIDINPUT;
--
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
-@@ -1285,11 +1270,6 @@ static const struct hid_device_id hidpp_devices[] = {
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_T651),
- .driver_data = HIDPP_QUIRK_CLASS_WTP },
-- { /* Keyboard TK820 */
-- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
-- USB_VENDOR_ID_LOGITECH, 0x4102),
-- .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT |
-- HIDPP_QUIRK_CLASS_WTP },
-
- { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
-diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
-index f3830db02d46..37f01702d081 100644
---- a/drivers/hwmon/nct6683.c
-+++ b/drivers/hwmon/nct6683.c
-@@ -439,6 +439,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
- (*t)->dev_attr.attr.name, tg->base + i);
- if ((*t)->s2) {
- a2 = &su->u.a2;
-+ sysfs_attr_init(&a2->dev_attr.attr);
- a2->dev_attr.attr.name = su->name;
- a2->nr = (*t)->u.s.nr + i;
- a2->index = (*t)->u.s.index;
-@@ -449,6 +450,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
- *attrs = &a2->dev_attr.attr;
- } else {
- a = &su->u.a1;
-+ sysfs_attr_init(&a->dev_attr.attr);
- a->dev_attr.attr.name = su->name;
- a->index = (*t)->u.index + i;
- a->dev_attr.attr.mode =
-diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
-index 1be41177b620..0773930c110e 100644
---- a/drivers/hwmon/nct6775.c
-+++ b/drivers/hwmon/nct6775.c
-@@ -994,6 +994,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
- (*t)->dev_attr.attr.name, tg->base + i);
- if ((*t)->s2) {
- a2 = &su->u.a2;
-+ sysfs_attr_init(&a2->dev_attr.attr);
- a2->dev_attr.attr.name = su->name;
- a2->nr = (*t)->u.s.nr + i;
- a2->index = (*t)->u.s.index;
-@@ -1004,6 +1005,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
- *attrs = &a2->dev_attr.attr;
- } else {
- a = &su->u.a1;
-+ sysfs_attr_init(&a->dev_attr.attr);
- a->dev_attr.attr.name = su->name;
- a->index = (*t)->u.index + i;
- a->dev_attr.attr.mode =
-diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
-index 112e4d45e4a0..68800115876b 100644
---- a/drivers/hwmon/ntc_thermistor.c
-+++ b/drivers/hwmon/ntc_thermistor.c
-@@ -239,8 +239,10 @@ static struct ntc_thermistor_platform_data *
- ntc_thermistor_parse_dt(struct platform_device *pdev)
- {
- struct iio_channel *chan;
-+ enum iio_chan_type type;
- struct device_node *np = pdev->dev.of_node;
- struct ntc_thermistor_platform_data *pdata;
-+ int ret;
-
- if (!np)
- return NULL;
-@@ -253,6 +255,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
- if (IS_ERR(chan))
- return ERR_CAST(chan);
-
-+ ret = iio_get_channel_type(chan, &type);
-+ if (ret < 0)
-+ return ERR_PTR(ret);
-+
-+ if (type != IIO_VOLTAGE)
-+ return ERR_PTR(-EINVAL);
-+
- if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv))
- return ERR_PTR(-ENODEV);
- if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm))
-diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
-index 99664ebc738d..ccf4cffe0ee1 100644
---- a/drivers/hwmon/tmp401.c
-+++ b/drivers/hwmon/tmp401.c
-@@ -44,7 +44,7 @@
- #include <linux/sysfs.h>
-
- /* Addresses to scan */
--static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d,
-+static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
- 0x4e, 0x4f, I2C_CLIENT_END };
-
- enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
-diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
-index 53f32629283a..6805db0e4f07 100644
---- a/drivers/iio/accel/st_accel_core.c
-+++ b/drivers/iio/accel/st_accel_core.c
-@@ -465,6 +465,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
-
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->info = &accel_info;
-+ mutex_init(&adata->tb.buf_lock);
-
- st_sensors_power_enable(indio_dev);
-
-diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
-index 08bcfb061ca5..56008a86b78f 100644
---- a/drivers/iio/adc/axp288_adc.c
-+++ b/drivers/iio/adc/axp288_adc.c
-@@ -53,39 +53,42 @@ static const struct iio_chan_spec const axp288_adc_channels[] = {
- .channel = 0,
- .address = AXP288_TS_ADC_H,
- .datasheet_name = "TS_PIN",
-+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .indexed = 1,
- .type = IIO_TEMP,
- .channel = 1,
- .address = AXP288_PMIC_ADC_H,
- .datasheet_name = "PMIC_TEMP",
-+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .indexed = 1,
- .type = IIO_TEMP,
- .channel = 2,
- .address = AXP288_GP_ADC_H,
- .datasheet_name = "GPADC",
-+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .indexed = 1,
- .type = IIO_CURRENT,
- .channel = 3,
- .address = AXP20X_BATT_CHRG_I_H,
- .datasheet_name = "BATT_CHG_I",
-- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
-+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .indexed = 1,
- .type = IIO_CURRENT,
- .channel = 4,
- .address = AXP20X_BATT_DISCHRG_I_H,
- .datasheet_name = "BATT_DISCHRG_I",
-- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
-+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .indexed = 1,
- .type = IIO_VOLTAGE,
- .channel = 5,
- .address = AXP20X_BATT_V_H,
- .datasheet_name = "BATT_V",
-- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
-+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- },
- };
-
-@@ -151,9 +154,6 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
- chan->address))
- dev_err(&indio_dev->dev, "TS pin restore\n");
- break;
-- case IIO_CHAN_INFO_PROCESSED:
-- ret = axp288_adc_read_channel(val, chan->address, info->regmap);
-- break;
- default:
- ret = -EINVAL;
- }
-diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
-index 51e2a83c9404..115f6e99a7fa 100644
---- a/drivers/iio/adc/cc10001_adc.c
-+++ b/drivers/iio/adc/cc10001_adc.c
-@@ -35,8 +35,9 @@
- #define CC10001_ADC_EOC_SET BIT(0)
-
- #define CC10001_ADC_CHSEL_SAMPLED 0x0c
--#define CC10001_ADC_POWER_UP 0x10
--#define CC10001_ADC_POWER_UP_SET BIT(0)
-+#define CC10001_ADC_POWER_DOWN 0x10
-+#define CC10001_ADC_POWER_DOWN_SET BIT(0)
-+
- #define CC10001_ADC_DEBUG 0x14
- #define CC10001_ADC_DATA_COUNT 0x20
-
-@@ -62,7 +63,6 @@ struct cc10001_adc_device {
- u16 *buf;
-
- struct mutex lock;
-- unsigned long channel_map;
- unsigned int start_delay_ns;
- unsigned int eoc_delay_ns;
- };
-@@ -79,6 +79,18 @@ static inline u32 cc10001_adc_read_reg(struct cc10001_adc_device *adc_dev,
- return readl(adc_dev->reg_base + reg);
- }
-
-+static void cc10001_adc_power_up(struct cc10001_adc_device *adc_dev)
-+{
-+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN, 0);
-+ ndelay(adc_dev->start_delay_ns);
-+}
-+
-+static void cc10001_adc_power_down(struct cc10001_adc_device *adc_dev)
-+{
-+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN,
-+ CC10001_ADC_POWER_DOWN_SET);
-+}
-+
- static void cc10001_adc_start(struct cc10001_adc_device *adc_dev,
- unsigned int channel)
- {
-@@ -88,6 +100,7 @@ static void cc10001_adc_start(struct cc10001_adc_device *adc_dev,
- val = (channel & CC10001_ADC_CH_MASK) | CC10001_ADC_MODE_SINGLE_CONV;
- cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val);
-
-+ udelay(1);
- val = cc10001_adc_read_reg(adc_dev, CC10001_ADC_CONFIG);
- val = val | CC10001_ADC_START_CONV;
- cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val);
-@@ -129,6 +142,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
- struct iio_dev *indio_dev;
- unsigned int delay_ns;
- unsigned int channel;
-+ unsigned int scan_idx;
- bool sample_invalid;
- u16 *data;
- int i;
-@@ -139,20 +153,17 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
-
- mutex_lock(&adc_dev->lock);
-
-- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP,
-- CC10001_ADC_POWER_UP_SET);
--
-- /* Wait for 8 (6+2) clock cycles before activating START */
-- ndelay(adc_dev->start_delay_ns);
-+ cc10001_adc_power_up(adc_dev);
-
- /* Calculate delay step for eoc and sampled data */
- delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
-
- i = 0;
- sample_invalid = false;
-- for_each_set_bit(channel, indio_dev->active_scan_mask,
-+ for_each_set_bit(scan_idx, indio_dev->active_scan_mask,
- indio_dev->masklength) {
-
-+ channel = indio_dev->channels[scan_idx].channel;
- cc10001_adc_start(adc_dev, channel);
-
- data[i] = cc10001_adc_poll_done(indio_dev, channel, delay_ns);
-@@ -166,7 +177,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
- }
-
- done:
-- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0);
-+ cc10001_adc_power_down(adc_dev);
-
- mutex_unlock(&adc_dev->lock);
-
-@@ -185,11 +196,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
- unsigned int delay_ns;
- u16 val;
-
-- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP,
-- CC10001_ADC_POWER_UP_SET);
--
-- /* Wait for 8 (6+2) clock cycles before activating START */
-- ndelay(adc_dev->start_delay_ns);
-+ cc10001_adc_power_up(adc_dev);
-
- /* Calculate delay step for eoc and sampled data */
- delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
-@@ -198,7 +205,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
-
- val = cc10001_adc_poll_done(indio_dev, chan->channel, delay_ns);
-
-- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0);
-+ cc10001_adc_power_down(adc_dev);
-
- return val;
- }
-@@ -224,7 +231,7 @@ static int cc10001_adc_read_raw(struct iio_dev *indio_dev,
-
- case IIO_CHAN_INFO_SCALE:
- ret = regulator_get_voltage(adc_dev->reg);
-- if (ret)
-+ if (ret < 0)
- return ret;
-
- *val = ret / 1000;
-@@ -255,22 +262,22 @@ static const struct iio_info cc10001_adc_info = {
- .update_scan_mode = &cc10001_update_scan_mode,
- };
-
--static int cc10001_adc_channel_init(struct iio_dev *indio_dev)
-+static int cc10001_adc_channel_init(struct iio_dev *indio_dev,
-+ unsigned long channel_map)
- {
-- struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
- struct iio_chan_spec *chan_array, *timestamp;
- unsigned int bit, idx = 0;
-
-- indio_dev->num_channels = bitmap_weight(&adc_dev->channel_map,
-- CC10001_ADC_NUM_CHANNELS);
-+ indio_dev->num_channels = bitmap_weight(&channel_map,
-+ CC10001_ADC_NUM_CHANNELS) + 1;
-
-- chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels + 1,
-+ chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels,
- sizeof(struct iio_chan_spec),
- GFP_KERNEL);
- if (!chan_array)
- return -ENOMEM;
-
-- for_each_set_bit(bit, &adc_dev->channel_map, CC10001_ADC_NUM_CHANNELS) {
-+ for_each_set_bit(bit, &channel_map, CC10001_ADC_NUM_CHANNELS) {
- struct iio_chan_spec *chan = &chan_array[idx];
-
- chan->type = IIO_VOLTAGE;
-@@ -305,6 +312,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
- unsigned long adc_clk_rate;
- struct resource *res;
- struct iio_dev *indio_dev;
-+ unsigned long channel_map;
- int ret;
-
- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
-@@ -313,9 +321,9 @@ static int cc10001_adc_probe(struct platform_device *pdev)
-
- adc_dev = iio_priv(indio_dev);
-
-- adc_dev->channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
-+ channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
- if (!of_property_read_u32(node, "adc-reserved-channels", &ret))
-- adc_dev->channel_map &= ~ret;
-+ channel_map &= ~ret;
-
- adc_dev->reg = devm_regulator_get(&pdev->dev, "vref");
- if (IS_ERR(adc_dev->reg))
-@@ -361,7 +369,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
- adc_dev->start_delay_ns = adc_dev->eoc_delay_ns * CC10001_WAIT_CYCLES;
-
- /* Setup the ADC channels available on the device */
-- ret = cc10001_adc_channel_init(indio_dev);
-+ ret = cc10001_adc_channel_init(indio_dev, channel_map);
- if (ret < 0)
- goto err_disable_clk;
-
-diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
-index 3211729bcb0b..0c4618b4d515 100644
---- a/drivers/iio/adc/qcom-spmi-vadc.c
-+++ b/drivers/iio/adc/qcom-spmi-vadc.c
-@@ -18,6 +18,7 @@
- #include <linux/iio/iio.h>
- #include <linux/interrupt.h>
- #include <linux/kernel.h>
-+#include <linux/math64.h>
- #include <linux/module.h>
- #include <linux/of.h>
- #include <linux/platform_device.h>
-@@ -471,11 +472,11 @@ static s32 vadc_calibrate(struct vadc_priv *vadc,
- const struct vadc_channel_prop *prop, u16 adc_code)
- {
- const struct vadc_prescale_ratio *prescale;
-- s32 voltage;
-+ s64 voltage;
-
- voltage = adc_code - vadc->graph[prop->calibration].gnd;
- voltage *= vadc->graph[prop->calibration].dx;
-- voltage = voltage / vadc->graph[prop->calibration].dy;
-+ voltage = div64_s64(voltage, vadc->graph[prop->calibration].dy);
-
- if (prop->calibration == VADC_CALIB_ABSOLUTE)
- voltage += vadc->graph[prop->calibration].dx;
-@@ -487,7 +488,7 @@ static s32 vadc_calibrate(struct vadc_priv *vadc,
-
- voltage = voltage * prescale->den;
-
-- return voltage / prescale->num;
-+ return div64_s64(voltage, prescale->num);
- }
-
- static int vadc_decimation_from_dt(u32 value)
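
The vadc hunk above fixes a 32-bit overflow: (adc_code - gnd) * dx can exceed INT_MAX, so the intermediate is widened to s64 and divided with div64_s64() (a plain 64-bit '/' would pull in unavailable division helpers on 32-bit kernels). A small demo of the difference, using made-up calibration values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t adc_code = 50000, gnd = 100, dx = 625000, dy = 10000;

        /* widened first, then truncated: what 32-bit math would keep */
        int64_t wide = (int64_t)(adc_code - gnd) * dx;   /* 31187500000 */
        int32_t narrow = (int32_t)wide;                  /* wrapped */

        printf("32-bit result: %d\n", narrow / dy);
        printf("64-bit result: %lld\n", (long long)(wide / dy));
        return 0;
    }
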
-diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
-index a221f7329b79..ce93bd8e3f68 100644
---- a/drivers/iio/adc/xilinx-xadc-core.c
-+++ b/drivers/iio/adc/xilinx-xadc-core.c
-@@ -856,6 +856,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
- switch (chan->address) {
- case XADC_REG_VCCINT:
- case XADC_REG_VCCAUX:
-+ case XADC_REG_VREFP:
- case XADC_REG_VCCBRAM:
- case XADC_REG_VCCPINT:
- case XADC_REG_VCCPAUX:
-@@ -996,7 +997,7 @@ static const struct iio_event_spec xadc_voltage_events[] = {
- .num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \
- .scan_index = (_scan_index), \
- .scan_type = { \
-- .sign = 'u', \
-+ .sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \
- .realbits = 12, \
- .storagebits = 16, \
- .shift = 4, \
-@@ -1008,7 +1009,7 @@ static const struct iio_event_spec xadc_voltage_events[] = {
- static const struct iio_chan_spec xadc_channels[] = {
- XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
- XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
-- XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCINT, "vccaux", true),
-+ XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
- XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
- XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
- XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
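
Two of the xadc fixes above are one character but meaningful: channel 1 now reads VCCAUX instead of VCCINT twice, and VREFN is declared signed because the XADC stores a 12-bit two's-complement sample in bits 15..4 of the register. Extracting it therefore needs an arithmetic rather than a logical shift, as this sketch shows:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t reg = 0xFFF0;   /* 12-bit sample -1, stored in bits 15..4 */

        unsigned int u = reg >> 4;    /* logical shift: 4095, wrong */
        int s = (int16_t)reg >> 4;    /* arithmetic shift on two's-
                                         complement targets: -1, right */

        printf("unsigned=%u signed=%d\n", u, s);
        return 0;
    }
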
-diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
-index c7487e8d7f80..54adc5087210 100644
---- a/drivers/iio/adc/xilinx-xadc.h
-+++ b/drivers/iio/adc/xilinx-xadc.h
-@@ -145,9 +145,9 @@ static inline int xadc_write_adc_reg(struct xadc *xadc, unsigned int reg,
- #define XADC_REG_MAX_VCCPINT 0x28
- #define XADC_REG_MAX_VCCPAUX 0x29
- #define XADC_REG_MAX_VCCO_DDR 0x2a
--#define XADC_REG_MIN_VCCPINT 0x2b
--#define XADC_REG_MIN_VCCPAUX 0x2c
--#define XADC_REG_MIN_VCCO_DDR 0x2d
-+#define XADC_REG_MIN_VCCPINT 0x2c
-+#define XADC_REG_MIN_VCCPAUX 0x2d
-+#define XADC_REG_MIN_VCCO_DDR 0x2e
-
- #define XADC_REG_CONF0 0x40
- #define XADC_REG_CONF1 0x41
-diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
-index edd13d2b4121..8dd0477e201c 100644
---- a/drivers/iio/common/st_sensors/st_sensors_core.c
-+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
-@@ -304,8 +304,6 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *of_pdata;
- int err = 0;
-
-- mutex_init(&sdata->tb.buf_lock);
--
- /* If OF/DT pdata exists, it will take precedence of anything else */
- of_pdata = st_sensors_of_probe(indio_dev->dev.parent, pdata);
- if (of_pdata)
-diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
-index f07a2336f7dc..566f7d2df031 100644
---- a/drivers/iio/gyro/st_gyro_core.c
-+++ b/drivers/iio/gyro/st_gyro_core.c
-@@ -317,6 +317,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
-
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->info = &gyro_info;
-+ mutex_init(&gdata->tb.buf_lock);
-
- st_sensors_power_enable(indio_dev);
-
-diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
-index 3ecf79ed08ac..88f21bbe947c 100644
---- a/drivers/iio/light/hid-sensor-prox.c
-+++ b/drivers/iio/light/hid-sensor-prox.c
-@@ -43,8 +43,6 @@ struct prox_state {
- static const struct iio_chan_spec prox_channels[] = {
- {
- .type = IIO_PROXIMITY,
-- .modified = 1,
-- .channel2 = IIO_NO_MOD,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE) |
-diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
-index 8ade473f99fe..2e56f812a644 100644
---- a/drivers/iio/magnetometer/st_magn_core.c
-+++ b/drivers/iio/magnetometer/st_magn_core.c
-@@ -369,6 +369,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
-
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->info = &magn_info;
-+ mutex_init(&mdata->tb.buf_lock);
-
- st_sensors_power_enable(indio_dev);
-
-diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
-index 1af314926ebd..476a7d03d2ce 100644
---- a/drivers/iio/pressure/hid-sensor-press.c
-+++ b/drivers/iio/pressure/hid-sensor-press.c
-@@ -47,8 +47,6 @@ struct press_state {
- static const struct iio_chan_spec press_channels[] = {
- {
- .type = IIO_PRESSURE,
-- .modified = 1,
-- .channel2 = IIO_NO_MOD,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE) |
-diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
-index 97baf40d424b..e881fa6291e9 100644
---- a/drivers/iio/pressure/st_pressure_core.c
-+++ b/drivers/iio/pressure/st_pressure_core.c
-@@ -417,6 +417,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
-
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->info = &press_info;
-+ mutex_init(&press_data->tb.buf_lock);
-
- st_sensors_power_enable(indio_dev);
-
-diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
-index b85ddbc979e0..e5558b2660f2 100644
---- a/drivers/infiniband/core/iwpm_msg.c
-+++ b/drivers/infiniband/core/iwpm_msg.c
-@@ -33,7 +33,7 @@
-
- #include "iwpm_util.h"
-
--static const char iwpm_ulib_name[] = "iWarpPortMapperUser";
-+static const char iwpm_ulib_name[IWPM_ULIBNAME_SIZE] = "iWarpPortMapperUser";
- static int iwpm_ulib_version = 3;
- static int iwpm_user_pid = IWPM_PID_UNDEFINED;
- static atomic_t echo_nlmsg_seq;
-diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
-index 991dc6b20a58..79363b687195 100644
---- a/drivers/input/mouse/elantech.c
-+++ b/drivers/input/mouse/elantech.c
-@@ -315,7 +315,7 @@ static void elantech_report_semi_mt_data(struct input_dev *dev,
- unsigned int x2, unsigned int y2)
- {
- elantech_set_slot(dev, 0, num_fingers != 0, x1, y1);
-- elantech_set_slot(dev, 1, num_fingers == 2, x2, y2);
-+ elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2);
- }
-
- /*
-diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
-index 6d5a5c44453b..173e70dbf61b 100644
---- a/drivers/iommu/amd_iommu_v2.c
-+++ b/drivers/iommu/amd_iommu_v2.c
-@@ -266,6 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state)
-
- static void put_pasid_state_wait(struct pasid_state *pasid_state)
- {
-+ atomic_dec(&pasid_state->count);
- wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
- free_pasid_state(pasid_state);
- }
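
The amd_iommu_v2 fix above is a classic reference-count deadlock: the waiter itself holds one reference, so waiting for the count to reach zero without dropping it first can never complete when the waiter holds the last reference. A toy version with C11 atomics, where a busy loop stands in for wait_event():

    #include <stdatomic.h>
    #include <stdio.h>

    struct pasid_state { atomic_int count; };

    static void put_pasid_state_wait(struct pasid_state *s)
    {
        atomic_fetch_sub(&s->count, 1);      /* the fix: drop our own ref */
        while (atomic_load(&s->count) != 0)  /* stand-in for wait_event();
                                                spins forever if the sub
                                                above is removed */
            ;
        printf("state freed\n");             /* free_pasid_state() here */
    }

    int main(void)
    {
        struct pasid_state s = { 1 };        /* we hold the only reference */

        put_pasid_state_wait(&s);
        return 0;
    }
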
-diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
-index a3adde6519f0..bd6252b01510 100644
---- a/drivers/iommu/arm-smmu.c
-+++ b/drivers/iommu/arm-smmu.c
-@@ -224,14 +224,7 @@
- #define RESUME_TERMINATE (1 << 0)
-
- #define TTBCR2_SEP_SHIFT 15
--#define TTBCR2_SEP_MASK 0x7
--
--#define TTBCR2_ADDR_32 0
--#define TTBCR2_ADDR_36 1
--#define TTBCR2_ADDR_40 2
--#define TTBCR2_ADDR_42 3
--#define TTBCR2_ADDR_44 4
--#define TTBCR2_ADDR_48 5
-+#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
-
- #define TTBRn_HI_ASID_SHIFT 16
-
-@@ -783,26 +776,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
- if (smmu->version > ARM_SMMU_V1) {
- reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
-- switch (smmu->va_size) {
-- case 32:
-- reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
-- break;
-- case 36:
-- reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
-- break;
-- case 40:
-- reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
-- break;
-- case 42:
-- reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
-- break;
-- case 44:
-- reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
-- break;
-- case 48:
-- reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
-- break;
-- }
-+ reg |= TTBCR2_SEP_UPSTREAM;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
- }
- } else {
-diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
-index 7dc93aa004c8..312ffd3d0017 100644
---- a/drivers/lguest/core.c
-+++ b/drivers/lguest/core.c
-@@ -173,7 +173,7 @@ static void unmap_switcher(void)
- bool lguest_address_ok(const struct lguest *lg,
- unsigned long addr, unsigned long len)
- {
-- return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
-+ return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
- }
-
- /*
-diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
-index 6554d9148927..757f1ba34c4d 100644
---- a/drivers/md/dm-table.c
-+++ b/drivers/md/dm-table.c
-@@ -823,6 +823,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
- }
- EXPORT_SYMBOL(dm_consume_args);
-
-+static bool __table_type_request_based(unsigned table_type)
-+{
-+ return (table_type == DM_TYPE_REQUEST_BASED ||
-+ table_type == DM_TYPE_MQ_REQUEST_BASED);
-+}
-+
- static int dm_table_set_type(struct dm_table *t)
- {
- unsigned i;
-@@ -855,8 +861,7 @@ static int dm_table_set_type(struct dm_table *t)
- * Determine the type from the live device.
- * Default to bio-based if device is new.
- */
-- if (live_md_type == DM_TYPE_REQUEST_BASED ||
-- live_md_type == DM_TYPE_MQ_REQUEST_BASED)
-+ if (__table_type_request_based(live_md_type))
- request_based = 1;
- else
- bio_based = 1;
-@@ -906,7 +911,7 @@ static int dm_table_set_type(struct dm_table *t)
- }
- t->type = DM_TYPE_MQ_REQUEST_BASED;
-
-- } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) {
-+ } else if (list_empty(devices) && __table_type_request_based(live_md_type)) {
- /* inherit live MD type */
- t->type = live_md_type;
-
-@@ -928,10 +933,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
-
- bool dm_table_request_based(struct dm_table *t)
- {
-- unsigned table_type = dm_table_get_type(t);
--
-- return (table_type == DM_TYPE_REQUEST_BASED ||
-- table_type == DM_TYPE_MQ_REQUEST_BASED);
-+ return __table_type_request_based(dm_table_get_type(t));
- }
-
- bool dm_table_mq_request_based(struct dm_table *t)
-diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index 8001fe9e3434..9b4e30a82e4a 100644
---- a/drivers/md/dm.c
-+++ b/drivers/md/dm.c
-@@ -1642,8 +1642,7 @@ static int dm_merge_bvec(struct request_queue *q,
- struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_live_table_fast(md);
- struct dm_target *ti;
-- sector_t max_sectors;
-- int max_size = 0;
-+ sector_t max_sectors, max_size = 0;
-
- if (unlikely(!map))
- goto out;
-@@ -1658,8 +1657,16 @@ static int dm_merge_bvec(struct request_queue *q,
- max_sectors = min(max_io_len(bvm->bi_sector, ti),
- (sector_t) queue_max_sectors(q));
- max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-- if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
-- max_size = 0;
-+
-+ /*
-+ * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
-+ * to the targets' merge function since it holds sectors not bytes).
-+ * Just doing this as an interim fix for stable@ because the more
-+ * comprehensive cleanup of switching to sector_t will impact every
-+ * DM target that implements a ->merge hook.
-+ */
-+ if (max_size > INT_MAX)
-+ max_size = INT_MAX;
-
- /*
- * merge_bvec_fn() returns number of bytes
-@@ -1667,7 +1674,7 @@ static int dm_merge_bvec(struct request_queue *q,
- * max is precomputed maximal io size
- */
- if (max_size && ti->type->merge)
-- max_size = ti->type->merge(ti, bvm, biovec, max_size);
-+ max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
- /*
- * If the target doesn't support merge method and some of the devices
- * provided their merge_bvec method (we know this by looking for the
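
The dm.c hunk above is the interim fix its FIXME describes: max_size is computed in sector_t (64-bit) instead of int and clamped to INT_MAX before being handed to the int-based ->merge hook, so a huge target no longer yields a negative size. A demo of the truncation, with invented values:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9
    typedef uint64_t sector_t;

    int main(void)
    {
        sector_t max_sectors = 1ULL << 32;  /* 2^32 sectors = 2 TiB */
        unsigned int bi_size = 4096;

        sector_t max_size = (max_sectors << SECTOR_SHIFT) - bi_size;
        int old_behaviour = (int)max_size;  /* old code stored it in an int */

        if (max_size > INT_MAX)             /* the interim clamp */
            max_size = INT_MAX;

        printf("as int: %d, clamped: %lld\n",
               old_behaviour, (long long)max_size);
        return 0;
    }
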
-diff --git a/drivers/md/md.c b/drivers/md/md.c
-index e47d1dd046da..907534b7f40d 100644
---- a/drivers/md/md.c
-+++ b/drivers/md/md.c
-@@ -4138,12 +4138,12 @@ action_store(struct mddev *mddev, const char *page, size_t len)
- if (!mddev->pers || !mddev->pers->sync_request)
- return -EINVAL;
-
-- if (cmd_match(page, "frozen"))
-- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-- else
-- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-
- if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
-+ if (cmd_match(page, "frozen"))
-+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-+ else
-+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- flush_workqueue(md_misc_wq);
- if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-@@ -4156,16 +4156,17 @@ action_store(struct mddev *mddev, const char *page, size_t len)
- test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
- return -EBUSY;
- else if (cmd_match(page, "resync"))
-- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- else if (cmd_match(page, "recover")) {
-+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
-- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- } else if (cmd_match(page, "reshape")) {
- int err;
- if (mddev->pers->start_reshape == NULL)
- return -EINVAL;
- err = mddev_lock(mddev);
- if (!err) {
-+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- err = mddev->pers->start_reshape(mddev);
- mddev_unlock(mddev);
- }
-@@ -4177,6 +4178,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
- set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
- else if (!cmd_match(page, "repair"))
- return -EINVAL;
-+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
- set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
- }
-diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
-index 3b5d7f704aa3..903391ce9353 100644
---- a/drivers/md/raid0.c
-+++ b/drivers/md/raid0.c
-@@ -517,6 +517,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
- ? (sector & (chunk_sects-1))
- : sector_div(sector, chunk_sects));
-
-+ /* Restore due to sector_div */
-+ sector = bio->bi_iter.bi_sector;
-+
- if (sectors < bio_sectors(bio)) {
- split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
- bio_chain(split, bio);
-@@ -524,7 +527,6 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
- split = bio;
- }
-
-- sector = bio->bi_iter.bi_sector;
- zone = find_zone(mddev->private, &sector);
- tmp_dev = map_sector(mddev, zone, sector, &sector);
- split->bi_bdev = tmp_dev->bdev;
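
The raid0 hunk is about sector_div()'s side effect: like do_div(), it returns the remainder and overwrites its argument with the quotient. The old code reloaded sector from bio->bi_iter.bi_sector only after bio_split(), by which point the parent bio had already been advanced, so the reload picked up the wrong sector; moving the restore directly after the division fixes that. A userspace sketch of the destructive division (toy_sector_div() is a stand-in for the kernel macro, which takes an lvalue rather than a pointer):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    static unsigned int toy_sector_div(sector_t *s, unsigned int div)
    {
        unsigned int rem = *s % div;
        *s /= div;              /* side effect: *s becomes the quotient */
        return rem;
    }

    int main(void)
    {
        sector_t sector = 1000;             /* bio->bi_iter.bi_sector */
        unsigned int chunk_sects = 128;

        unsigned int into_chunk = toy_sector_div(&sector, chunk_sects);
        unsigned int sectors = chunk_sects - into_chunk;

        printf("can take %u sectors, but sector is now %llu\n",
               sectors, sector);
        sector = 1000;          /* the fix: restore before using it again */
        printf("restored sector = %llu\n", sector);
        return 0;
    }
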
-diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index cd2f96b2c572..007ab861eca0 100644
---- a/drivers/md/raid5.c
-+++ b/drivers/md/raid5.c
-@@ -1933,7 +1933,8 @@ static int resize_stripes(struct r5conf *conf, int newsize)
-
- conf->slab_cache = sc;
- conf->active_name = 1-conf->active_name;
-- conf->pool_size = newsize;
-+ if (!err)
-+ conf->pool_size = newsize;
- return err;
- }
-
-diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
-index ae498b53ee40..46e3840c7a37 100644
---- a/drivers/mfd/da9052-core.c
-+++ b/drivers/mfd/da9052-core.c
-@@ -433,6 +433,10 @@ EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
- static const struct mfd_cell da9052_subdev_info[] = {
- {
- .name = "da9052-regulator",
-+ .id = 0,
-+ },
-+ {
-+ .name = "da9052-regulator",
- .id = 1,
- },
- {
-@@ -484,10 +488,6 @@ static const struct mfd_cell da9052_subdev_info[] = {
- .id = 13,
- },
- {
-- .name = "da9052-regulator",
-- .id = 14,
-- },
-- {
- .name = "da9052-onkey",
- },
- {
-diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
-index 03d7c7521d97..9a39e0b7e583 100644
---- a/drivers/mmc/host/atmel-mci.c
-+++ b/drivers/mmc/host/atmel-mci.c
-@@ -1304,7 +1304,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-
- if (ios->clock) {
- unsigned int clock_min = ~0U;
-- u32 clkdiv;
-+ int clkdiv;
-
- spin_lock_bh(&host->lock);
- if (!host->mode_reg) {
-@@ -1328,7 +1328,12 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
- /* Calculate clock divider */
- if (host->caps.has_odd_clk_div) {
- clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
-- if (clkdiv > 511) {
-+ if (clkdiv < 0) {
-+ dev_warn(&mmc->class_dev,
-+ "clock %u too fast; using %lu\n",
-+ clock_min, host->bus_hz / 2);
-+ clkdiv = 0;
-+ } else if (clkdiv > 511) {
- dev_warn(&mmc->class_dev,
- "clock %u too slow; using %lu\n",
- clock_min, host->bus_hz / (511 + 2));
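
The atmel-mci hunk fixes a sign bug: with has_odd_clk_div, clkdiv = DIV_ROUND_UP(bus_hz, clock_min) - 2 goes negative whenever the requested clock exceeds bus_hz, and as a u32 that wrapped into the "> 511" clamp, silently picking the slowest clock instead of the fastest. Making clkdiv a signed int lets the new "< 0" branch clamp it to 0, i.e. bus_hz / 2. A demo with invented rates:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long bus_hz = 50000000;      /* 50 MHz controller clock */
        unsigned long clock_min = 100000000;  /* caller asked for 100 MHz */

        unsigned int old = DIV_ROUND_UP(bus_hz, clock_min) - 2;  /* wraps */
        int fixed = (int)(DIV_ROUND_UP(bus_hz, clock_min) - 2);  /* -1 */

        printf("u32 clkdiv: %u  -> '> 511' path, slowest clock\n", old);
        printf("int clkdiv: %d  -> new '< 0' path, clkdiv = 0\n", fixed);
        return 0;
    }
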
-diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
-index db2c05b6fe7f..c9eb78f10a0d 100644
---- a/drivers/mtd/ubi/block.c
-+++ b/drivers/mtd/ubi/block.c
-@@ -310,6 +310,8 @@ static void ubiblock_do_work(struct work_struct *work)
- blk_rq_map_sg(req->q, req, pdu->usgl.sg);
-
- ret = ubiblock_read(pdu);
-+ rq_flush_dcache_pages(req);
-+
- blk_mq_end_request(req, ret);
- }
-
-diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
-index 6262612dec45..7a3231d8b933 100644
---- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
-+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
-@@ -512,11 +512,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
- msgbuf->rx_pktids,
- msgbuf->ioctl_resp_pktid);
- if (msgbuf->ioctl_resp_ret_len != 0) {
-- if (!skb) {
-- brcmf_err("Invalid packet id idx recv'd %d\n",
-- msgbuf->ioctl_resp_pktid);
-+ if (!skb)
- return -EBADF;
-- }
-+
- memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
- len : msgbuf->ioctl_resp_ret_len);
- }
-@@ -875,10 +873,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
- flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
- skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
- msgbuf->tx_pktids, idx);
-- if (!skb) {
-- brcmf_err("Invalid packet id idx recv'd %d\n", idx);
-+ if (!skb)
- return;
-- }
-
- set_bit(flowid, msgbuf->txstatus_done_map);
- commonring = msgbuf->flowrings[flowid];
-@@ -1157,6 +1153,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
-
- skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
- msgbuf->rx_pktids, idx);
-+ if (!skb)
-+ return;
-
- if (data_offset)
- skb_pull(skb, data_offset);
-diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
-index 14e8fd661889..fd5a0bb1493f 100644
---- a/drivers/net/wireless/iwlwifi/mvm/d3.c
-+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
-@@ -1742,8 +1742,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
- int i, j, n_matches, ret;
-
- fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
-- if (!IS_ERR_OR_NULL(fw_status))
-+ if (!IS_ERR_OR_NULL(fw_status)) {
- reasons = le32_to_cpu(fw_status->wakeup_reasons);
-+ kfree(fw_status);
-+ }
-
- if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
- wakeup.rfkill_release = true;
-@@ -1860,15 +1862,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
- /* get the BSS vif pointer again */
- vif = iwl_mvm_get_bss_vif(mvm);
- if (IS_ERR_OR_NULL(vif))
-- goto out_unlock;
-+ goto err;
-
- ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
- if (ret)
-- goto out_unlock;
-+ goto err;
-
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(mvm, "Device was reset during suspend\n");
-- goto out_unlock;
-+ goto err;
- }
-
- /* query SRAM first in case we want event logging */
-@@ -1886,7 +1888,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
- /* has unlocked the mutex, so skip that */
- goto out;
-
-- out_unlock:
-+err:
-+ iwl_mvm_free_nd(mvm);
- mutex_unlock(&mvm->mutex);
-
- out:
-diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
-index 69935aa5a1b3..cb72edb3d16a 100644
---- a/drivers/net/wireless/iwlwifi/pcie/trans.c
-+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
-@@ -5,8 +5,8 @@
- *
- * GPL LICENSE SUMMARY
- *
-- * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
-- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
-+ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
-+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
-@@ -31,8 +31,8 @@
- *
- * BSD LICENSE
- *
-- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
-- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
-+ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
-+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
-@@ -104,7 +104,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
- static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
- {
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-- struct page *page;
-+ struct page *page = NULL;
- dma_addr_t phys;
- u32 size;
- u8 power;
-@@ -131,6 +131,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
- DMA_FROM_DEVICE);
- if (dma_mapping_error(trans->dev, phys)) {
- __free_pages(page, order);
-+ page = NULL;
- continue;
- }
- IWL_INFO(trans,
-diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
-index 8444313eabe2..8694dddcce9a 100644
---- a/drivers/net/wireless/rt2x00/rt2800usb.c
-+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
-@@ -1040,6 +1040,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
- { USB_DEVICE(0x07d1, 0x3c17) },
- { USB_DEVICE(0x2001, 0x3317) },
- { USB_DEVICE(0x2001, 0x3c1b) },
-+ { USB_DEVICE(0x2001, 0x3c25) },
- /* Draytek */
- { USB_DEVICE(0x07fa, 0x7712) },
- /* DVICO */
-diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
-index 46ee956d0235..27cd6cabf6c5 100644
---- a/drivers/net/wireless/rtlwifi/usb.c
-+++ b/drivers/net/wireless/rtlwifi/usb.c
-@@ -126,7 +126,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
-
- do {
- status = usb_control_msg(udev, pipe, request, reqtype, value,
-- index, pdata, len, 0); /*max. timeout*/
-+ index, pdata, len, 1000);
- if (status < 0) {
- /* firmware download is checksummed, don't retry */
- if ((value >= FW_8192C_START_ADDRESS &&
-diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
-index 13584e24736a..4d7d60e593b8 100644
---- a/drivers/power/reset/at91-reset.c
-+++ b/drivers/power/reset/at91-reset.c
-@@ -212,9 +212,9 @@ static int at91_reset_platform_probe(struct platform_device *pdev)
- res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1 );
- at91_ramc_base[idx] = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
-- if (IS_ERR(at91_ramc_base[idx])) {
-+ if (!at91_ramc_base[idx]) {
- dev_err(&pdev->dev, "Could not map ram controller address\n");
-- return PTR_ERR(at91_ramc_base[idx]);
-+ return -ENOMEM;
- }
- }
-
-diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
-index 476171a768d6..8a029f9bc18c 100644
---- a/drivers/pwm/pwm-img.c
-+++ b/drivers/pwm/pwm-img.c
-@@ -16,6 +16,7 @@
- #include <linux/mfd/syscon.h>
- #include <linux/module.h>
- #include <linux/of.h>
-+#include <linux/of_device.h>
- #include <linux/platform_device.h>
- #include <linux/pwm.h>
- #include <linux/regmap.h>
-@@ -38,7 +39,22 @@
- #define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1
- #define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4)
-
--#define MAX_TMBASE_STEPS 65536
-+/*
-+ * PWM period is specified with a timebase register,
-+ * in number of step periods. The PWM duty cycle is also
-+ * specified in step periods, in the [0, $timebase] range.
-+ * In other words, the timebase imposes the duty cycle
-+ * resolution. Therefore, let's constrain the timebase to
-+ * a minimum value to allow a sane range of duty cycle values.
-+ * Imposing a minimum timebase will impose a maximum PWM frequency.
-+ *
-+ * The value chosen is completely arbitrary.
-+ */
-+#define MIN_TMBASE_STEPS 16
-+
-+struct img_pwm_soc_data {
-+ u32 max_timebase;
-+};
-
- struct img_pwm_chip {
- struct device *dev;
-@@ -47,6 +63,9 @@ struct img_pwm_chip {
- struct clk *sys_clk;
- void __iomem *base;
- struct regmap *periph_regs;
-+ int max_period_ns;
-+ int min_period_ns;
-+ const struct img_pwm_soc_data *data;
- };
-
- static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
-@@ -72,24 +91,31 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- u32 val, div, duty, timebase;
- unsigned long mul, output_clk_hz, input_clk_hz;
- struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
-+ unsigned int max_timebase = pwm_chip->data->max_timebase;
-+
-+ if (period_ns < pwm_chip->min_period_ns ||
-+ period_ns > pwm_chip->max_period_ns) {
-+ dev_err(chip->dev, "configured period not in range\n");
-+ return -ERANGE;
-+ }
-
- input_clk_hz = clk_get_rate(pwm_chip->pwm_clk);
- output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns);
-
- mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
-- if (mul <= MAX_TMBASE_STEPS) {
-+ if (mul <= max_timebase) {
- div = PWM_CTRL_CFG_NO_SUB_DIV;
- timebase = DIV_ROUND_UP(mul, 1);
-- } else if (mul <= MAX_TMBASE_STEPS * 8) {
-+ } else if (mul <= max_timebase * 8) {
- div = PWM_CTRL_CFG_SUB_DIV0;
- timebase = DIV_ROUND_UP(mul, 8);
-- } else if (mul <= MAX_TMBASE_STEPS * 64) {
-+ } else if (mul <= max_timebase * 64) {
- div = PWM_CTRL_CFG_SUB_DIV1;
- timebase = DIV_ROUND_UP(mul, 64);
-- } else if (mul <= MAX_TMBASE_STEPS * 512) {
-+ } else if (mul <= max_timebase * 512) {
- div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
- timebase = DIV_ROUND_UP(mul, 512);
-- } else if (mul > MAX_TMBASE_STEPS * 512) {
-+ } else if (mul > max_timebase * 512) {
- dev_err(chip->dev,
- "failed to configure timebase steps/divider value\n");
- return -EINVAL;
-@@ -143,11 +169,27 @@ static const struct pwm_ops img_pwm_ops = {
- .owner = THIS_MODULE,
- };
-
-+static const struct img_pwm_soc_data pistachio_pwm = {
-+ .max_timebase = 255,
-+};
-+
-+static const struct of_device_id img_pwm_of_match[] = {
-+ {
-+ .compatible = "img,pistachio-pwm",
-+ .data = &pistachio_pwm,
-+ },
-+ { }
-+};
-+MODULE_DEVICE_TABLE(of, img_pwm_of_match);
-+
- static int img_pwm_probe(struct platform_device *pdev)
- {
- int ret;
-+ u64 val;
-+ unsigned long clk_rate;
- struct resource *res;
- struct img_pwm_chip *pwm;
-+ const struct of_device_id *of_dev_id;
-
- pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
- if (!pwm)
-@@ -160,6 +202,11 @@ static int img_pwm_probe(struct platform_device *pdev)
- if (IS_ERR(pwm->base))
- return PTR_ERR(pwm->base);
-
-+ of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev);
-+ if (!of_dev_id)
-+ return -ENODEV;
-+ pwm->data = of_dev_id->data;
-+
- pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "img,cr-periph");
- if (IS_ERR(pwm->periph_regs))
-@@ -189,6 +236,17 @@ static int img_pwm_probe(struct platform_device *pdev)
- goto disable_sysclk;
- }
-
-+ clk_rate = clk_get_rate(pwm->pwm_clk);
-+
-+ /* The maximum input clock divider is 512 */
-+ val = (u64)NSEC_PER_SEC * 512 * pwm->data->max_timebase;
-+ do_div(val, clk_rate);
-+ pwm->max_period_ns = val;
-+
-+ val = (u64)NSEC_PER_SEC * MIN_TMBASE_STEPS;
-+ do_div(val, clk_rate);
-+ pwm->min_period_ns = val;
-+
- pwm->chip.dev = &pdev->dev;
- pwm->chip.ops = &img_pwm_ops;
- pwm->chip.base = -1;
-@@ -228,12 +286,6 @@ static int img_pwm_remove(struct platform_device *pdev)
- return pwmchip_remove(&pwm_chip->chip);
- }
-
--static const struct of_device_id img_pwm_of_match[] = {
-- { .compatible = "img,pistachio-pwm", },
-- { }
--};
--MODULE_DEVICE_TABLE(of, img_pwm_of_match);
--
- static struct platform_driver img_pwm_driver = {
- .driver = {
- .name = "img-pwm",
-diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
-index 8a4df7a1f2ee..e628d4c2f2ae 100644
---- a/drivers/regulator/da9052-regulator.c
-+++ b/drivers/regulator/da9052-regulator.c
-@@ -394,6 +394,7 @@ static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
-
- static int da9052_regulator_probe(struct platform_device *pdev)
- {
-+ const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct regulator_config config = { };
- struct da9052_regulator *regulator;
- struct da9052 *da9052;
-@@ -409,7 +410,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
- regulator->da9052 = da9052;
-
- regulator->info = find_regulator_info(regulator->da9052->chip_id,
-- pdev->id);
-+ cell->id);
- if (regulator->info == NULL) {
- dev_err(&pdev->dev, "invalid regulator ID specified\n");
- return -EINVAL;
-@@ -419,7 +420,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
- config.driver_data = regulator;
- config.regmap = da9052->regmap;
- if (pdata && pdata->regulators) {
-- config.init_data = pdata->regulators[pdev->id];
-+ config.init_data = pdata->regulators[cell->id];
- } else {
- #ifdef CONFIG_OF
- struct device_node *nproot = da9052->dev->of_node;
-diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index 3290a3ed5b31..a661d339adf7 100644
---- a/drivers/scsi/sd.c
-+++ b/drivers/scsi/sd.c
-@@ -1624,6 +1624,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
- {
- u64 start_lba = blk_rq_pos(scmd->request);
- u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
-+ u64 factor = scmd->device->sector_size / 512;
- u64 bad_lba;
- int info_valid;
- /*
-@@ -1645,16 +1646,9 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
- if (scsi_bufflen(scmd) <= scmd->device->sector_size)
- return 0;
-
-- if (scmd->device->sector_size < 512) {
-- /* only legitimate sector_size here is 256 */
-- start_lba <<= 1;
-- end_lba <<= 1;
-- } else {
-- /* be careful ... don't want any overflows */
-- unsigned int factor = scmd->device->sector_size / 512;
-- do_div(start_lba, factor);
-- do_div(end_lba, factor);
-- }
-+ /* be careful ... don't want any overflows */
-+ do_div(start_lba, factor);
-+ do_div(end_lba, factor);
-
- /* The bad lba was reported incorrectly, we have no idea where
- * the error is.
-@@ -2212,8 +2206,7 @@ got_data:
- if (sector_size != 512 &&
- sector_size != 1024 &&
- sector_size != 2048 &&
-- sector_size != 4096 &&
-- sector_size != 256) {
-+ sector_size != 4096) {
- sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
- sector_size);
- /*
-@@ -2268,8 +2261,6 @@ got_data:
- sdkp->capacity <<= 2;
- else if (sector_size == 1024)
- sdkp->capacity <<= 1;
-- else if (sector_size == 256)
-- sdkp->capacity >>= 1;
-
- blk_queue_physical_block_size(sdp->request_queue,
- sdkp->physical_block_size);
-diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
-index bf8c5c1e254e..75efaaeb0eca 100644
---- a/drivers/scsi/storvsc_drv.c
-+++ b/drivers/scsi/storvsc_drv.c
-@@ -1565,8 +1565,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
- break;
- default:
- vm_srb->data_in = UNKNOWN_TYPE;
-- vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
-- SRB_FLAGS_DATA_OUT);
-+ vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
- break;
- }
-
-diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
-index d1ab996b3305..a21a51efaad0 100644
---- a/drivers/staging/gdm724x/gdm_mux.c
-+++ b/drivers/staging/gdm724x/gdm_mux.c
-@@ -158,7 +158,7 @@ static int up_to_host(struct mux_rx *r)
- unsigned int start_flag;
- unsigned int payload_size;
- unsigned short packet_type;
-- int dummy_cnt;
-+ int total_len;
- u32 packet_size_sum = r->offset;
- int index;
- int ret = TO_HOST_INVALID_PACKET;
-@@ -176,10 +176,10 @@ static int up_to_host(struct mux_rx *r)
- break;
- }
-
-- dummy_cnt = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
-+ total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
-
- if (len - packet_size_sum <
-- MUX_HEADER_SIZE + payload_size + dummy_cnt) {
-+ total_len) {
- pr_err("invalid payload : %d %d %04x\n",
- payload_size, len, packet_type);
- break;
-@@ -202,7 +202,7 @@ static int up_to_host(struct mux_rx *r)
- break;
- }
-
-- packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
-+ packet_size_sum += total_len;
- if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
- ret = r->callback(NULL,
- 0,
-@@ -361,7 +361,6 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
- struct mux_pkt_header *mux_header;
- struct mux_tx *t = NULL;
- static u32 seq_num = 1;
-- int dummy_cnt;
- int total_len;
- int ret;
- unsigned long flags;
-@@ -374,9 +373,7 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
-
- spin_lock_irqsave(&mux_dev->write_lock, flags);
-
-- dummy_cnt = ALIGN(MUX_HEADER_SIZE + len, 4);
--
-- total_len = len + MUX_HEADER_SIZE + dummy_cnt;
-+ total_len = ALIGN(MUX_HEADER_SIZE + len, 4);
-
- t = alloc_mux_tx(total_len);
- if (!t) {
-@@ -392,7 +389,8 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
- mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);
-
- memcpy(t->buf+MUX_HEADER_SIZE, data, len);
-- memset(t->buf+MUX_HEADER_SIZE+len, 0, dummy_cnt);
-+ memset(t->buf+MUX_HEADER_SIZE+len, 0, total_len - MUX_HEADER_SIZE -
-+ len);
-
- t->len = total_len;
- t->callback = cb;
-diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
-index 03b2a90b9ac0..992236f605d8 100644
---- a/drivers/staging/vt6655/device_main.c
-+++ b/drivers/staging/vt6655/device_main.c
-@@ -911,7 +911,11 @@ static int vnt_int_report_rate(struct vnt_private *priv,
-
- if (!(tsr1 & TSR1_TERR)) {
- info->status.rates[0].idx = idx;
-- info->flags |= IEEE80211_TX_STAT_ACK;
-+
-+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
-+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
-+ else
-+ info->flags |= IEEE80211_TX_STAT_ACK;
- }
-
- return 0;
-@@ -936,9 +940,6 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
- //Only the status of first TD in the chain is correct
- if (pTD->m_td1TD1.byTCR & TCR_STP) {
- if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) {
--
-- vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
--
- if (!(byTsr1 & TSR1_TERR)) {
- if (byTsr0 != 0) {
- pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
-@@ -957,6 +958,9 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
- (int)uIdx, byTsr1, byTsr0);
- }
- }
-+
-+ vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
-+
- device_free_tx_buf(pDevice, pTD);
- pDevice->iTDUsed[uIdx]--;
- }
-@@ -988,10 +992,8 @@ static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc)
- PCI_DMA_TODEVICE);
- }
-
-- if (pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
-+ if (skb)
- ieee80211_tx_status_irqsafe(pDevice->hw, skb);
-- else
-- dev_kfree_skb_irq(skb);
-
- pTDInfo->skb_dma = 0;
- pTDInfo->skb = NULL;
-@@ -1201,14 +1203,6 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
- if (dma_idx == TYPE_AC0DMA)
- head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
-
-- priv->iTDUsed[dma_idx]++;
--
-- /* Take ownership */
-- wmb();
-- head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
--
-- /* get Next */
-- wmb();
- priv->apCurrTD[dma_idx] = head_td->next;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-@@ -1229,11 +1223,18 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
-
- head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
-
-+ /* Poll Transmit the adapter */
-+ wmb();
-+ head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
-+ wmb(); /* second memory barrier */
-+
- if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
- MACvTransmitAC0(priv->PortOffset);
- else
- MACvTransmit0(priv->PortOffset);
-
-+ priv->iTDUsed[dma_idx]++;
-+
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-@@ -1413,9 +1414,16 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
-
- priv->current_aid = conf->aid;
-
-- if (changed & BSS_CHANGED_BSSID)
-+ if (changed & BSS_CHANGED_BSSID) {
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&priv->lock, flags);
-+
- MACvWriteBSSIDAddress(priv->PortOffset, (u8 *)conf->bssid);
-
-+ spin_unlock_irqrestore(&priv->lock, flags);
-+ }
-+
- if (changed & BSS_CHANGED_BASIC_RATES) {
- priv->basic_rates = conf->basic_rates;
-
-diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
-index 33baf26de4b5..ee9ce165dcde 100644
---- a/drivers/staging/vt6656/rxtx.c
-+++ b/drivers/staging/vt6656/rxtx.c
-@@ -805,10 +805,18 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
- vnt_schedule_command(priv, WLAN_CMD_SETPOWER);
- }
-
-- if (current_rate > RATE_11M)
-- pkt_type = priv->packet_type;
-- else
-+ if (current_rate > RATE_11M) {
-+ if (info->band == IEEE80211_BAND_5GHZ) {
-+ pkt_type = PK_TYPE_11A;
-+ } else {
-+ if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
-+ pkt_type = PK_TYPE_11GB;
-+ else
-+ pkt_type = PK_TYPE_11GA;
-+ }
-+ } else {
- pkt_type = PK_TYPE_11B;
-+ }
-
- spin_lock_irqsave(&priv->lock, flags);
-
-diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
-index f6c954c4635f..4073869d2090 100644
---- a/drivers/target/target_core_pscsi.c
-+++ b/drivers/target/target_core_pscsi.c
-@@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev)
- " pdv_host_id: %d\n", pdv->pdv_host_id);
- return -EINVAL;
- }
-+ pdv->pdv_lld_host = sh;
- }
- } else {
- if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
-@@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev)
- if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
- (phv->phv_lld_host != NULL))
- scsi_host_put(phv->phv_lld_host);
-+ else if (pdv->pdv_lld_host)
-+ scsi_host_put(pdv->pdv_lld_host);
-
- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
- scsi_device_put(sd);
-diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
-index 1bd757dff8ee..820d3052b775 100644
---- a/drivers/target/target_core_pscsi.h
-+++ b/drivers/target/target_core_pscsi.h
-@@ -45,6 +45,7 @@ struct pscsi_dev_virt {
- int pdv_lun_id;
- struct block_device *pdv_bd;
- struct scsi_device *pdv_sd;
-+ struct Scsi_Host *pdv_lld_host;
- } ____cacheline_aligned;
-
- typedef enum phv_modes {
-diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
-index c2556cf5186b..01255fd65135 100644
---- a/drivers/thermal/armada_thermal.c
-+++ b/drivers/thermal/armada_thermal.c
-@@ -224,9 +224,9 @@ static const struct armada_thermal_data armada380_data = {
- .is_valid_shift = 10,
- .temp_shift = 0,
- .temp_mask = 0x3ff,
-- .coef_b = 1169498786UL,
-- .coef_m = 2000000UL,
-- .coef_div = 4289,
-+ .coef_b = 2931108200UL,
-+ .coef_m = 5000000UL,
-+ .coef_div = 10502,
- .inverted = true,
- };
-
-diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
-index 5bab1c684bb1..7a3d146a5f0e 100644
---- a/drivers/tty/hvc/hvc_xen.c
-+++ b/drivers/tty/hvc/hvc_xen.c
-@@ -289,7 +289,7 @@ static int xen_initial_domain_console_init(void)
- return -ENOMEM;
- }
-
-- info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
-+ info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
- info->vtermno = HVC_COOKIE;
-
- spin_lock(&xencons_lock);
-diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
-index c4343764cc5b..bce16e405d59 100644
---- a/drivers/tty/n_gsm.c
-+++ b/drivers/tty/n_gsm.c
-@@ -3170,7 +3170,7 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
- return gsmtty_modem_update(dlci, encode);
- }
-
--static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
-+static void gsmtty_cleanup(struct tty_struct *tty)
- {
- struct gsm_dlci *dlci = tty->driver_data;
- struct gsm_mux *gsm = dlci->gsm;
-@@ -3178,7 +3178,6 @@ static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
- dlci_put(dlci);
- dlci_put(gsm->dlci[0]);
- mux_put(gsm);
-- driver->ttys[tty->index] = NULL;
- }
-
- /* Virtual ttys for the demux */
-@@ -3199,7 +3198,7 @@ static const struct tty_operations gsmtty_ops = {
- .tiocmget = gsmtty_tiocmget,
- .tiocmset = gsmtty_tiocmset,
- .break_ctl = gsmtty_break_ctl,
-- .remove = gsmtty_remove,
-+ .cleanup = gsmtty_cleanup,
- };
-
-
-diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
-index 644ddb841d9f..bbc4ce66c2c1 100644
---- a/drivers/tty/n_hdlc.c
-+++ b/drivers/tty/n_hdlc.c
-@@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
- add_wait_queue(&tty->read_wait, &wait);
-
- for (;;) {
-- if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
-+ if (test_bit(TTY_OTHER_DONE, &tty->flags)) {
- ret = -EIO;
- break;
- }
-@@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
- /* set bits for operations that won't block */
- if (n_hdlc->rx_buf_list.head)
- mask |= POLLIN | POLLRDNORM; /* readable */
-- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
-+ if (test_bit(TTY_OTHER_DONE, &tty->flags))
- mask |= POLLHUP;
- if (tty_hung_up_p(filp))
- mask |= POLLHUP;
-diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
-index cf6e0f2e1331..cc57a3a6b02b 100644
---- a/drivers/tty/n_tty.c
-+++ b/drivers/tty/n_tty.c
-@@ -1949,6 +1949,18 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
- return ldata->commit_head - ldata->read_tail >= amt;
- }
-
-+static inline int check_other_done(struct tty_struct *tty)
-+{
-+ int done = test_bit(TTY_OTHER_DONE, &tty->flags);
-+ if (done) {
-+ /* paired with cmpxchg() in check_other_closed(); ensures
-+ * read buffer head index is not stale
-+ */
-+ smp_mb__after_atomic();
-+ }
-+ return done;
-+}
-+
- /**
- * copy_from_read_buf - copy read data directly
- * @tty: terminal device
-@@ -2167,7 +2179,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
- struct n_tty_data *ldata = tty->disc_data;
- unsigned char __user *b = buf;
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
-- int c;
-+ int c, done;
- int minimum, time;
- ssize_t retval = 0;
- long timeout;
-@@ -2235,8 +2247,10 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
- ((minimum - (b - buf)) >= 1))
- ldata->minimum_to_wake = (minimum - (b - buf));
-
-+ done = check_other_done(tty);
-+
- if (!input_available_p(tty, 0)) {
-- if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
-+ if (done) {
- retval = -EIO;
- break;
- }
-@@ -2443,12 +2457,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
-
- poll_wait(file, &tty->read_wait, wait);
- poll_wait(file, &tty->write_wait, wait);
-+ if (check_other_done(tty))
-+ mask |= POLLHUP;
- if (input_available_p(tty, 1))
- mask |= POLLIN | POLLRDNORM;
- if (tty->packet && tty->link->ctrl_status)
- mask |= POLLPRI | POLLIN | POLLRDNORM;
-- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
-- mask |= POLLHUP;
- if (tty_hung_up_p(file))
- mask |= POLLHUP;
- if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
-diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
-index e72ee629cead..4d5e8409769c 100644
---- a/drivers/tty/pty.c
-+++ b/drivers/tty/pty.c
-@@ -53,9 +53,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
- /* Review - krefs on tty_link ?? */
- if (!tty->link)
- return;
-- tty_flush_to_ldisc(tty->link);
- set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
-- wake_up_interruptible(&tty->link->read_wait);
-+ tty_flip_buffer_push(tty->link->port);
- wake_up_interruptible(&tty->link->write_wait);
- if (tty->driver->subtype == PTY_TYPE_MASTER) {
- set_bit(TTY_OTHER_CLOSED, &tty->flags);
-@@ -243,7 +242,9 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
- goto out;
-
- clear_bit(TTY_IO_ERROR, &tty->flags);
-+ /* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */
- clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
-+ clear_bit(TTY_OTHER_DONE, &tty->link->flags);
- set_bit(TTY_THROTTLED, &tty->flags);
- return 0;
-
-diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
-index 75661641f5fe..2f78b77f0f81 100644
---- a/drivers/tty/tty_buffer.c
-+++ b/drivers/tty/tty_buffer.c
-@@ -37,6 +37,28 @@
-
- #define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
-
-+/*
-+ * If all tty flip buffers have been processed by flush_to_ldisc() or
-+ * dropped by tty_buffer_flush(), check if the linked pty has been closed.
-+ * If so, wake the reader/poll to process the hangup.
-+ */
-+static inline void check_other_closed(struct tty_struct *tty)
-+{
-+ unsigned long flags, old;
-+
-+ /* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */
-+ for (flags = ACCESS_ONCE(tty->flags);
-+ test_bit(TTY_OTHER_CLOSED, &flags);
-+ ) {
-+ old = flags;
-+ __set_bit(TTY_OTHER_DONE, &flags);
-+ flags = cmpxchg(&tty->flags, old, flags);
-+ if (old == flags) {
-+ wake_up_interruptible(&tty->read_wait);
-+ break;
-+ }
-+ }
-+}
-
- /**
- * tty_buffer_lock_exclusive - gain exclusive access to buffer
-@@ -229,6 +251,8 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
- if (ld && ld->ops->flush_buffer)
- ld->ops->flush_buffer(tty);
-
-+ check_other_closed(tty);
-+
- atomic_dec(&buf->priority);
- mutex_unlock(&buf->lock);
- }
-@@ -471,8 +495,10 @@ static void flush_to_ldisc(struct work_struct *work)
- smp_rmb();
- count = head->commit - head->read;
- if (!count) {
-- if (next == NULL)
-+ if (next == NULL) {
-+ check_other_closed(tty);
- break;
-+ }
- buf->head = next;
- tty_buffer_free(port, head);
- continue;
-@@ -489,19 +515,6 @@ static void flush_to_ldisc(struct work_struct *work)
- }
-
- /**
-- * tty_flush_to_ldisc
-- * @tty: tty to push
-- *
-- * Push the terminal flip buffers to the line discipline.
-- *
-- * Must not be called from IRQ context.
-- */
--void tty_flush_to_ldisc(struct tty_struct *tty)
--{
-- flush_work(&tty->port->buf.work);
--}
--
--/**
- * tty_flip_buffer_push - terminal
- * @port: tty port to push
- *
-diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
-index c42765b3a060..0495c94a23d7 100644
---- a/drivers/usb/gadget/configfs.c
-+++ b/drivers/usb/gadget/configfs.c
-@@ -1295,6 +1295,7 @@ static void purge_configs_funcs(struct gadget_info *gi)
- }
- }
- c->next_interface_id = 0;
-+ memset(c->interface, 0, sizeof(c->interface));
- c->superspeed = 0;
- c->highspeed = 0;
- c->fullspeed = 0;
-diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
-index eeedde8c435a..6994c99e58a6 100644
---- a/drivers/usb/host/xhci-ring.c
-+++ b/drivers/usb/host/xhci-ring.c
-@@ -2026,8 +2026,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
- break;
- case COMP_DEV_ERR:
- case COMP_STALL:
-+ frame->status = -EPROTO;
-+ skip_td = true;
-+ break;
- case COMP_TX_ERR:
- frame->status = -EPROTO;
-+ if (event_trb != td->last_trb)
-+ return 0;
- skip_td = true;
- break;
- case COMP_STOP:
-@@ -2640,7 +2645,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
- xhci_halt(xhci);
- hw_died:
- spin_unlock(&xhci->lock);
-- return -ESHUTDOWN;
-+ return IRQ_HANDLED;
- }
-
- /*
-diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
-index 8e421b89632d..ea75e8ccd3c1 100644
---- a/drivers/usb/host/xhci.h
-+++ b/drivers/usb/host/xhci.h
-@@ -1267,7 +1267,7 @@ union xhci_trb {
- * since the command ring is 64-byte aligned.
- * It must also be greater than 16.
- */
--#define TRBS_PER_SEGMENT 64
-+#define TRBS_PER_SEGMENT 256
- /* Allow two commands + a link TRB, along with any reserved command TRBs */
- #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
- #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
-diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
-index 84ce2d74894c..9031750e7404 100644
---- a/drivers/usb/serial/cp210x.c
-+++ b/drivers/usb/serial/cp210x.c
-@@ -127,6 +127,7 @@ static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
- { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
- { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
-+ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
- { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
- { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
- { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
-diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
-index 829604d11f3f..f5257af33ecf 100644
---- a/drivers/usb/serial/pl2303.c
-+++ b/drivers/usb/serial/pl2303.c
-@@ -61,7 +61,6 @@ static const struct usb_device_id id_table[] = {
- { USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) },
- { USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) },
- { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) },
-- { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) },
- { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1),
- .driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
- { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65),
-diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
-index 71fd9da1d6e7..e3b7af8adfb7 100644
---- a/drivers/usb/serial/pl2303.h
-+++ b/drivers/usb/serial/pl2303.h
-@@ -62,10 +62,6 @@
- #define ALCATEL_VENDOR_ID 0x11f7
- #define ALCATEL_PRODUCT_ID 0x02df
-
--/* Samsung I330 phone cradle */
--#define SAMSUNG_VENDOR_ID 0x04e8
--#define SAMSUNG_PRODUCT_ID 0x8001
--
- #define SIEMENS_VENDOR_ID 0x11f5
- #define SIEMENS_PRODUCT_ID_SX1 0x0001
- #define SIEMENS_PRODUCT_ID_X65 0x0003
-diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
-index bf2bd40e5f2a..60afb39eb73c 100644
---- a/drivers/usb/serial/visor.c
-+++ b/drivers/usb/serial/visor.c
-@@ -95,7 +95,7 @@ static const struct usb_device_id id_table[] = {
- .driver_info = (kernel_ulong_t)&palm_os_4_probe },
- { USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID),
- .driver_info = (kernel_ulong_t)&palm_os_4_probe },
-- { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID),
-+ { USB_DEVICE_INTERFACE_CLASS(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID, 0xff),
- .driver_info = (kernel_ulong_t)&palm_os_4_probe },
- { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID),
- .driver_info = (kernel_ulong_t)&palm_os_4_probe },
-diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
-index d684b4b8108f..caf188800c67 100644
---- a/drivers/usb/storage/unusual_devs.h
-+++ b/drivers/usb/storage/unusual_devs.h
-@@ -766,6 +766,13 @@ UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000,
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_GO_SLOW ),
-
-+/* Reported by Christian Schaller <cschalle@redhat.com> */
-+UNUSUAL_DEV( 0x059f, 0x0651, 0x0000, 0x0000,
-+ "LaCie",
-+ "External HDD",
-+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-+ US_FL_NO_WP_DETECT ),
-+
- /* Submitted by Joel Bourquard <numlock@freesurf.ch>
- * Some versions of this device need the SubClass and Protocol overrides
- * while others don't.
-diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
-index 2b8553bd8715..38387950490e 100644
---- a/drivers/xen/events/events_base.c
-+++ b/drivers/xen/events/events_base.c
-@@ -957,7 +957,7 @@ unsigned xen_evtchn_nr_channels(void)
- }
- EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
-
--int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
-+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
- {
- struct evtchn_bind_virq bind_virq;
- int evtchn, irq, ret;
-@@ -971,8 +971,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
- if (irq < 0)
- goto out;
-
-- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
-- handle_percpu_irq, "virq");
-+ if (percpu)
-+ irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
-+ handle_percpu_irq, "virq");
-+ else
-+ irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
-+ handle_edge_irq, "virq");
-
- bind_virq.virq = virq;
- bind_virq.vcpu = cpu;
-@@ -1062,7 +1066,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
- {
- int irq, retval;
-
-- irq = bind_virq_to_irq(virq, cpu);
-+ irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
- if (irq < 0)
- return irq;
- retval = request_irq(irq, handler, irqflags, devname, dev_id);
-diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index d925f55e4857..8081aba116a7 100644
---- a/fs/binfmt_elf.c
-+++ b/fs/binfmt_elf.c
-@@ -928,7 +928,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
- total_size = total_mapping_size(elf_phdata,
- loc->elf_ex.e_phnum);
- if (!total_size) {
-- error = -EINVAL;
-+ retval = -EINVAL;
- goto out_free_dentry;
- }
- }
-diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
-index 0a795c969c78..8b33da6ec3dd 100644
---- a/fs/btrfs/extent-tree.c
-+++ b/fs/btrfs/extent-tree.c
-@@ -8548,7 +8548,9 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
- out:
- if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
- alloc_flags = update_block_group_flags(root, cache->flags);
-+ lock_chunks(root->fs_info->chunk_root);
- check_system_chunk(trans, root, alloc_flags);
-+ unlock_chunks(root->fs_info->chunk_root);
- }
-
- btrfs_end_transaction(trans, root);
-diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
-index 8222f6f74147..44a7e0398d97 100644
---- a/fs/btrfs/volumes.c
-+++ b/fs/btrfs/volumes.c
-@@ -4626,6 +4626,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
- {
- u64 chunk_offset;
-
-+ ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
- chunk_offset = find_next_chunk(extent_root->fs_info);
- return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
- }
-diff --git a/fs/dcache.c b/fs/dcache.c
-index c71e3732e53b..922f23ef6041 100644
---- a/fs/dcache.c
-+++ b/fs/dcache.c
-@@ -1205,13 +1205,13 @@ ascend:
- /* might go back up the wrong parent if we have had a rename. */
- if (need_seqretry(&rename_lock, seq))
- goto rename_retry;
-- next = child->d_child.next;
-- while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
-+ /* go into the first sibling still alive */
-+ do {
-+ next = child->d_child.next;
- if (next == &this_parent->d_subdirs)
- goto ascend;
- child = list_entry(next, struct dentry, d_child);
-- next = next->next;
-- }
-+ } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
- rcu_read_unlock();
- goto resume;
- }
-diff --git a/fs/exec.c b/fs/exec.c
-index 00400cf522dc..120244523647 100644
---- a/fs/exec.c
-+++ b/fs/exec.c
-@@ -659,6 +659,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
- if (stack_base > STACK_SIZE_MAX)
- stack_base = STACK_SIZE_MAX;
-
-+ /* Add space for stack randomization. */
-+ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
-+
- /* Make sure we didn't let the argument array grow too large. */
- if (vma->vm_end - vma->vm_start > stack_base)
- return -ENOMEM;
-diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
-index 3445035c7e01..d41843181818 100644
---- a/fs/ext4/ext4_jbd2.c
-+++ b/fs/ext4/ext4_jbd2.c
-@@ -87,6 +87,12 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
- ext4_put_nojournal(handle);
- return 0;
- }
-+
-+ if (!handle->h_transaction) {
-+ err = jbd2_journal_stop(handle);
-+ return handle->h_err ? handle->h_err : err;
-+ }
-+
- sb = handle->h_transaction->t_journal->j_private;
- err = handle->h_err;
- rc = jbd2_journal_stop(handle);
-diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
-index 16f6365f65e7..ea4ee1732143 100644
---- a/fs/ext4/extents.c
-+++ b/fs/ext4/extents.c
-@@ -377,7 +377,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
- ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
- ext4_lblk_t last = lblock + len - 1;
-
-- if (lblock > last)
-+ if (len == 0 || lblock > last)
- return 0;
- return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
- }
-diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
-index 852cc521f327..1f252b4e0f51 100644
---- a/fs/ext4/inode.c
-+++ b/fs/ext4/inode.c
-@@ -4233,7 +4233,7 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
- int inode_size = EXT4_INODE_SIZE(sb);
-
- oi.orig_ino = orig_ino;
-- ino = orig_ino & ~(inodes_per_block - 1);
-+ ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
- for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
- if (ino == orig_ino)
- continue;
-diff --git a/fs/fhandle.c b/fs/fhandle.c
-index 999ff5c3cab0..d59712dfa3e7 100644
---- a/fs/fhandle.c
-+++ b/fs/fhandle.c
-@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
- goto out_err;
- }
- /* copy the full handle */
-- if (copy_from_user(handle, ufh,
-- sizeof(struct file_handle) +
-+ *handle = f_handle;
-+ if (copy_from_user(&handle->f_handle,
-+ &ufh->f_handle,
- f_handle.handle_bytes)) {
- retval = -EFAULT;
- goto out_handle;
-diff --git a/fs/fs_pin.c b/fs/fs_pin.c
-index b06c98796afb..611b5408f6ec 100644
---- a/fs/fs_pin.c
-+++ b/fs/fs_pin.c
-@@ -9,8 +9,8 @@ static DEFINE_SPINLOCK(pin_lock);
- void pin_remove(struct fs_pin *pin)
- {
- spin_lock(&pin_lock);
-- hlist_del(&pin->m_list);
-- hlist_del(&pin->s_list);
-+ hlist_del_init(&pin->m_list);
-+ hlist_del_init(&pin->s_list);
- spin_unlock(&pin_lock);
- spin_lock_irq(&pin->wait.lock);
- pin->done = 1;
-diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
-index b5128c6e63ad..a9079d035ae5 100644
---- a/fs/jbd2/recovery.c
-+++ b/fs/jbd2/recovery.c
-@@ -842,15 +842,23 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
- {
- jbd2_journal_revoke_header_t *header;
- int offset, max;
-+ int csum_size = 0;
-+ __u32 rcount;
- int record_len = 4;
-
- header = (jbd2_journal_revoke_header_t *) bh->b_data;
- offset = sizeof(jbd2_journal_revoke_header_t);
-- max = be32_to_cpu(header->r_count);
-+ rcount = be32_to_cpu(header->r_count);
-
- if (!jbd2_revoke_block_csum_verify(journal, header))
- return -EINVAL;
-
-+ if (jbd2_journal_has_csum_v2or3(journal))
-+ csum_size = sizeof(struct jbd2_journal_revoke_tail);
-+ if (rcount > journal->j_blocksize - csum_size)
-+ return -EINVAL;
-+ max = rcount;
-+
- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
- record_len = 8;
-
-diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
-index c6cbaef2bda1..14214da80eb8 100644
---- a/fs/jbd2/revoke.c
-+++ b/fs/jbd2/revoke.c
-@@ -577,7 +577,7 @@ static void write_one_revoke_record(journal_t *journal,
- {
- int csum_size = 0;
- struct buffer_head *descriptor;
-- int offset;
-+ int sz, offset;
- journal_header_t *header;
-
- /* If we are already aborting, this all becomes a noop. We
-@@ -594,9 +594,14 @@ static void write_one_revoke_record(journal_t *journal,
- if (jbd2_journal_has_csum_v2or3(journal))
- csum_size = sizeof(struct jbd2_journal_revoke_tail);
-
-+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
-+ sz = 8;
-+ else
-+ sz = 4;
-+
- /* Make sure we have a descriptor with space left for the record */
- if (descriptor) {
-- if (offset >= journal->j_blocksize - csum_size) {
-+ if (offset + sz > journal->j_blocksize - csum_size) {
- flush_descriptor(journal, descriptor, offset, write_op);
- descriptor = NULL;
- }
-@@ -619,16 +624,13 @@ static void write_one_revoke_record(journal_t *journal,
- *descriptorp = descriptor;
- }
-
-- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
-+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
- * ((__be64 *)(&descriptor->b_data[offset])) =
- cpu_to_be64(record->blocknr);
-- offset += 8;
--
-- } else {
-+ else
- * ((__be32 *)(&descriptor->b_data[offset])) =
- cpu_to_be32(record->blocknr);
-- offset += 4;
-- }
-+ offset += sz;
-
- *offsetp = offset;
- }
-diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
-index 5f09370c90a8..ff2f2e6ad311 100644
---- a/fs/jbd2/transaction.c
-+++ b/fs/jbd2/transaction.c
-@@ -551,7 +551,6 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
- int result;
- int wanted;
-
-- WARN_ON(!transaction);
- if (is_handle_aborted(handle))
- return -EROFS;
- journal = transaction->t_journal;
-@@ -627,7 +626,6 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
- tid_t tid;
- int need_to_start, ret;
-
-- WARN_ON(!transaction);
- /* If we've had an abort of any type, don't even think about
- * actually doing the restart! */
- if (is_handle_aborted(handle))
-@@ -785,7 +783,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
- int need_copy = 0;
- unsigned long start_lock, time_lock;
-
-- WARN_ON(!transaction);
- if (is_handle_aborted(handle))
- return -EROFS;
- journal = transaction->t_journal;
-@@ -1051,7 +1048,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
- int err;
-
- jbd_debug(5, "journal_head %p\n", jh);
-- WARN_ON(!transaction);
- err = -EROFS;
- if (is_handle_aborted(handle))
- goto out;
-@@ -1266,7 +1262,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
- struct journal_head *jh;
- int ret = 0;
-
-- WARN_ON(!transaction);
- if (is_handle_aborted(handle))
- return -EROFS;
- journal = transaction->t_journal;
-@@ -1397,7 +1392,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
- int err = 0;
- int was_modified = 0;
-
-- WARN_ON(!transaction);
- if (is_handle_aborted(handle))
- return -EROFS;
- journal = transaction->t_journal;
-@@ -1530,8 +1524,22 @@ int jbd2_journal_stop(handle_t *handle)
- tid_t tid;
- pid_t pid;
-
-- if (!transaction)
-- goto free_and_exit;
-+ if (!transaction) {
-+ /*
-+ * Handle is already detached from the transaction so
-+ * there is nothing to do other than decrease a refcount,
-+ * or free the handle if refcount drops to zero
-+ */
-+ if (--handle->h_ref > 0) {
-+ jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
-+ handle->h_ref);
-+ return err;
-+ } else {
-+ if (handle->h_rsv_handle)
-+ jbd2_free_handle(handle->h_rsv_handle);
-+ goto free_and_exit;
-+ }
-+ }
- journal = transaction->t_journal;
-
- J_ASSERT(journal_current_handle() == handle);
-@@ -2373,7 +2381,6 @@ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
- transaction_t *transaction = handle->h_transaction;
- journal_t *journal;
-
-- WARN_ON(!transaction);
- if (is_handle_aborted(handle))
- return -EROFS;
- journal = transaction->t_journal;
-diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
-index 6acc9648f986..345b35fd329d 100644
---- a/fs/kernfs/dir.c
-+++ b/fs/kernfs/dir.c
-@@ -518,7 +518,14 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
- if (!kn)
- goto err_out1;
-
-- ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
-+ /*
-+ * If the ino of the sysfs entry created for a kmem cache gets
-+ * allocated from an ida layer, which is accounted to the memcg that
-+ * owns the cache, the memcg will get pinned forever. So do not account
-+ * ino ida allocations.
-+ */
-+ ret = ida_simple_get(&root->ino_ida, 1, 0,
-+ GFP_KERNEL | __GFP_NOACCOUNT);
- if (ret < 0)
- goto err_out2;
- kn->ino = ret;
-diff --git a/fs/namespace.c b/fs/namespace.c
-index 38ed1e1bed41..13b0f7bfc096 100644
---- a/fs/namespace.c
-+++ b/fs/namespace.c
-@@ -1709,8 +1709,11 @@ struct vfsmount *collect_mounts(struct path *path)
- {
- struct mount *tree;
- namespace_lock();
-- tree = copy_tree(real_mount(path->mnt), path->dentry,
-- CL_COPY_ALL | CL_PRIVATE);
-+ if (!check_mnt(real_mount(path->mnt)))
-+ tree = ERR_PTR(-EINVAL);
-+ else
-+ tree = copy_tree(real_mount(path->mnt), path->dentry,
-+ CL_COPY_ALL | CL_PRIVATE);
- namespace_unlock();
- if (IS_ERR(tree))
- return ERR_CAST(tree);
-diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
-index 03d647bf195d..cdefaa331a07 100644
---- a/fs/nfsd/blocklayout.c
-+++ b/fs/nfsd/blocklayout.c
-@@ -181,6 +181,17 @@ nfsd4_block_proc_layoutcommit(struct inode *inode,
- }
-
- const struct nfsd4_layout_ops bl_layout_ops = {
-+ /*
-+ * Pretend that we send notification to the client. This is a blatant
-+ * lie to force recent Linux clients to cache our device IDs.
-+ * We rarely ever change the device ID, so the harm of leaking deviceids
-+ * for a while isn't too bad. Unfortunately RFC5661 is a complete mess
-+ * in this regard, but I filed errata 4119 for this a while ago, and
-+ * hopefully the Linux client will eventually start caching deviceids
-+ * without this again.
-+ */
-+ .notify_types =
-+ NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
- .proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo,
- .encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo,
- .proc_layoutget = nfsd4_block_proc_layoutget,
-diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
-index ee1cccdb083a..b4541ede7cb8 100644
---- a/fs/nfsd/nfs4state.c
-+++ b/fs/nfsd/nfs4state.c
-@@ -4386,10 +4386,17 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
- return nfserr_old_stateid;
- }
-
-+static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
-+{
-+ if (ols->st_stateowner->so_is_open_owner &&
-+ !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
-+ return nfserr_bad_stateid;
-+ return nfs_ok;
-+}
-+
- static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
- {
- struct nfs4_stid *s;
-- struct nfs4_ol_stateid *ols;
- __be32 status = nfserr_bad_stateid;
-
- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
-@@ -4419,13 +4426,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
- break;
- case NFS4_OPEN_STID:
- case NFS4_LOCK_STID:
-- ols = openlockstateid(s);
-- if (ols->st_stateowner->so_is_open_owner
-- && !(openowner(ols->st_stateowner)->oo_flags
-- & NFS4_OO_CONFIRMED))
-- status = nfserr_bad_stateid;
-- else
-- status = nfs_ok;
-+ status = nfsd4_check_openowner_confirmed(openlockstateid(s));
- break;
- default:
- printk("unknown stateid type %x\n", s->sc_type);
-@@ -4517,8 +4518,8 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
- status = nfs4_check_fh(current_fh, stp);
- if (status)
- goto out;
-- if (stp->st_stateowner->so_is_open_owner
-- && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
-+ status = nfsd4_check_openowner_confirmed(stp);
-+ if (status)
- goto out;
- status = nfs4_check_openmode(stp, flags);
- if (status)
-diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
-index 138321b0c6c2..454111a3308e 100644
---- a/fs/omfs/inode.c
-+++ b/fs/omfs/inode.c
-@@ -306,7 +306,8 @@ static const struct super_operations omfs_sops = {
- */
- static int omfs_get_imap(struct super_block *sb)
- {
-- unsigned int bitmap_size, count, array_size;
-+ unsigned int bitmap_size, array_size;
-+ int count;
- struct omfs_sb_info *sbi = OMFS_SB(sb);
- struct buffer_head *bh;
- unsigned long **ptr;
-@@ -359,7 +360,7 @@ nomem:
- }
-
- enum {
-- Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask
-+ Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err
- };
-
- static const match_table_t tokens = {
-@@ -368,6 +369,7 @@ static const match_table_t tokens = {
- {Opt_umask, "umask=%o"},
- {Opt_dmask, "dmask=%o"},
- {Opt_fmask, "fmask=%o"},
-+ {Opt_err, NULL},
- };
-
- static int parse_options(char *options, struct omfs_sb_info *sbi)
-diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
-index 24f640441bd9..84d693d37428 100644
---- a/fs/overlayfs/copy_up.c
-+++ b/fs/overlayfs/copy_up.c
-@@ -299,6 +299,9 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
- struct cred *override_cred;
- char *link = NULL;
-
-+ if (WARN_ON(!workdir))
-+ return -EROFS;
-+
- ovl_path_upper(parent, &parentpath);
- upperdir = parentpath.dentry;
-
-diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
-index d139405d2bfa..692ceda3bc21 100644
---- a/fs/overlayfs/dir.c
-+++ b/fs/overlayfs/dir.c
-@@ -222,6 +222,9 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
- struct kstat stat;
- int err;
-
-+ if (WARN_ON(!workdir))
-+ return ERR_PTR(-EROFS);
-+
- err = ovl_lock_rename_workdir(workdir, upperdir);
- if (err)
- goto out;
-@@ -322,6 +325,9 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
- struct dentry *newdentry;
- int err;
-
-+ if (WARN_ON(!workdir))
-+ return -EROFS;
-+
- err = ovl_lock_rename_workdir(workdir, upperdir);
- if (err)
- goto out;
-@@ -506,11 +512,28 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
- struct dentry *opaquedir = NULL;
- int err;
-
-- if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
-- opaquedir = ovl_check_empty_and_clear(dentry);
-- err = PTR_ERR(opaquedir);
-- if (IS_ERR(opaquedir))
-- goto out;
-+ if (WARN_ON(!workdir))
-+ return -EROFS;
-+
-+ if (is_dir) {
-+ if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
-+ opaquedir = ovl_check_empty_and_clear(dentry);
-+ err = PTR_ERR(opaquedir);
-+ if (IS_ERR(opaquedir))
-+ goto out;
-+ } else {
-+ LIST_HEAD(list);
-+
-+ /*
-+ * When removing an empty opaque directory, then it
-+ * makes no sense to replace it with an exact replica of
-+ * itself. But emptiness still needs to be checked.
-+ */
-+ err = ovl_check_empty_dir(dentry, &list);
-+ ovl_cache_free(&list);
-+ if (err)
-+ goto out;
-+ }
- }
-
- err = ovl_lock_rename_workdir(workdir, upperdir);
-diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
-index 5f0d1993e6e3..bf8537c7f455 100644
---- a/fs/overlayfs/super.c
-+++ b/fs/overlayfs/super.c
-@@ -529,7 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data)
- {
- struct ovl_fs *ufs = sb->s_fs_info;
-
-- if (!(*flags & MS_RDONLY) && !ufs->upper_mnt)
-+ if (!(*flags & MS_RDONLY) && (!ufs->upper_mnt || !ufs->workdir))
- return -EROFS;
-
- return 0;
-@@ -925,9 +925,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
- ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
- err = PTR_ERR(ufs->workdir);
- if (IS_ERR(ufs->workdir)) {
-- pr_err("overlayfs: failed to create directory %s/%s\n",
-- ufs->config.workdir, OVL_WORKDIR_NAME);
-- goto out_put_upper_mnt;
-+ pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n",
-+ ufs->config.workdir, OVL_WORKDIR_NAME, -err);
-+ sb->s_flags |= MS_RDONLY;
-+ ufs->workdir = NULL;
- }
- }
-
-@@ -997,7 +998,6 @@ out_put_lower_mnt:
- kfree(ufs->lower_mnt);
- out_put_workdir:
- dput(ufs->workdir);
--out_put_upper_mnt:
- mntput(ufs->upper_mnt);
- out_put_lowerpath:
- for (i = 0; i < numlower; i++)
-diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
-index 15105dbc9e28..0166e7e829a7 100644
---- a/fs/xfs/libxfs/xfs_attr_leaf.c
-+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
-@@ -498,8 +498,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
- * After the last attribute is removed revert to original inode format,
- * making all literal area available to the data fork once more.
- */
--STATIC void
--xfs_attr_fork_reset(
-+void
-+xfs_attr_fork_remove(
- struct xfs_inode *ip,
- struct xfs_trans *tp)
- {
-@@ -565,7 +565,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
- (mp->m_flags & XFS_MOUNT_ATTR2) &&
- (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
- !(args->op_flags & XFS_DA_OP_ADDNAME)) {
-- xfs_attr_fork_reset(dp, args->trans);
-+ xfs_attr_fork_remove(dp, args->trans);
- } else {
- xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
- dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
-@@ -828,7 +828,7 @@ xfs_attr3_leaf_to_shortform(
- if (forkoff == -1) {
- ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
- ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
-- xfs_attr_fork_reset(dp, args->trans);
-+ xfs_attr_fork_remove(dp, args->trans);
- goto out;
- }
-
-diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
-index e2929da7c3ba..4f3a60aa93d4 100644
---- a/fs/xfs/libxfs/xfs_attr_leaf.h
-+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
-@@ -53,7 +53,7 @@ int xfs_attr_shortform_remove(struct xfs_da_args *args);
- int xfs_attr_shortform_list(struct xfs_attr_list_context *context);
- int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
- int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
--
-+void xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp);
-
- /*
- * Internal routines when attribute fork size == XFS_LBSIZE(mp).
-diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
-index 83af4c149635..487c8374a1e0 100644
---- a/fs/xfs/xfs_attr_inactive.c
-+++ b/fs/xfs/xfs_attr_inactive.c
-@@ -379,23 +379,31 @@ xfs_attr3_root_inactive(
- return error;
- }
-
-+/*
-+ * xfs_attr_inactive kills all traces of an attribute fork on an inode. It
-+ * removes both the on-disk and in-memory inode fork. Note that this also has to
-+ * handle the condition of inodes without attributes but with an attribute fork
-+ * configured, so we can't use xfs_inode_hasattr() here.
-+ *
-+ * The in-memory attribute fork is removed even on error.
-+ */
- int
--xfs_attr_inactive(xfs_inode_t *dp)
-+xfs_attr_inactive(
-+ struct xfs_inode *dp)
- {
-- xfs_trans_t *trans;
-- xfs_mount_t *mp;
-- int error;
-+ struct xfs_trans *trans;
-+ struct xfs_mount *mp;
-+ int cancel_flags = 0;
-+ int lock_mode = XFS_ILOCK_SHARED;
-+ int error = 0;
-
- mp = dp->i_mount;
- ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
-
-- xfs_ilock(dp, XFS_ILOCK_SHARED);
-- if (!xfs_inode_hasattr(dp) ||
-- dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-- xfs_iunlock(dp, XFS_ILOCK_SHARED);
-- return 0;
-- }
-- xfs_iunlock(dp, XFS_ILOCK_SHARED);
-+ xfs_ilock(dp, lock_mode);
-+ if (!XFS_IFORK_Q(dp))
-+ goto out_destroy_fork;
-+ xfs_iunlock(dp, lock_mode);
-
- /*
- * Start our first transaction of the day.
-@@ -407,13 +415,18 @@ xfs_attr_inactive(xfs_inode_t *dp)
- * the inode in every transaction to let it float upward through
- * the log.
- */
-+ lock_mode = 0;
- trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
- error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0);
-- if (error) {
-- xfs_trans_cancel(trans, 0);
-- return error;
-- }
-- xfs_ilock(dp, XFS_ILOCK_EXCL);
-+ if (error)
-+ goto out_cancel;
-+
-+ lock_mode = XFS_ILOCK_EXCL;
-+ cancel_flags = XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT;
-+ xfs_ilock(dp, lock_mode);
-+
-+ if (!XFS_IFORK_Q(dp))
-+ goto out_cancel;
-
- /*
- * No need to make quota reservations here. We expect to release some
-@@ -421,29 +434,31 @@ xfs_attr_inactive(xfs_inode_t *dp)
- */
- xfs_trans_ijoin(trans, dp, 0);
-
-- /*
-- * Decide on what work routines to call based on the inode size.
-- */
-- if (!xfs_inode_hasattr(dp) ||
-- dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-- error = 0;
-- goto out;
-+ /* invalidate and truncate the attribute fork extents */
-+ if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
-+ error = xfs_attr3_root_inactive(&trans, dp);
-+ if (error)
-+ goto out_cancel;
-+
-+ error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
-+ if (error)
-+ goto out_cancel;
- }
-- error = xfs_attr3_root_inactive(&trans, dp);
-- if (error)
-- goto out;
-
-- error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
-- if (error)
-- goto out;
-+ /* Reset the attribute fork - this also destroys the in-core fork */
-+ xfs_attr_fork_remove(dp, trans);
-
- error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
-- xfs_iunlock(dp, XFS_ILOCK_EXCL);
--
-+ xfs_iunlock(dp, lock_mode);
- return error;
-
--out:
-- xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
-- xfs_iunlock(dp, XFS_ILOCK_EXCL);
-+out_cancel:
-+ xfs_trans_cancel(trans, cancel_flags);
-+out_destroy_fork:
-+ /* kill the in-core attr fork before we drop the inode lock */
-+ if (dp->i_afp)
-+ xfs_idestroy_fork(dp, XFS_ATTR_FORK);
-+ if (lock_mode)
-+ xfs_iunlock(dp, lock_mode);
- return error;
- }
-diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
-index a2e1cb8a568b..f3ba637a8ece 100644
---- a/fs/xfs/xfs_file.c
-+++ b/fs/xfs/xfs_file.c
-@@ -125,7 +125,7 @@ xfs_iozero(
- status = 0;
- } while (count);
-
-- return (-status);
-+ return status;
- }
-
- int
-diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
-index 6163767aa856..b1edda7890f4 100644
---- a/fs/xfs/xfs_inode.c
-+++ b/fs/xfs/xfs_inode.c
-@@ -1889,21 +1889,17 @@ xfs_inactive(
- /*
- * If there are attributes associated with the file then blow them away
- * now. The code calls a routine that recursively deconstructs the
-- * attribute fork. We need to just commit the current transaction
-- * because we can't use it for xfs_attr_inactive().
-+ * attribute fork. It also blows away the in-core attribute fork.
- */
-- if (ip->i_d.di_anextents > 0) {
-- ASSERT(ip->i_d.di_forkoff != 0);
--
-+ if (XFS_IFORK_Q(ip)) {
- error = xfs_attr_inactive(ip);
- if (error)
- return;
- }
-
-- if (ip->i_afp)
-- xfs_idestroy_fork(ip, XFS_ATTR_FORK);
--
-+ ASSERT(!ip->i_afp);
- ASSERT(ip->i_d.di_anextents == 0);
-+ ASSERT(ip->i_d.di_forkoff == 0);
-
- /*
- * Free the inode.
-diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
-index 2dd405c9be78..45c39a37f924 100644
---- a/include/drm/drm_pciids.h
-+++ b/include/drm/drm_pciids.h
-@@ -186,6 +186,7 @@
- {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
-+ {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h
-index 9dc4e0384bfb..3886b3bffd7f 100644
---- a/include/linux/fs_pin.h
-+++ b/include/linux/fs_pin.h
-@@ -13,6 +13,8 @@ struct vfsmount;
- static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
- {
- init_waitqueue_head(&p->wait);
-+ INIT_HLIST_NODE(&p->s_list);
-+ INIT_HLIST_NODE(&p->m_list);
- p->kill = kill;
- }
-
-diff --git a/include/linux/gfp.h b/include/linux/gfp.h
-index 51bd1e72a917..eb6fafe66bec 100644
---- a/include/linux/gfp.h
-+++ b/include/linux/gfp.h
-@@ -30,6 +30,7 @@ struct vm_area_struct;
- #define ___GFP_HARDWALL 0x20000u
- #define ___GFP_THISNODE 0x40000u
- #define ___GFP_RECLAIMABLE 0x80000u
-+#define ___GFP_NOACCOUNT 0x100000u
- #define ___GFP_NOTRACK 0x200000u
- #define ___GFP_NO_KSWAPD 0x400000u
- #define ___GFP_OTHER_NODE 0x800000u
-@@ -85,6 +86,7 @@ struct vm_area_struct;
- #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
- #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
- #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
-+#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
- #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
-
- #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
-diff --git a/include/linux/ktime.h b/include/linux/ktime.h
-index 5fc3d1083071..2b6a204bd8d4 100644
---- a/include/linux/ktime.h
-+++ b/include/linux/ktime.h
-@@ -166,19 +166,34 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
- }
-
- #if BITS_PER_LONG < 64
--extern u64 __ktime_divns(const ktime_t kt, s64 div);
--static inline u64 ktime_divns(const ktime_t kt, s64 div)
-+extern s64 __ktime_divns(const ktime_t kt, s64 div);
-+static inline s64 ktime_divns(const ktime_t kt, s64 div)
- {
-+ /*
-+	 * Negative divisors could cause an infinite loop,
-+ * so bug out here.
-+ */
-+ BUG_ON(div < 0);
- if (__builtin_constant_p(div) && !(div >> 32)) {
-- u64 ns = kt.tv64;
-- do_div(ns, div);
-- return ns;
-+ s64 ns = kt.tv64;
-+ u64 tmp = ns < 0 ? -ns : ns;
-+
-+ do_div(tmp, div);
-+ return ns < 0 ? -tmp : tmp;
- } else {
- return __ktime_divns(kt, div);
- }
- }
- #else /* BITS_PER_LONG < 64 */
--# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
-+static inline s64 ktime_divns(const ktime_t kt, s64 div)
-+{
-+ /*
-+	 * The 32-bit implementation cannot handle negative divisors,
-+	 * so catch them on 64-bit as well.
-+ */
-+ WARN_ON(div < 0);
-+ return kt.tv64 / div;
-+}
- #endif
-
- static inline s64 ktime_to_us(const ktime_t kt)
-diff --git a/include/linux/libata.h b/include/linux/libata.h
-index 6b08cc106c21..f8994b4b122c 100644
---- a/include/linux/libata.h
-+++ b/include/linux/libata.h
-@@ -205,6 +205,7 @@ enum {
- ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
- ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
- ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
-+ ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
-
- /* struct ata_port flags */
- ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
-@@ -310,6 +311,12 @@ enum {
- */
- ATA_TMOUT_PMP_SRST_WAIT = 5000,
-
-+ /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
-+ * be a spurious PHY event, so ignore the first PHY event that
-+ * occurs within 10s after the policy change.
-+ */
-+ ATA_TMOUT_SPURIOUS_PHY = 10000,
-+
- /* ATA bus states */
- BUS_UNKNOWN = 0,
- BUS_DMA = 1,
-@@ -789,6 +796,8 @@ struct ata_link {
- struct ata_eh_context eh_context;
-
- struct ata_device device[ATA_MAX_DEVICES];
-+
-+ unsigned long last_lpm_change; /* when last LPM change happened */
- };
- #define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag)
- #define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0])
-@@ -1202,6 +1211,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev);
- extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
- extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
- extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
-+extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
-
- extern int ata_cable_40wire(struct ata_port *ap);
- extern int ata_cable_80wire(struct ata_port *ap);
-diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
-index 72dff5fb0d0c..6c8918114804 100644
---- a/include/linux/memcontrol.h
-+++ b/include/linux/memcontrol.h
-@@ -463,6 +463,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
- if (!memcg_kmem_enabled())
- return true;
-
-+ if (gfp & __GFP_NOACCOUNT)
-+ return true;
- /*
- * __GFP_NOFAIL allocations will move on even if charging is not
- * possible. Therefore we don't even try, and have this allocation
-@@ -522,6 +524,8 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
- {
- if (!memcg_kmem_enabled())
- return cachep;
-+ if (gfp & __GFP_NOACCOUNT)
-+ return cachep;
- if (gfp & __GFP_NOFAIL)
- return cachep;
- if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
-diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
-index 6341f5be6e24..a30b172df6e1 100644
---- a/include/linux/sched/rt.h
-+++ b/include/linux/sched/rt.h
-@@ -18,7 +18,7 @@ static inline int rt_task(struct task_struct *p)
- #ifdef CONFIG_RT_MUTEXES
- extern int rt_mutex_getprio(struct task_struct *p);
- extern void rt_mutex_setprio(struct task_struct *p, int prio);
--extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
-+extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
- extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
- extern void rt_mutex_adjust_pi(struct task_struct *p);
- static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-@@ -31,9 +31,10 @@ static inline int rt_mutex_getprio(struct task_struct *p)
- return p->normal_prio;
- }
-
--static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
-+static inline int rt_mutex_get_effective_prio(struct task_struct *task,
-+ int newprio)
- {
-- return 0;
-+ return newprio;
- }
-
- static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
-diff --git a/include/linux/tty.h b/include/linux/tty.h
-index 358a337af598..790752ac074a 100644
---- a/include/linux/tty.h
-+++ b/include/linux/tty.h
-@@ -339,6 +339,7 @@ struct tty_file_private {
- #define TTY_EXCLUSIVE 3 /* Exclusive open mode */
- #define TTY_DEBUG 4 /* Debugging */
- #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
-+#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */
- #define TTY_LDISC_OPEN 11 /* Line discipline is open */
- #define TTY_PTY_LOCK 16 /* pty private */
- #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
-@@ -462,7 +463,6 @@ extern int tty_hung_up_p(struct file *filp);
- extern void do_SAK(struct tty_struct *tty);
- extern void __do_SAK(struct tty_struct *tty);
- extern void no_tty(void);
--extern void tty_flush_to_ldisc(struct tty_struct *tty);
- extern void tty_buffer_free_all(struct tty_port *port);
- extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
- extern void tty_buffer_init(struct tty_port *port);
-diff --git a/include/xen/events.h b/include/xen/events.h
-index 5321cd9636e6..7d95fdf9cf3e 100644
---- a/include/xen/events.h
-+++ b/include/xen/events.h
-@@ -17,7 +17,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
- irq_handler_t handler,
- unsigned long irqflags, const char *devname,
- void *dev_id);
--int bind_virq_to_irq(unsigned int virq, unsigned int cpu);
-+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
- int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
- irq_handler_t handler,
- unsigned long irqflags, const char *devname,
-diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index 6357265a31ad..ce9108c059fb 100644
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -265,15 +265,17 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
- }
-
- /*
-- * Called by sched_setscheduler() to check whether the priority change
-- * is overruled by a possible priority boosting.
-+ * Called by sched_setscheduler() to get the priority which will be
-+ * effective after the change.
- */
--int rt_mutex_check_prio(struct task_struct *task, int newprio)
-+int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
- {
- if (!task_has_pi_waiters(task))
-- return 0;
-+ return newprio;
-
-- return task_top_pi_waiter(task)->task->prio <= newprio;
-+ if (task_top_pi_waiter(task)->task->prio <= newprio)
-+ return task_top_pi_waiter(task)->task->prio;
-+ return newprio;
- }
-
- /*
-diff --git a/kernel/module.c b/kernel/module.c
-index ec53f594e9c9..538794ce3cc7 100644
---- a/kernel/module.c
-+++ b/kernel/module.c
-@@ -3366,6 +3366,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
- module_bug_cleanup(mod);
- mutex_unlock(&module_mutex);
-
-+ blocking_notifier_call_chain(&module_notify_list,
-+ MODULE_STATE_GOING, mod);
-+
- /* we can't deallocate the module until we clear memory protection */
- unset_module_init_ro_nx(mod);
- unset_module_core_ro_nx(mod);
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 3d5f6f6d14c2..f4da2cbbfd7f 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -3295,15 +3295,18 @@ static void __setscheduler_params(struct task_struct *p,
-
- /* Actually do priority change: must hold pi & rq lock. */
- static void __setscheduler(struct rq *rq, struct task_struct *p,
-- const struct sched_attr *attr)
-+ const struct sched_attr *attr, bool keep_boost)
- {
- __setscheduler_params(p, attr);
-
- /*
-- * If we get here, there was no pi waiters boosting the
-- * task. It is safe to use the normal prio.
-+ * Keep a potential priority boosting if called from
-+ * sched_setscheduler().
- */
-- p->prio = normal_prio(p);
-+ if (keep_boost)
-+ p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
-+ else
-+ p->prio = normal_prio(p);
-
- if (dl_prio(p->prio))
- p->sched_class = &dl_sched_class;
-@@ -3403,7 +3406,7 @@ static int __sched_setscheduler(struct task_struct *p,
- int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
- MAX_RT_PRIO - 1 - attr->sched_priority;
- int retval, oldprio, oldpolicy = -1, queued, running;
-- int policy = attr->sched_policy;
-+ int new_effective_prio, policy = attr->sched_policy;
- unsigned long flags;
- const struct sched_class *prev_class;
- struct rq *rq;
-@@ -3585,15 +3588,14 @@ change:
- oldprio = p->prio;
-
- /*
-- * Special case for priority boosted tasks.
-- *
-- * If the new priority is lower or equal (user space view)
-- * than the current (boosted) priority, we just store the new
-+ * Take priority boosted tasks into account. If the new
-+ * effective priority is unchanged, we just store the new
- * normal parameters and do not touch the scheduler class and
-	 * the runqueue. This will be done when the task deboosts
- * itself.
- */
-- if (rt_mutex_check_prio(p, newprio)) {
-+ new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
-+ if (new_effective_prio == oldprio) {
- __setscheduler_params(p, attr);
- task_rq_unlock(rq, p, &flags);
- return 0;
-@@ -3607,7 +3609,7 @@ change:
- put_prev_task(rq, p);
-
- prev_class = p->sched_class;
-- __setscheduler(rq, p, attr);
-+ __setscheduler(rq, p, attr, true);
-
- if (running)
- p->sched_class->set_curr_task(rq);
-@@ -4382,10 +4384,7 @@ long __sched io_schedule_timeout(long timeout)
- long ret;
-
- current->in_iowait = 1;
-- if (old_iowait)
-- blk_schedule_flush_plug(current);
-- else
-- blk_flush_plug(current);
-+ blk_schedule_flush_plug(current);
-
- delayacct_blkio_start();
- rq = raw_rq();
-@@ -7357,7 +7356,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
- queued = task_on_rq_queued(p);
- if (queued)
- dequeue_task(rq, p, 0);
-- __setscheduler(rq, p, &attr);
-+ __setscheduler(rq, p, &attr, false);
- if (queued) {
- enqueue_task(rq, p, 0);
- resched_curr(rq);
-diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index bee0c1f78091..38f586c076fe 100644
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -266,21 +266,23 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
- /*
- * Divide a ktime value by a nanosecond value
- */
--u64 __ktime_divns(const ktime_t kt, s64 div)
-+s64 __ktime_divns(const ktime_t kt, s64 div)
- {
-- u64 dclc;
- int sft = 0;
-+ s64 dclc;
-+ u64 tmp;
-
- dclc = ktime_to_ns(kt);
-+ tmp = dclc < 0 ? -dclc : dclc;
-+
- /* Make sure the divisor is less than 2^32: */
- while (div >> 32) {
- sft++;
- div >>= 1;
- }
-- dclc >>= sft;
-- do_div(dclc, (unsigned long) div);
--
-- return dclc;
-+ tmp >>= sft;
-+ do_div(tmp, (unsigned long) div);
-+ return dclc < 0 ? -tmp : tmp;
- }
- EXPORT_SYMBOL_GPL(__ktime_divns);
- #endif /* BITS_PER_LONG >= 64 */
-diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
-index a28df5206d95..11649615c505 100644
---- a/lib/strnlen_user.c
-+++ b/lib/strnlen_user.c
-@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
- return res + find_zero(data) + 1 - align;
- }
- res += sizeof(unsigned long);
-- if (unlikely(max < sizeof(unsigned long)))
-+	/* We already handled 'unsigned long' bytes. Did we do it all? */
-+ if (unlikely(max <= sizeof(unsigned long)))
- break;
- max -= sizeof(unsigned long);
- if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
-diff --git a/mm/kmemleak.c b/mm/kmemleak.c
-index 5405aff5a590..f0fe4f2c1fa7 100644
---- a/mm/kmemleak.c
-+++ b/mm/kmemleak.c
-@@ -115,7 +115,8 @@
- #define BYTES_PER_POINTER sizeof(void *)
-
- /* GFP bitmask for kmemleak internal allocations */
--#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
-+#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
-+ __GFP_NOACCOUNT)) | \
- __GFP_NORETRY | __GFP_NOMEMALLOC | \
- __GFP_NOWARN)
-
-diff --git a/mm/mempolicy.c b/mm/mempolicy.c
-index de5dc5e12691..0f7d73b3e4b1 100644
---- a/mm/mempolicy.c
-+++ b/mm/mempolicy.c
-@@ -2517,7 +2517,7 @@ static void __init check_numabalancing_enable(void)
- if (numabalancing_override)
- set_numabalancing_state(numabalancing_override == 1);
-
-- if (nr_node_ids > 1 && !numabalancing_override) {
-+ if (num_online_nodes() > 1 && !numabalancing_override) {
- pr_info("%s automatic NUMA balancing. "
- "Configure with numa_balancing= or the "
- "kernel.numa_balancing sysctl",
-diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
-index 41a4abc7e98e..c4ec9239249a 100644
---- a/net/ceph/osd_client.c
-+++ b/net/ceph/osd_client.c
-@@ -1306,8 +1306,6 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
- if (list_empty(&req->r_osd_item))
- req->r_osd = NULL;
- }
--
-- list_del_init(&req->r_req_lru_item); /* can be on notarget */
- ceph_osdc_put_request(req);
- }
-
-@@ -2017,20 +2015,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
- err = __map_request(osdc, req,
- force_resend || force_resend_writes);
- dout("__map_request returned %d\n", err);
-- if (err == 0)
-- continue; /* no change and no osd was specified */
- if (err < 0)
- continue; /* hrm! */
-- if (req->r_osd == NULL) {
-- dout("tid %llu maps to no valid osd\n", req->r_tid);
-- needmap++; /* request a newer map */
-- continue;
-- }
-+ if (req->r_osd == NULL || err > 0) {
-+ if (req->r_osd == NULL) {
-+ dout("lingering %p tid %llu maps to no osd\n",
-+ req, req->r_tid);
-+ /*
-+ * A homeless lingering request makes
-+			 * no sense, as its job is to keep
-+ * a particular OSD connection open.
-+ * Request a newer map and kick the
-+ * request, knowing that it won't be
-+ * resent until we actually get a map
-+ * that can tell us where to send it.
-+ */
-+ needmap++;
-+ }
-
-- dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
-- req->r_osd ? req->r_osd->o_osd : -1);
-- __register_request(osdc, req);
-- __unregister_linger_request(osdc, req);
-+ dout("kicking lingering %p tid %llu osd%d\n", req,
-+ req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
-+ __register_request(osdc, req);
-+ __unregister_linger_request(osdc, req);
-+ }
- }
- reset_changed_osds(osdc);
- mutex_unlock(&osdc->request_mutex);
-diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
-index 8d53d65bd2ab..81e8dc5cb7f9 100644
---- a/net/mac80211/ieee80211_i.h
-+++ b/net/mac80211/ieee80211_i.h
-@@ -204,6 +204,8 @@ enum ieee80211_packet_rx_flags {
- * @IEEE80211_RX_CMNTR: received on cooked monitor already
- * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
- * to cfg80211_report_obss_beacon().
-+ * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
-+ * reorder buffer timeout timer, not the normal RX path
- *
- * These flags are used across handling multiple interfaces
- * for a single frame.
-@@ -211,6 +213,7 @@ enum ieee80211_packet_rx_flags {
- enum ieee80211_rx_flags {
- IEEE80211_RX_CMNTR = BIT(0),
- IEEE80211_RX_BEACON_REPORTED = BIT(1),
-+ IEEE80211_RX_REORDER_TIMER = BIT(2),
- };
-
- struct ieee80211_rx_data {
-diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
-index 1eb730bf8752..4c887d053333 100644
---- a/net/mac80211/rx.c
-+++ b/net/mac80211/rx.c
-@@ -2106,7 +2106,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
- /* deliver to local stack */
- skb->protocol = eth_type_trans(skb, dev);
- memset(skb->cb, 0, sizeof(skb->cb));
-- if (rx->local->napi)
-+ if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
-+ rx->local->napi)
- napi_gro_receive(rx->local->napi, skb);
- else
- netif_receive_skb(skb);
-@@ -3215,7 +3216,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
- /* This is OK -- must be QoS data frame */
- .security_idx = tid,
- .seqno_idx = tid,
-- .flags = 0,
-+ .flags = IEEE80211_RX_REORDER_TIMER,
- };
- struct tid_ampdu_rx *tid_agg_rx;
-
-diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
-index a4220e92f0cc..efa3f48f1ec5 100644
---- a/net/mac80211/wep.c
-+++ b/net/mac80211/wep.c
-@@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
-
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
-
-- if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN ||
-- skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
-+ if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
- return NULL;
-
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
-@@ -167,6 +166,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
- size_t len;
- u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
-
-+ if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
-+ return -1;
-+
- iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
- if (!iv)
- return -1;
-diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
-index 1ec19f6f0c2b..eeeba5adee6d 100644
---- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
-+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
-@@ -793,20 +793,26 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
- {
- u32 value_follows;
- int err;
-+ struct page *scratch;
-+
-+ scratch = alloc_page(GFP_KERNEL);
-+ if (!scratch)
-+ return -ENOMEM;
-+ xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE);
-
- /* res->status */
- err = gssx_dec_status(xdr, &res->status);
- if (err)
-- return err;
-+ goto out_free;
-
- /* res->context_handle */
- err = gssx_dec_bool(xdr, &value_follows);
- if (err)
-- return err;
-+ goto out_free;
- if (value_follows) {
- err = gssx_dec_ctx(xdr, res->context_handle);
- if (err)
-- return err;
-+ goto out_free;
- } else {
- res->context_handle = NULL;
- }
-@@ -814,11 +820,11 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
- /* res->output_token */
- err = gssx_dec_bool(xdr, &value_follows);
- if (err)
-- return err;
-+ goto out_free;
- if (value_follows) {
- err = gssx_dec_buffer(xdr, res->output_token);
- if (err)
-- return err;
-+ goto out_free;
- } else {
- res->output_token = NULL;
- }
-@@ -826,14 +832,17 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
- /* res->delegated_cred_handle */
- err = gssx_dec_bool(xdr, &value_follows);
- if (err)
-- return err;
-+ goto out_free;
- if (value_follows) {
- /* we do not support upcall servers sending this data. */
-- return -EINVAL;
-+ err = -EINVAL;
-+ goto out_free;
- }
-
- /* res->options */
- err = gssx_dec_option_array(xdr, &res->options);
-
-+out_free:
-+ __free_page(scratch);
- return err;
- }
-diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
-index a8a1e14272a1..a002a6d1e6da 100644
---- a/sound/pci/hda/hda_intel.c
-+++ b/sound/pci/hda/hda_intel.c
-@@ -2108,6 +2108,8 @@ static const struct pci_device_id azx_ids[] = {
- .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
- { PCI_DEVICE(0x1002, 0xaab0),
- .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
-+ { PCI_DEVICE(0x1002, 0xaac8),
-+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
- /* VIA VT8251/VT8237A */
- { PCI_DEVICE(0x1106, 0x3288),
- .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
-diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
-index da67ea8645a6..e27298bdcd6d 100644
---- a/sound/pci/hda/patch_conexant.c
-+++ b/sound/pci/hda/patch_conexant.c
-@@ -973,6 +973,14 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
- .patch = patch_conexant_auto },
- { .id = 0x14f150b9, .name = "CX20665",
- .patch = patch_conexant_auto },
-+ { .id = 0x14f150f1, .name = "CX20721",
-+ .patch = patch_conexant_auto },
-+ { .id = 0x14f150f2, .name = "CX20722",
-+ .patch = patch_conexant_auto },
-+ { .id = 0x14f150f3, .name = "CX20723",
-+ .patch = patch_conexant_auto },
-+ { .id = 0x14f150f4, .name = "CX20724",
-+ .patch = patch_conexant_auto },
- { .id = 0x14f1510f, .name = "CX20751/2",
- .patch = patch_conexant_auto },
- { .id = 0x14f15110, .name = "CX20751/2",
-@@ -1007,6 +1015,10 @@ MODULE_ALIAS("snd-hda-codec-id:14f150ab");
- MODULE_ALIAS("snd-hda-codec-id:14f150ac");
- MODULE_ALIAS("snd-hda-codec-id:14f150b8");
- MODULE_ALIAS("snd-hda-codec-id:14f150b9");
-+MODULE_ALIAS("snd-hda-codec-id:14f150f1");
-+MODULE_ALIAS("snd-hda-codec-id:14f150f2");
-+MODULE_ALIAS("snd-hda-codec-id:14f150f3");
-+MODULE_ALIAS("snd-hda-codec-id:14f150f4");
- MODULE_ALIAS("snd-hda-codec-id:14f1510f");
- MODULE_ALIAS("snd-hda-codec-id:14f15110");
- MODULE_ALIAS("snd-hda-codec-id:14f15111");
-diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
-index 2fd490b1764b..93c78c3c4b95 100644
---- a/sound/pci/hda/patch_realtek.c
-+++ b/sound/pci/hda/patch_realtek.c
-@@ -5027,6 +5027,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
- SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
- SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
-+ SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
- SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
- SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
- SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
-@@ -5056,6 +5057,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
-+ SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
- SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
-@@ -5246,6 +5248,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
- {0x17, 0x40000000},
- {0x1d, 0x40700001},
- {0x21, 0x02211050}),
-+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-+ ALC255_STANDARD_PINS,
-+ {0x12, 0x90a60180},
-+ {0x14, 0x90170130},
-+ {0x17, 0x40000000},
-+ {0x1d, 0x40700001},
-+ {0x21, 0x02211040}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- ALC256_STANDARD_PINS,
- {0x13, 0x40000000}),
-diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
-index 2341fc334163..6ba0b5517c40 100644
---- a/sound/pci/hda/thinkpad_helper.c
-+++ b/sound/pci/hda/thinkpad_helper.c
-@@ -72,7 +72,6 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
- if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
- old_vmaster_hook = spec->vmaster_mute.hook;
- spec->vmaster_mute.hook = update_tpacpi_mute_led;
-- spec->vmaster_mute_enum = 1;
- removefunc = false;
- }
- if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
-diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
-index 2ffb9a0570dc..3d44fc50e4d0 100644
---- a/sound/soc/codecs/mc13783.c
-+++ b/sound/soc/codecs/mc13783.c
-@@ -623,14 +623,14 @@ static int mc13783_probe(struct snd_soc_codec *codec)
- AUDIO_SSI_SEL, 0);
- else
- mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_CODEC,
-- 0, AUDIO_SSI_SEL);
-+ AUDIO_SSI_SEL, AUDIO_SSI_SEL);
-
- if (priv->dac_ssi_port == MC13783_SSI1_PORT)
- mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
- AUDIO_SSI_SEL, 0);
- else
- mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
-- 0, AUDIO_SSI_SEL);
-+ AUDIO_SSI_SEL, AUDIO_SSI_SEL);
-
- return 0;
- }
-diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
-index dc7778b6dd7f..c3c33bd0df1c 100644
---- a/sound/soc/codecs/uda1380.c
-+++ b/sound/soc/codecs/uda1380.c
-@@ -437,7 +437,7 @@ static int uda1380_set_dai_fmt_both(struct snd_soc_dai *codec_dai,
- if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS)
- return -EINVAL;
-
-- uda1380_write(codec, UDA1380_IFACE, iface);
-+ uda1380_write_reg_cache(codec, UDA1380_IFACE, iface);
-
- return 0;
- }
-diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
-index 3035d9856415..e97a7615df85 100644
---- a/sound/soc/codecs/wm8960.c
-+++ b/sound/soc/codecs/wm8960.c
-@@ -395,7 +395,7 @@ static const struct snd_soc_dapm_route audio_paths[] = {
- { "Right Input Mixer", "Boost Switch", "Right Boost Mixer", },
- { "Right Input Mixer", NULL, "RINPUT1", }, /* Really Boost Switch */
- { "Right Input Mixer", NULL, "RINPUT2" },
-- { "Right Input Mixer", NULL, "LINPUT3" },
-+ { "Right Input Mixer", NULL, "RINPUT3" },
-
- { "Left ADC", NULL, "Left Input Mixer" },
- { "Right ADC", NULL, "Right Input Mixer" },
-diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
-index 4fbc7689339a..a1c04dab6684 100644
---- a/sound/soc/codecs/wm8994.c
-+++ b/sound/soc/codecs/wm8994.c
-@@ -2754,7 +2754,7 @@ static struct {
- };
-
- static int fs_ratios[] = {
-- 64, 128, 192, 256, 348, 512, 768, 1024, 1408, 1536
-+ 64, 128, 192, 256, 384, 512, 768, 1024, 1408, 1536
- };
-
- static int bclk_divs[] = {
-diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
-index b6f88202b8c9..e19a6765bd8a 100644
---- a/sound/soc/soc-dapm.c
-+++ b/sound/soc/soc-dapm.c
-@@ -3074,11 +3074,16 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
- }
-
- prefix = soc_dapm_prefix(dapm);
-- if (prefix)
-+ if (prefix) {
- w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
-- else
-+ if (widget->sname)
-+ w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
-+ widget->sname);
-+ } else {
- w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
--
-+ if (widget->sname)
-+ w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
-+ }
- if (w->name == NULL) {
- kfree(w);
- return NULL;
-diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
-index 32631a86078b..e21ec5abcc3a 100644
---- a/sound/usb/quirks.c
-+++ b/sound/usb/quirks.c
-@@ -1117,6 +1117,8 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
- switch (chip->usb_id) {
- case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
- case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
-+ case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
-+ case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
- case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
- return true;
- }
-diff --git a/tools/vm/Makefile b/tools/vm/Makefile
-index ac884b65a072..93aadaf7ff63 100644
---- a/tools/vm/Makefile
-+++ b/tools/vm/Makefile
-@@ -3,7 +3,7 @@
- TARGETS=page-types slabinfo page_owner_sort
-
- LIB_DIR = ../lib/api
--LIBS = $(LIB_DIR)/libapikfs.a
-+LIBS = $(LIB_DIR)/libapi.a
-
- CC = $(CROSS_COMPILE)gcc
- CFLAGS = -Wall -Wextra -I../lib/
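The ktime_divns()/__ktime_divns() hunks above all turn on a single idiom: do_div() only divides an unsigned 64-bit dividend, so a possibly negative nanosecond count has to be divided via its absolute value, with the sign restored afterwards. Below is a minimal standalone sketch of that idiom, assuming nothing from the kernel; div64() and signed_divns() are illustrative names, with div64() standing in for do_div().

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's do_div(): plain unsigned 64-by-32 division. */
static uint64_t div64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

/* Divide a signed 64-bit nanosecond count the way the patched
 * __ktime_divns() does: strip the sign, divide unsigned, restore it. */
static int64_t signed_divns(int64_t ns, uint32_t div)
{
	uint64_t tmp = ns < 0 ? -(uint64_t)ns : (uint64_t)ns;

	tmp = div64(tmp, div);
	return ns < 0 ? -(int64_t)tmp : (int64_t)tmp;
}

int main(void)
{
	/* -3000 ns / 1000 yields -3; feeding the raw bit pattern to an
	 * unsigned divide, as the old code did, would not. */
	printf("%lld\n", (long long)signed_divns(-3000, 1000));
	return 0;
}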
diff --git a/1500_XATTR_USER_PREFIX.patch b/1500_XATTR_USER_PREFIX.patch
deleted file mode 100644
index cc15cd51..00000000
--- a/1500_XATTR_USER_PREFIX.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From: Anthony G. Basile <blueness@gentoo.org>
-
-This patch adds support for a restricted user-controlled namespace on
-the tmpfs filesystem used to house PaX flags. The namespace must be of the
-form user.pax.* and its value cannot exceed a size of 8 bytes.
-
-This is needed on all Gentoo systems so that XATTR_PAX flags
-are preserved for users who might build packages using portage on
-a tmpfs system with a non-hardened kernel and then switch to a
-hardened kernel with XATTR_PAX enabled.
-
-The namespace is available to any user with Extended Attribute support
-enabled for tmpfs. Users who do not enable xattrs will not have
-the XATTR_PAX flags preserved.
-
-diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
-index e4629b9..6958086 100644
---- a/include/uapi/linux/xattr.h
-+++ b/include/uapi/linux/xattr.h
-@@ -63,5 +63,9 @@
- #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
- #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
-
-+/* User namespace */
-+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
-+#define XATTR_PAX_FLAGS_SUFFIX "flags"
-+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
-
- #endif /* _UAPI_LINUX_XATTR_H */
-diff --git a/mm/shmem.c b/mm/shmem.c
-index 1c44af7..f23bb1b 100644
---- a/mm/shmem.c
-+++ b/mm/shmem.c
-@@ -2201,6 +2201,7 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
- static int shmem_xattr_validate(const char *name)
- {
- struct { const char *prefix; size_t len; } arr[] = {
-+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
- { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
- { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
- };
-@@ -2256,6 +2257,12 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
- if (err)
- return err;
-
-+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
-+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
-+ return -EOPNOTSUPP;
-+ if (size > 8)
-+ return -EINVAL;
-+ }
- return simple_xattr_set(&info->xattrs, name, value, size, flags);
- }
-
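Given the whitelist above, only the name user.pax.flags is accepted from the user namespace on tmpfs, and only with a value of at most 8 bytes. The following is a hedged userspace sketch of how a tool would set the flag; the file path and the flag string "em" are purely illustrative.

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *value = "em";	/* hypothetical PaX flag string, <= 8 bytes */

	/* Per shmem_setxattr() above: any other user.* name returns
	 * -EOPNOTSUPP, and a value longer than 8 bytes returns -EINVAL. */
	if (setxattr("/tmp/testfile", "user.pax.flags", value,
		     strlen(value), 0) != 0)
		perror("setxattr");
	return 0;
}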
diff --git a/1510_fs-enable-link-security-restrictions-by-default.patch b/1510_fs-enable-link-security-restrictions-by-default.patch
deleted file mode 100644
index 639fb3c3..00000000
--- a/1510_fs-enable-link-security-restrictions-by-default.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-From: Ben Hutchings <ben@decadent.org.uk>
-Subject: fs: Enable link security restrictions by default
-Date: Fri, 02 Nov 2012 05:32:06 +0000
-Bug-Debian: https://bugs.debian.org/609455
-Forwarded: not-needed
-
-This reverts commit 561ec64ae67ef25cac8d72bb9c4bfc955edfd415
-('VFS: don't do protected {sym,hard}links by default').
-
---- a/fs/namei.c
-+++ b/fs/namei.c
-@@ -651,8 +651,8 @@ static inline void put_link(struct namei
- path_put(link);
- }
-
--int sysctl_protected_symlinks __read_mostly = 0;
--int sysctl_protected_hardlinks __read_mostly = 0;
-+int sysctl_protected_symlinks __read_mostly = 1;
-+int sysctl_protected_hardlinks __read_mostly = 1;
-
- /**
- * may_follow_link - Check symlink following for unsafe situations
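After this change the restrictions default to on; the two variables patched above are the ones exposed as fs.protected_symlinks and fs.protected_hardlinks. A small sketch that reads the effective values back through procfs (these are the standard paths, but worth verifying on a given kernel):

#include <stdio.h>

static void show(const char *path)
{
	char buf[8];
	FILE *f = fopen(path, "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%s = %s", path, buf);	/* "1" with this patch applied */
	if (f)
		fclose(f);
}

int main(void)
{
	show("/proc/sys/fs/protected_symlinks");
	show("/proc/sys/fs/protected_hardlinks");
	return 0;
}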
diff --git a/2600_select-REGMAP_IRQ-for-rt5033.patch b/2600_select-REGMAP_IRQ-for-rt5033.patch
deleted file mode 100644
index 92fb2e0b..00000000
--- a/2600_select-REGMAP_IRQ-for-rt5033.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From 23a2a22a3f3f17de094f386a893f7047c10e44a0 Mon Sep 17 00:00:00 2001
-From: Artem Savkov <asavkov@redhat.com>
-Date: Thu, 5 Mar 2015 12:42:27 +0100
-Subject: mfd: rt5033: MFD_RT5033 needs to select REGMAP_IRQ
-
-Since commit 0b2712585 (linux-next.git) this driver uses regmap_irq and so needs
-to select REGMAP_IRQ.
-
-This fixes the following compilation errors:
-ERROR: "regmap_irq_get_domain" [drivers/mfd/rt5033.ko] undefined!
-ERROR: "regmap_add_irq_chip" [drivers/mfd/rt5033.ko] undefined!
-
-Signed-off-by: Artem Savkov <asavkov@redhat.com>
-Signed-off-by: Lee Jones <lee.jones@linaro.org>
-
-diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
-index f8ef77d9a..f49f404 100644
---- a/drivers/mfd/Kconfig
-+++ b/drivers/mfd/Kconfig
-@@ -680,6 +680,7 @@ config MFD_RT5033
- depends on I2C=y
- select MFD_CORE
- select REGMAP_I2C
-+ select REGMAP_IRQ
- help
- This driver provides for the Richtek RT5033 Power Management IC,
- which includes the I2C driver and the Core APIs. This driver provides
---
-cgit v0.10.2
-
diff --git a/2700_ThinkPad-30-brightness-control-fix.patch b/2700_ThinkPad-30-brightness-control-fix.patch
deleted file mode 100644
index b548c6dc..00000000
--- a/2700_ThinkPad-30-brightness-control-fix.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
-index cb96296..6c242ed 100644
---- a/drivers/acpi/blacklist.c
-+++ b/drivers/acpi/blacklist.c
-@@ -269,6 +276,61 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
- },
-
- /*
-+ * The following Lenovo models have a broken workaround in the
-+ * acpi_video backlight implementation to meet the Windows 8
-+ * requirement of 101 backlight levels. Reverting to pre-Win8
-+ * behavior fixes the problem.
-+ */
-+ {
-+ .callback = dmi_disable_osi_win8,
-+ .ident = "Lenovo ThinkPad L430",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L430"),
-+ },
-+ },
-+ {
-+ .callback = dmi_disable_osi_win8,
-+ .ident = "Lenovo ThinkPad T430s",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
-+ },
-+ },
-+ {
-+ .callback = dmi_disable_osi_win8,
-+ .ident = "Lenovo ThinkPad T530",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T530"),
-+ },
-+ },
-+ {
-+ .callback = dmi_disable_osi_win8,
-+ .ident = "Lenovo ThinkPad W530",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
-+ },
-+ },
-+ {
-+ .callback = dmi_disable_osi_win8,
-+ .ident = "Lenovo ThinkPad X1 Carbon",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
-+ },
-+ },
-+ {
-+ .callback = dmi_disable_osi_win8,
-+ .ident = "Lenovo ThinkPad X230",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
-+ },
-+ },
-+
-+ /*
- * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
- * Linux ignores it, except for the machines enumerated below.
- */
-
diff --git a/2900_dev-root-proc-mount-fix.patch b/2900_dev-root-proc-mount-fix.patch
deleted file mode 100644
index 4cd558e7..00000000
--- a/2900_dev-root-proc-mount-fix.patch
+++ /dev/null
@@ -1,30 +0,0 @@
---- a/init/do_mounts.c 2014-08-26 08:03:30.000013100 -0400
-+++ b/init/do_mounts.c 2014-08-26 08:11:19.720014712 -0400
-@@ -484,7 +484,10 @@ void __init change_floppy(char *fmt, ...
- va_start(args, fmt);
- vsprintf(buf, fmt, args);
- va_end(args);
-- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
-+ if (saved_root_name[0])
-+ fd = sys_open(saved_root_name, O_RDWR | O_NDELAY, 0);
-+ else
-+ fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
- if (fd >= 0) {
- sys_ioctl(fd, FDEJECT, 0);
- sys_close(fd);
-@@ -527,8 +530,13 @@ void __init mount_root(void)
- }
- #endif
- #ifdef CONFIG_BLOCK
-- create_dev("/dev/root", ROOT_DEV);
-- mount_block_root("/dev/root", root_mountflags);
-+ if (saved_root_name[0] == '/') {
-+ create_dev(saved_root_name, ROOT_DEV);
-+ mount_block_root(saved_root_name, root_mountflags);
-+ } else {
-+ create_dev("/dev/root", ROOT_DEV);
-+ mount_block_root("/dev/root", root_mountflags);
-+ }
- #endif
- }
-
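The mount_root() hunk above boils down to one decision: if the user passed an absolute device path on the kernel command line (root=/dev/...), create and mount that node directly instead of the synthetic /dev/root. A trivial sketch of that selection, outside any kernel context (pick_root_node() is an illustrative name):

#include <stdio.h>

/* Mirror of the patched mount_root() choice: a saved root name that is
 * an absolute path wins over the synthetic /dev/root node. */
static const char *pick_root_node(const char *saved_root_name)
{
	if (saved_root_name && saved_root_name[0] == '/')
		return saved_root_name;
	return "/dev/root";
}

int main(void)
{
	printf("%s\n", pick_root_node("/dev/sda2"));	/* -> /dev/sda2 */
	printf("%s\n", pick_root_node(""));		/* -> /dev/root */
	return 0;
}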
diff --git a/2905_2disk-resume-image-fix.patch b/2905_2disk-resume-image-fix.patch
deleted file mode 100644
index 7e95d298..00000000
--- a/2905_2disk-resume-image-fix.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-diff --git a/kernel/kmod.c b/kernel/kmod.c
-index fb32636..d968882 100644
---- a/kernel/kmod.c
-+++ b/kernel/kmod.c
-@@ -575,7 +575,8 @@
- call_usermodehelper_freeinfo(sub_info);
- return -EINVAL;
- }
-- helper_lock();
-+ if (!(current->flags & PF_FREEZER_SKIP))
-+ helper_lock();
- if (!khelper_wq || usermodehelper_disabled) {
- retval = -EBUSY;
- goto out;
-@@ -611,7 +612,8 @@ wait_done:
- out:
- call_usermodehelper_freeinfo(sub_info);
- unlock:
-- helper_unlock();
-+ if (!(current->flags & PF_FREEZER_SKIP))
-+ helper_unlock();
- return retval;
- }
- EXPORT_SYMBOL(call_usermodehelper_exec);
diff --git a/2910_lz4-compression-fix.patch b/2910_lz4-compression-fix.patch
deleted file mode 100644
index 1c55f322..00000000
--- a/2910_lz4-compression-fix.patch
+++ /dev/null
@@ -1,30 +0,0 @@
---- a/lib/lz4/lz4_decompress.c 2015-04-13 16:20:04.896315560 +0800
-+++ b/lib/lz4/lz4_decompress.c 2015-04-13 16:27:08.929317053 +0800
-@@ -139,8 +139,12 @@
- /* Error: request to write beyond destination buffer */
- if (cpy > oend)
- goto _output_error;
-+#if LZ4_ARCH64
-+ if ((ref + COPYLENGTH) > oend)
-+#else
- if ((ref + COPYLENGTH) > oend ||
- (op + COPYLENGTH) > oend)
-+#endif
- goto _output_error;
- LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
- while (op < cpy)
-@@ -270,7 +274,13 @@
- if (cpy > oend - COPYLENGTH) {
- if (cpy > oend)
- goto _output_error; /* write outside of buf */
--
-+#if LZ4_ARCH64
-+ if ((ref + COPYLENGTH) > oend)
-+#else
-+ if ((ref + COPYLENGTH) > oend ||
-+ (op + COPYLENGTH) > oend)
-+#endif
-+ goto _output_error;
- LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
- while (op < cpy)
- *op++ = *ref++;
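Reduced to its core, the fix ensures that before each COPYLENGTH-wide copy both the read cursor (ref) and, on 32-bit, the write cursor (op) stay inside the output buffer; otherwise the decompressor must take the _output_error path. A standalone sketch of the 32-bit form of that guard (copy_len stands in for COPYLENGTH; the other names mirror the patch):

#include <stdbool.h>
#include <stddef.h>

/* True when a copy_len-wide copy starting at ref (read) or op (write)
 * would run past oend -- the condition the patched code bails out on. */
static bool copy_would_overrun(const unsigned char *ref,
			       const unsigned char *op,
			       const unsigned char *oend, size_t copy_len)
{
	return ref + copy_len > oend || op + copy_len > oend;
}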
diff --git a/4200_fbcondecor-3.19.patch b/4200_fbcondecor-3.19.patch
deleted file mode 100644
index 29c379fe..00000000
--- a/4200_fbcondecor-3.19.patch
+++ /dev/null
@@ -1,2119 +0,0 @@
-diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
-index fe85e7c..2230930 100644
---- a/Documentation/fb/00-INDEX
-+++ b/Documentation/fb/00-INDEX
-@@ -23,6 +23,8 @@ ep93xx-fb.txt
- - info on the driver for EP93xx LCD controller.
- fbcon.txt
- - intro to and usage guide for the framebuffer console (fbcon).
-+fbcondecor.txt
-+ - info on the Framebuffer Console Decoration
- framebuffer.txt
- - introduction to frame buffer devices.
- gxfb.txt
-diff --git a/Documentation/fb/fbcondecor.txt b/Documentation/fb/fbcondecor.txt
-new file mode 100644
-index 0000000..3388c61
---- /dev/null
-+++ b/Documentation/fb/fbcondecor.txt
-@@ -0,0 +1,207 @@
-+What is it?
-+-----------
-+
-+The framebuffer decorations are a kernel feature which allows displaying a
-+background picture on selected consoles.
-+
-+What do I need to get it to work?
-+---------------------------------
-+
-+To get fbcondecor up-and-running you will have to:
-+ 1) get a copy of splashutils [1] or a similar program
-+ 2) get some fbcondecor themes
-+ 3) build the kernel helper program
-+ 4) build your kernel with the FB_CON_DECOR option enabled.
-+
-+To get fbcondecor operational right after fbcon initialization is finished, you
-+will have to include a theme and the kernel helper into your initramfs image.
-+Please refer to splashutils documentation for instructions on how to do that.
-+
-+[1] The splashutils package can be downloaded from:
-+ http://github.com/alanhaggai/fbsplash
-+
-+The userspace helper
-+--------------------
-+
-+The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is called by the
-+kernel whenever an important event occurs and the kernel needs some kind of
-+job to be carried out. Important events include console switches and video
-+mode switches (the kernel requests background images and configuration
-+parameters for the current console). The fbcondecor helper must be accessible at
-+all times. If it's not, fbcondecor will be switched off automatically.
-+
-+It's possible to set the path to the fbcondecor helper by writing it to
-+/proc/sys/kernel/fbcondecor.
-+
-+*****************************************************************************
-+
-+The information below is mostly technical stuff. There's probably no need to
-+read it unless you plan to develop a userspace helper.
-+
-+The fbcondecor protocol
-+-----------------------
-+
-+The fbcondecor protocol defines a communication interface between the kernel and
-+the userspace fbcondecor helper.
-+
-+The kernel side is responsible for:
-+
-+ * rendering console text, using an image as a background (instead of a
-+ standard solid color fbcon uses),
-+ * accepting commands from the user via ioctls on the fbcondecor device,
-+ * calling the userspace helper to set things up as soon as the fb subsystem
-+ is initialized.
-+
-+The userspace helper is responsible for everything else, including parsing
-+configuration files, decompressing the image files whenever the kernel needs
-+it, and communicating with the kernel if necessary.
-+
-+The fbcondecor protocol specifies how communication is done in both ways:
-+kernel->userspace and userspace->kernel.
-+
-+Kernel -> Userspace
-+-------------------
-+
-+The kernel communicates with the userspace helper by calling it and specifying
-+the task to be done in a series of arguments.
-+
-+The arguments follow the pattern:
-+<fbcondecor protocol version> <command> <parameters>
-+
-+All commands defined in fbcondecor protocol v2 have the following parameters:
-+ virtual console
-+ framebuffer number
-+ theme
-+
-+Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
-+framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
-+
-+Fbcondecor protocol v2 specifies the following commands:
-+
-+getpic
-+------
-+ The kernel issues this command to request image data. It's up to the
-+ userspace helper to find a background image appropriate for the specified
-+ theme and the current resolution. The userspace helper should respond by
-+ issuing the FBIOCONDECOR_SETPIC ioctl.
-+
-+init
-+----
-+ The kernel issues this command after the fbcondecor device is created and
-+ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
-+ helper should parse the kernel command line (/proc/cmdline) or otherwise
-+ decide whether fbcondecor is to be activated.
-+
-+ To activate fbcondecor on the first console the helper should issue the
-+ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
-+ in the above-mentioned order.
-+
-+ When the userspace helper is called in an early phase of the boot process
-+ (right after the initialization of fbcon), no filesystems will be mounted.
-+ The helper program should mount sysfs and then create the appropriate
-+ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
-+ current display settings and to be able to communicate with the kernel side.
-+ It should probably also mount the procfs to be able to parse the kernel
-+ command line parameters.
-+
-+ Note that the console sem is not held when the kernel calls fbcondecor_helper
-+ with the 'init' command. The fbcondecor helper should perform all ioctls with
-+ origin set to FBCON_DECOR_IO_ORIG_USER.
-+
-+modechange
-+----------
-+ The kernel issues this command on a mode change. The helper's response should
-+ be similar to the response to the 'init' command. Note that this time the
-+ console sem is held and all ioctls must be performed with origin set to
-+ FBCON_DECOR_IO_ORIG_KERNEL.
-+
-+
-+Userspace -> Kernel
-+-------------------
-+
-+Userspace programs can communicate with fbcondecor via ioctls on the
-+fbcondecor device. These ioctls are to be used by both the userspace helper
-+(called only by the kernel) and userspace configuration tools (run by the users).
-+
-+The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
-+when doing the appropriate ioctls. All userspace configuration tools should
-+use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
-+field when performing ioctls from the kernel helper will most likely result
-+in a console deadlock.
-+
-+FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
-+semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
-+the console sem.
-+
-+The framebuffer console decoration provides the following ioctls (all defined in
-+linux/fb.h):
-+
-+FBIOCONDECOR_SETPIC
-+description: loads a background picture for a virtual console
-+argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
-+notes:
-+If called for consoles other than the current foreground one, the picture data
-+will be ignored.
-+
-+If the current virtual console is running in a 8-bpp mode, the cmap substruct
-+of fb_image has to be filled appropriately: start should be set to 16 (first
-+16 colors are reserved for fbcon), len to a value <= 240 and red, green and
-+blue should point to valid cmap data. The transp field is ignored. The fields
-+dx, dy, bg_color, fg_color in fb_image are ignored as well.
-+
-+FBIOCONDECOR_SETCFG
-+description: sets the fbcondecor config for a virtual console
-+argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
-+notes: The structure has to be filled with valid data.
-+
-+FBIOCONDECOR_GETCFG
-+description: gets the fbcondecor config for a virtual console
-+argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
-+
-+FBIOCONDECOR_SETSTATE
-+description: sets the fbcondecor state for a virtual console
-+argument: struct fbcon_decor_iowrapper*; data: unsigned int*
-+ values: 0 = disabled, 1 = enabled.
-+
-+FBIOCONDECOR_GETSTATE
-+description: gets the fbcondecor state for a virtual console
-+argument: struct fbcon_decor_iowrapper*; data: unsigned int*
-+ values: as in FBIOCONDECOR_SETSTATE
-+
-+Info on used structures:
-+
-+Definition of struct vc_decor can be found in linux/console_decor.h. It's
-+heavily commented. Note that the 'theme' field should point to a string
-+no longer than FBCON_DECOR_THEME_LEN. When the FBIOCONDECOR_GETCFG call is
-+performed, the theme field should point to a char buffer of length
-+FBCON_DECOR_THEME_LEN.
-+
-+Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
-+The fields in this struct have the following meaning:
-+
-+vc:
-+Virtual console number.
-+
-+origin:
-+Specifies if the ioctl is performed as a response to a kernel request. The
-+fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
-+programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
-+avoid console semaphore deadlocks.
-+
-+data:
-+Pointer to a data structure appropriate for the performed ioctl. Type of
-+the data struct is specified in the ioctl's description.
-+
-+*****************************************************************************
-+
-+Credit
-+------
-+
-+Original 'bootsplash' project & implementation by:
-+ Volker Poplawski <volker@poplawski.de>, Stefan Reinauer <stepan@suse.de>,
-+ Steffen Winterfeldt <snwint@suse.de>, Michael Schroeder <mls@suse.de>,
-+ Ken Wimer <wimer@suse.de>.
-+
-+Fbcondecor, fbcondecor protocol design, current implementation & docs by:
-+ Michal Januszewski <michalj+fbcondecor@gmail.com>
-+
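To make the calling convention above concrete: a protocol v2 helper receives its arguments in the order <version> <command> <virtual console> <framebuffer number> <theme>. A minimal, purely illustrative sketch of the argument handling such a helper could start from (a real helper would then open the framebuffer and fbcondecor devices and issue the ioctls described above):

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	if (argc < 6 || strcmp(argv[1], "2") != 0)
		return 1;	/* only fbcondecor protocol v2 is handled */

	const char *cmd   = argv[2];	/* "init", "getpic" or "modechange" */
	const char *vc    = argv[3];	/* virtual console */
	const char *fb    = argv[4];	/* framebuffer number */
	const char *theme = argv[5];

	fprintf(stderr, "%s: vc=%s fb=%s theme=%s\n", cmd, vc, fb, theme);
	return 0;
}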
-diff --git a/drivers/Makefile b/drivers/Makefile
-index 7183b6a..d576148 100644
---- a/drivers/Makefile
-+++ b/drivers/Makefile
-@@ -17,6 +17,10 @@ obj-y += pwm/
- obj-$(CONFIG_PCI) += pci/
- obj-$(CONFIG_PARISC) += parisc/
- obj-$(CONFIG_RAPIDIO) += rapidio/
-+# tty/ comes before char/ so that the VT console is the boot-time
-+# default.
-+obj-y += tty/
-+obj-y += char/
- obj-y += video/
- obj-y += idle/
-
-@@ -42,11 +46,6 @@ obj-$(CONFIG_REGULATOR) += regulator/
- # reset controllers early, since gpu drivers might rely on them to initialize
- obj-$(CONFIG_RESET_CONTROLLER) += reset/
-
--# tty/ comes before char/ so that the VT console is the boot-time
--# default.
--obj-y += tty/
--obj-y += char/
--
- # iommu/ comes before gpu as gpu are using iommu controllers
- obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
-
-diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
-index fe1cd01..6d2e87a 100644
---- a/drivers/video/console/Kconfig
-+++ b/drivers/video/console/Kconfig
-@@ -126,6 +126,19 @@ config FRAMEBUFFER_CONSOLE_ROTATION
- such that other users of the framebuffer will remain normally
- oriented.
-
-+config FB_CON_DECOR
-+ bool "Support for the Framebuffer Console Decorations"
-+ depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
-+ default n
-+ ---help---
-+ This option enables support for framebuffer console decorations which
-+ makes it possible to display images in the background of the system
-+ consoles. Note that userspace utilities are necessary in order to take
-+ advantage of these features. Refer to Documentation/fb/fbcondecor.txt
-+ for more information.
-+
-+ If unsure, say N.
-+
- config STI_CONSOLE
- bool "STI text console"
- depends on PARISC
-diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
-index 43bfa48..cc104b6f 100644
---- a/drivers/video/console/Makefile
-+++ b/drivers/video/console/Makefile
-@@ -16,4 +16,5 @@ obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
- fbcon_ccw.o
- endif
-
-+obj-$(CONFIG_FB_CON_DECOR) += fbcondecor.o cfbcondecor.o
- obj-$(CONFIG_FB_STI) += sticore.o
-diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
-index 61b182b..984384b 100644
---- a/drivers/video/console/bitblit.c
-+++ b/drivers/video/console/bitblit.c
-@@ -18,6 +18,7 @@
- #include <linux/console.h>
- #include <asm/types.h>
- #include "fbcon.h"
-+#include "fbcondecor.h"
-
- /*
- * Accelerated handlers.
-@@ -55,6 +56,13 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
- area.height = height * vc->vc_font.height;
- area.width = width * vc->vc_font.width;
-
-+ if (fbcon_decor_active(info, vc)) {
-+ area.sx += vc->vc_decor.tx;
-+ area.sy += vc->vc_decor.ty;
-+ area.dx += vc->vc_decor.tx;
-+ area.dy += vc->vc_decor.ty;
-+ }
-+
- info->fbops->fb_copyarea(info, &area);
- }
-
-@@ -380,11 +388,15 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
- cursor.image.depth = 1;
- cursor.rop = ROP_XOR;
-
-- if (info->fbops->fb_cursor)
-- err = info->fbops->fb_cursor(info, &cursor);
-+ if (fbcon_decor_active(info, vc)) {
-+ fbcon_decor_cursor(info, &cursor);
-+ } else {
-+ if (info->fbops->fb_cursor)
-+ err = info->fbops->fb_cursor(info, &cursor);
-
-- if (err)
-- soft_cursor(info, &cursor);
-+ if (err)
-+ soft_cursor(info, &cursor);
-+ }
-
- ops->cursor_reset = 0;
- }
-diff --git a/drivers/video/console/cfbcondecor.c b/drivers/video/console/cfbcondecor.c
-new file mode 100644
-index 0000000..a2b4497
---- /dev/null
-+++ b/drivers/video/console/cfbcondecor.c
-@@ -0,0 +1,471 @@
-+/*
-+ * linux/drivers/video/cfbcon_decor.c -- Framebuffer decor render functions
-+ *
-+ * Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
-+ *
-+ * Code based upon "Bootdecor" (C) 2001-2003
-+ * Volker Poplawski <volker@poplawski.de>,
-+ * Stefan Reinauer <stepan@suse.de>,
-+ * Steffen Winterfeldt <snwint@suse.de>,
-+ * Michael Schroeder <mls@suse.de>,
-+ * Ken Wimer <wimer@suse.de>.
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License. See the file COPYING in the main directory of this archive for
-+ * more details.
-+ */
-+#include <linux/module.h>
-+#include <linux/types.h>
-+#include <linux/fb.h>
-+#include <linux/selection.h>
-+#include <linux/slab.h>
-+#include <linux/vt_kern.h>
-+#include <asm/irq.h>
-+
-+#include "fbcon.h"
-+#include "fbcondecor.h"
-+
-+#define parse_pixel(shift,bpp,type) \
-+ do { \
-+ if (d & (0x80 >> (shift))) \
-+ dd2[(shift)] = fgx; \
-+ else \
-+ dd2[(shift)] = transparent ? *(type *)decor_src : bgx; \
-+ decor_src += (bpp); \
-+ } while (0) \
-+
-+extern int get_color(struct vc_data *vc, struct fb_info *info,
-+ u16 c, int is_fg);
-+
-+void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
-+{
-+ int i, j, k;
-+ int minlen = min(min(info->var.red.length, info->var.green.length),
-+ info->var.blue.length);
-+ u32 col;
-+
-+ for (j = i = 0; i < 16; i++) {
-+ k = color_table[i];
-+
-+ col = ((vc->vc_palette[j++] >> (8-minlen))
-+ << info->var.red.offset);
-+ col |= ((vc->vc_palette[j++] >> (8-minlen))
-+ << info->var.green.offset);
-+ col |= ((vc->vc_palette[j++] >> (8-minlen))
-+ << info->var.blue.offset);
-+ ((u32 *)info->pseudo_palette)[k] = col;
-+ }
-+}
-+
-+void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
-+ int width, u8* src, u32 fgx, u32 bgx, u8 transparent)
-+{
-+ unsigned int x, y;
-+ u32 dd;
-+ int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
-+ unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
-+ unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
-+ u16 dd2[4];
-+
-+ u8* decor_src = (u8 *)(info->bgdecor.data + ds);
-+ u8* dst = (u8 *)(info->screen_base + d);
-+
-+ if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
-+ return;
-+
-+ for (y = 0; y < height; y++) {
-+ switch (info->var.bits_per_pixel) {
-+
-+ case 32:
-+ for (x = 0; x < width; x++) {
-+
-+ if ((x & 7) == 0)
-+ d = *src++;
-+ if (d & 0x80)
-+ dd = fgx;
-+ else
-+ dd = transparent ?
-+ *(u32 *)decor_src : bgx;
-+
-+ d <<= 1;
-+ decor_src += 4;
-+ fb_writel(dd, dst);
-+ dst += 4;
-+ }
-+ break;
-+ case 24:
-+ for (x = 0; x < width; x++) {
-+
-+ if ((x & 7) == 0)
-+ d = *src++;
-+ if (d & 0x80)
-+ dd = fgx;
-+ else
-+ dd = transparent ?
-+ (*(u32 *)decor_src & 0xffffff) : bgx;
-+
-+ d <<= 1;
-+ decor_src += 3;
-+#ifdef __LITTLE_ENDIAN
-+ fb_writew(dd & 0xffff, dst);
-+ dst += 2;
-+ fb_writeb((dd >> 16), dst);
-+#else
-+ fb_writew(dd >> 8, dst);
-+ dst += 2;
-+ fb_writeb(dd & 0xff, dst);
-+#endif
-+ dst++;
-+ }
-+ break;
-+ case 16:
-+ for (x = 0; x < width; x += 2) {
-+ if ((x & 7) == 0)
-+ d = *src++;
-+
-+ parse_pixel(0, 2, u16);
-+ parse_pixel(1, 2, u16);
-+#ifdef __LITTLE_ENDIAN
-+ dd = dd2[0] | (dd2[1] << 16);
-+#else
-+ dd = dd2[1] | (dd2[0] << 16);
-+#endif
-+ d <<= 2;
-+ fb_writel(dd, dst);
-+ dst += 4;
-+ }
-+ break;
-+
-+ case 8:
-+ for (x = 0; x < width; x += 4) {
-+ if ((x & 7) == 0)
-+ d = *src++;
-+
-+ parse_pixel(0, 1, u8);
-+ parse_pixel(1, 1, u8);
-+ parse_pixel(2, 1, u8);
-+ parse_pixel(3, 1, u8);
-+
-+#ifdef __LITTLE_ENDIAN
-+ dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
-+#else
-+ dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
-+#endif
-+ d <<= 4;
-+ fb_writel(dd, dst);
-+ dst += 4;
-+ }
-+ }
-+
-+ dst += info->fix.line_length - width * bytespp;
-+ decor_src += (info->var.xres - width) * bytespp;
-+ }
-+}
-+
-+#define cc2cx(a) \
-+ ((info->fix.visual == FB_VISUAL_TRUECOLOR || \
-+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) ? \
-+ ((u32*)info->pseudo_palette)[a] : a)
-+
-+void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
-+ const unsigned short *s, int count, int yy, int xx)
-+{
-+ unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
-+ struct fbcon_ops *ops = info->fbcon_par;
-+ int fg_color, bg_color, transparent;
-+ u8 *src;
-+ u32 bgx, fgx;
-+ u16 c = scr_readw(s);
-+
-+ fg_color = get_color(vc, info, c, 1);
-+ bg_color = get_color(vc, info, c, 0);
-+
-+ /* Don't paint the background image if console is blanked */
-+ transparent = ops->blank_state ? 0 :
-+ (vc->vc_decor.bg_color == bg_color);
-+
-+ xx = xx * vc->vc_font.width + vc->vc_decor.tx;
-+ yy = yy * vc->vc_font.height + vc->vc_decor.ty;
-+
-+ fgx = cc2cx(fg_color);
-+ bgx = cc2cx(bg_color);
-+
-+ while (count--) {
-+ c = scr_readw(s++);
-+ src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
-+ ((vc->vc_font.width + 7) >> 3);
-+
-+ fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
-+ vc->vc_font.width, src, fgx, bgx, transparent);
-+ xx += vc->vc_font.width;
-+ }
-+}
-+
-+void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
-+{
-+ int i;
-+ unsigned int dsize, s_pitch;
-+ struct fbcon_ops *ops = info->fbcon_par;
-+ struct vc_data* vc;
-+ u8 *src;
-+
-+ /* we really don't need any cursors while the console is blanked */
-+ if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
-+ return;
-+
-+ vc = vc_cons[ops->currcon].d;
-+
-+ src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
-+ if (!src)
-+ return;
-+
-+ s_pitch = (cursor->image.width + 7) >> 3;
-+ dsize = s_pitch * cursor->image.height;
-+ if (cursor->enable) {
-+ switch (cursor->rop) {
-+ case ROP_XOR:
-+ for (i = 0; i < dsize; i++)
-+ src[i] = cursor->image.data[i] ^ cursor->mask[i];
-+ break;
-+ case ROP_COPY:
-+ default:
-+ for (i = 0; i < dsize; i++)
-+ src[i] = cursor->image.data[i] & cursor->mask[i];
-+ break;
-+ }
-+ } else
-+ memcpy(src, cursor->image.data, dsize);
-+
-+ fbcon_decor_renderc(info,
-+ cursor->image.dy + vc->vc_decor.ty,
-+ cursor->image.dx + vc->vc_decor.tx,
-+ cursor->image.height,
-+ cursor->image.width,
-+ (u8*)src,
-+ cc2cx(cursor->image.fg_color),
-+ cc2cx(cursor->image.bg_color),
-+ cursor->image.bg_color == vc->vc_decor.bg_color);
-+
-+ kfree(src);
-+}
-+
-+static void decorset(u8 *dst, int height, int width, int dstbytes,
-+ u32 bgx, int bpp)
-+{
-+ int i;
-+
-+ if (bpp == 8)
-+ bgx |= bgx << 8;
-+ if (bpp == 16 || bpp == 8)
-+ bgx |= bgx << 16;
-+
-+ while (height-- > 0) {
-+ u8 *p = dst;
-+
-+ switch (bpp) {
-+
-+ case 32:
-+ for (i=0; i < width; i++) {
-+ fb_writel(bgx, p); p += 4;
-+ }
-+ break;
-+ case 24:
-+ for (i=0; i < width; i++) {
-+#ifdef __LITTLE_ENDIAN
-+ fb_writew((bgx & 0xffff),(u16*)p); p += 2;
-+ fb_writeb((bgx >> 16),p++);
-+#else
-+ fb_writew((bgx >> 8),(u16*)p); p += 2;
-+ fb_writeb((bgx & 0xff),p++);
-+#endif
-+			}
-+			break;
-+ case 16:
-+ for (i=0; i < width/4; i++) {
-+ fb_writel(bgx,p); p += 4;
-+ fb_writel(bgx,p); p += 4;
-+ }
-+ if (width & 2) {
-+ fb_writel(bgx,p); p += 4;
-+ }
-+ if (width & 1)
-+ fb_writew(bgx,(u16*)p);
-+ break;
-+ case 8:
-+ for (i=0; i < width/4; i++) {
-+ fb_writel(bgx,p); p += 4;
-+ }
-+
-+ if (width & 2) {
-+ fb_writew(bgx,p); p += 2;
-+ }
-+ if (width & 1)
-+ fb_writeb(bgx,(u8*)p);
-+ break;
-+
-+ }
-+ dst += dstbytes;
-+ }
-+}
-+
-+void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
-+ int srclinebytes, int bpp)
-+{
-+ int i;
-+
-+ while (height-- > 0) {
-+ u32 *p = (u32 *)dst;
-+ u32 *q = (u32 *)src;
-+
-+ switch (bpp) {
-+
-+ case 32:
-+ for (i=0; i < width; i++)
-+ fb_writel(*q++, p++);
-+ break;
-+ case 24:
-+ for (i=0; i < (width*3/4); i++)
-+ fb_writel(*q++, p++);
-+ if ((width*3) % 4) {
-+ if (width & 2) {
-+ fb_writeb(*(u8*)q, (u8*)p);
-+ } else if (width & 1) {
-+ fb_writew(*(u16*)q, (u16*)p);
-+ fb_writeb(*(u8*)((u16*)q+1),(u8*)((u16*)p+2));
-+ }
-+ }
-+ break;
-+ case 16:
-+ for (i=0; i < width/4; i++) {
-+ fb_writel(*q++, p++);
-+ fb_writel(*q++, p++);
-+ }
-+ if (width & 2)
-+ fb_writel(*q++, p++);
-+ if (width & 1)
-+ fb_writew(*(u16*)q, (u16*)p);
-+ break;
-+ case 8:
-+ for (i=0; i < width/4; i++)
-+ fb_writel(*q++, p++);
-+
-+ if (width & 2) {
-+ fb_writew(*(u16*)q, (u16*)p);
-+ q = (u32*) ((u16*)q + 1);
-+ p = (u32*) ((u16*)p + 1);
-+ }
-+ if (width & 1)
-+ fb_writeb(*(u8*)q, (u8*)p);
-+ break;
-+ }
-+
-+ dst += linebytes;
-+ src += srclinebytes;
-+ }
-+}
-+
-+static void decorfill(struct fb_info *info, int sy, int sx, int height,
-+ int width)
-+{
-+ int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
-+ int d = sy * info->fix.line_length + sx * bytespp;
-+ int ds = (sy * info->var.xres + sx) * bytespp;
-+
-+ fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
-+ height, width, info->fix.line_length, info->var.xres * bytespp,
-+ info->var.bits_per_pixel);
-+}
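decorfill() has to track two different strides: the visible framebuffer may pad each scanline out to fix.line_length bytes, while the background image is tightly packed at var.xres pixels per row. A small sketch of the two offset computations (all concrete values below are illustrative):

```c
#include <stdio.h>

int main(void)
{
	unsigned bpp = 32, bytespp = (bpp + 7) >> 3;
	unsigned xres = 1024, line_length = 4352;	/* padded: 4352 > 1024 * 4 */
	unsigned sx = 10, sy = 20;

	/* offset into the real framebuffer (screen_base + d) */
	unsigned fb_off  = sy * line_length + sx * bytespp;
	/* offset into the packed background image (bgdecor.data + ds) */
	unsigned img_off = (sy * xres + sx) * bytespp;

	printf("fb=%u img=%u\n", fb_off, img_off);
	return 0;
}
```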
-+
-+void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
-+ int height, int width)
-+{
-+ int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
-+ struct fbcon_ops *ops = info->fbcon_par;
-+ u8 *dst;
-+ int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
-+
-+ transparent = (vc->vc_decor.bg_color == bg_color);
-+ sy = sy * vc->vc_font.height + vc->vc_decor.ty;
-+ sx = sx * vc->vc_font.width + vc->vc_decor.tx;
-+ height *= vc->vc_font.height;
-+ width *= vc->vc_font.width;
-+
-+ /* Don't paint the background image if console is blanked */
-+ if (transparent && !ops->blank_state) {
-+ decorfill(info, sy, sx, height, width);
-+ } else {
-+ dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
-+ sx * ((info->var.bits_per_pixel + 7) >> 3));
-+ decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
-+ info->var.bits_per_pixel);
-+ }
-+}
-+
-+void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
-+ int bottom_only)
-+{
-+ unsigned int tw = vc->vc_cols*vc->vc_font.width;
-+ unsigned int th = vc->vc_rows*vc->vc_font.height;
-+
-+ if (!bottom_only) {
-+ /* top margin */
-+ decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
-+ /* left margin */
-+ decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
-+ /* right margin */
-+ decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th,
-+ info->var.xres - vc->vc_decor.tx - tw);
-+ }
-+ decorfill(info, vc->vc_decor.ty + th, 0,
-+ info->var.yres - vc->vc_decor.ty - th, info->var.xres);
-+}
-+
-+void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y,
-+ int sx, int dx, int width)
-+{
-+ u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
-+ u16 *s = d + (dx - sx);
-+ u16 *start = d;
-+ u16 *ls = d;
-+ u16 *le = d + width;
-+ u16 c;
-+ int x = dx;
-+ u16 attr = 1;
-+
-+ do {
-+ c = scr_readw(d);
-+ if (attr != (c & 0xff00)) {
-+ attr = c & 0xff00;
-+ if (d > start) {
-+ fbcon_decor_putcs(vc, info, start, d - start, y, x);
-+ x += d - start;
-+ start = d;
-+ }
-+ }
-+ if (s >= ls && s < le && c == scr_readw(s)) {
-+ if (d > start) {
-+ fbcon_decor_putcs(vc, info, start, d - start, y, x);
-+ x += d - start + 1;
-+ start = d + 1;
-+ } else {
-+ x++;
-+ start++;
-+ }
-+ }
-+ s++;
-+ d++;
-+ } while (d < le);
-+ if (d > start)
-+ fbcon_decor_putcs(vc, info, start, d - start, y, x);
-+}
-+
-+void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
-+{
-+ if (blank) {
-+ decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
-+ info->fix.line_length, 0, info->var.bits_per_pixel);
-+ } else {
-+ update_screen(vc);
-+ fbcon_decor_clear_margins(vc, info, 0);
-+ }
-+}
-+
-diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
-index f447734..da50d61 100644
---- a/drivers/video/console/fbcon.c
-+++ b/drivers/video/console/fbcon.c
-@@ -79,6 +79,7 @@
- #include <asm/irq.h>
-
- #include "fbcon.h"
-+#include "../console/fbcondecor.h"
-
- #ifdef FBCONDEBUG
- # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
-@@ -94,7 +95,7 @@ enum {
-
- static struct display fb_display[MAX_NR_CONSOLES];
-
--static signed char con2fb_map[MAX_NR_CONSOLES];
-+signed char con2fb_map[MAX_NR_CONSOLES];
- static signed char con2fb_map_boot[MAX_NR_CONSOLES];
-
- static int logo_lines;
-@@ -286,7 +287,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
- !vt_force_oops_output(vc);
- }
-
--static int get_color(struct vc_data *vc, struct fb_info *info,
-+int get_color(struct vc_data *vc, struct fb_info *info,
- u16 c, int is_fg)
- {
- int depth = fb_get_color_depth(&info->var, &info->fix);
-@@ -551,6 +552,9 @@ static int do_fbcon_takeover(int show_logo)
- info_idx = -1;
- } else {
- fbcon_has_console_bind = 1;
-+#ifdef CONFIG_FB_CON_DECOR
-+ fbcon_decor_init();
-+#endif
- }
-
- return err;
-@@ -1007,6 +1011,12 @@ static const char *fbcon_startup(void)
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
- cols /= vc->vc_font.width;
- rows /= vc->vc_font.height;
-+
-+ if (fbcon_decor_active(info, vc)) {
-+ cols = vc->vc_decor.twidth / vc->vc_font.width;
-+ rows = vc->vc_decor.theight / vc->vc_font.height;
-+ }
-+
- vc_resize(vc, cols, rows);
-
- DPRINTK("mode: %s\n", info->fix.id);
-@@ -1036,7 +1046,7 @@ static void fbcon_init(struct vc_data *vc, int init)
- cap = info->flags;
-
- if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
-- (info->fix.type == FB_TYPE_TEXT))
-+ (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
- logo = 0;
-
- if (var_to_display(p, &info->var, info))
-@@ -1260,6 +1270,11 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
- fbcon_clear_margins(vc, 0);
- }
-
-+ if (fbcon_decor_active(info, vc)) {
-+ fbcon_decor_clear(vc, info, sy, sx, height, width);
-+ return;
-+ }
-+
- /* Split blits that cross physical y_wrap boundary */
-
- y_break = p->vrows - p->yscroll;
-@@ -1279,10 +1294,15 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
- struct display *p = &fb_display[vc->vc_num];
- struct fbcon_ops *ops = info->fbcon_par;
-
-- if (!fbcon_is_inactive(vc, info))
-- ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
-- get_color(vc, info, scr_readw(s), 1),
-- get_color(vc, info, scr_readw(s), 0));
-+ if (!fbcon_is_inactive(vc, info)) {
-+
-+ if (fbcon_decor_active(info, vc))
-+ fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
-+ else
-+ ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
-+ get_color(vc, info, scr_readw(s), 1),
-+ get_color(vc, info, scr_readw(s), 0));
-+ }
- }
-
- static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
-@@ -1298,8 +1318,13 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_ops *ops = info->fbcon_par;
-
-- if (!fbcon_is_inactive(vc, info))
-- ops->clear_margins(vc, info, bottom_only);
-+ if (!fbcon_is_inactive(vc, info)) {
-+ if (fbcon_decor_active(info, vc)) {
-+ fbcon_decor_clear_margins(vc, info, bottom_only);
-+ } else {
-+ ops->clear_margins(vc, info, bottom_only);
-+ }
-+ }
- }
-
- static void fbcon_cursor(struct vc_data *vc, int mode)
-@@ -1819,7 +1844,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
- count = vc->vc_rows;
- if (softback_top)
- fbcon_softback_note(vc, t, count);
-- if (logo_shown >= 0)
-+ if (logo_shown >= 0 || fbcon_decor_active(info, vc))
- goto redraw_up;
- switch (p->scrollmode) {
- case SCROLL_MOVE:
-@@ -1912,6 +1937,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
- count = vc->vc_rows;
- if (logo_shown >= 0)
- goto redraw_down;
-+ if (fbcon_decor_active(info, vc))
-+ goto redraw_down;
- switch (p->scrollmode) {
- case SCROLL_MOVE:
- fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
-@@ -2060,6 +2087,13 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
- }
- return;
- }
-+
-+ if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
-+ /* must use slower redraw bmove to keep background pic intact */
-+ fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
-+ return;
-+ }
-+
- ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
- height, width);
- }
-@@ -2130,8 +2164,8 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
- var.yres = virt_h * virt_fh;
- x_diff = info->var.xres - var.xres;
- y_diff = info->var.yres - var.yres;
-- if (x_diff < 0 || x_diff > virt_fw ||
-- y_diff < 0 || y_diff > virt_fh) {
-+ if ((x_diff < 0 || x_diff > virt_fw ||
-+ y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
- const struct fb_videomode *mode;
-
- DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
-@@ -2167,6 +2201,21 @@ static int fbcon_switch(struct vc_data *vc)
-
- info = registered_fb[con2fb_map[vc->vc_num]];
- ops = info->fbcon_par;
-+ prev_console = ops->currcon;
-+ if (prev_console != -1)
-+ old_info = registered_fb[con2fb_map[prev_console]];
-+
-+#ifdef CONFIG_FB_CON_DECOR
-+ if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
-+ struct vc_data *vc_curr = vc_cons[prev_console].d;
-+ if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
-+ /* Clear the screen to avoid displaying funky colors during
-+ * palette updates. */
-+ memset((u8*)info->screen_base + info->fix.line_length * info->var.yoffset,
-+ 0, info->var.yres * info->fix.line_length);
-+ }
-+ }
-+#endif
-
- if (softback_top) {
- if (softback_lines)
-@@ -2185,9 +2234,6 @@ static int fbcon_switch(struct vc_data *vc)
- logo_shown = FBCON_LOGO_CANSHOW;
- }
-
-- prev_console = ops->currcon;
-- if (prev_console != -1)
-- old_info = registered_fb[con2fb_map[prev_console]];
- /*
- * FIXME: If we have multiple fbdev's loaded, we need to
- * update all info->currcon. Perhaps, we can place this
-@@ -2231,6 +2277,18 @@ static int fbcon_switch(struct vc_data *vc)
- fbcon_del_cursor_timer(old_info);
- }
-
-+ if (fbcon_decor_active_vc(vc)) {
-+ struct vc_data *vc_curr = vc_cons[prev_console].d;
-+
-+ if (!vc_curr->vc_decor.theme ||
-+ strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
-+ (fbcon_decor_active_nores(info, vc_curr) &&
-+ !fbcon_decor_active(info, vc_curr))) {
-+ fbcon_decor_disable(vc, 0);
-+ fbcon_decor_call_helper("modechange", vc->vc_num);
-+ }
-+ }
-+
- if (fbcon_is_inactive(vc, info) ||
- ops->blank_state != FB_BLANK_UNBLANK)
- fbcon_del_cursor_timer(info);
-@@ -2339,15 +2397,20 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
- }
- }
-
-- if (!fbcon_is_inactive(vc, info)) {
-+ if (!fbcon_is_inactive(vc, info)) {
- if (ops->blank_state != blank) {
- ops->blank_state = blank;
- fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
- ops->cursor_flash = (!blank);
-
-- if (!(info->flags & FBINFO_MISC_USEREVENT))
-- if (fb_blank(info, blank))
-- fbcon_generic_blank(vc, info, blank);
-+ if (!(info->flags & FBINFO_MISC_USEREVENT)) {
-+ if (fb_blank(info, blank)) {
-+ if (fbcon_decor_active(info, vc))
-+ fbcon_decor_blank(vc, info, blank);
-+ else
-+ fbcon_generic_blank(vc, info, blank);
-+ }
-+ }
- }
-
- if (!blank)
-@@ -2522,13 +2585,22 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
- }
-
- if (resize) {
-+ /* reset wrap/pan */
- int cols, rows;
-
- cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
-+
-+ if (fbcon_decor_active(info, vc)) {
-+ info->var.xoffset = info->var.yoffset = p->yscroll = 0;
-+ cols = vc->vc_decor.twidth;
-+ rows = vc->vc_decor.theight;
-+ }
- cols /= w;
- rows /= h;
-+
- vc_resize(vc, cols, rows);
-+
- if (CON_IS_VISIBLE(vc) && softback_buf)
- fbcon_update_softback(vc);
- } else if (CON_IS_VISIBLE(vc)
-@@ -2657,7 +2729,11 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
- int i, j, k, depth;
- u8 val;
-
-- if (fbcon_is_inactive(vc, info))
-+ if (fbcon_is_inactive(vc, info)
-+#ifdef CONFIG_FB_CON_DECOR
-+ || vc->vc_num != fg_console
-+#endif
-+ )
- return -EINVAL;
-
- if (!CON_IS_VISIBLE(vc))
-@@ -2683,14 +2759,56 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
- } else
- fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
-
-- return fb_set_cmap(&palette_cmap, info);
-+ if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
-+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
-+
-+ u16 *red, *green, *blue;
-+ int minlen = min(min(info->var.red.length, info->var.green.length),
-+ info->var.blue.length);
-+ int h;
-+
-+ struct fb_cmap cmap = {
-+ .start = 0,
-+ .len = (1 << minlen),
-+ .red = NULL,
-+ .green = NULL,
-+ .blue = NULL,
-+ .transp = NULL
-+ };
-+
-+ red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
-+
-+ if (!red)
-+ goto out;
-+
-+ green = red + 256;
-+ blue = green + 256;
-+ cmap.red = red;
-+ cmap.green = green;
-+ cmap.blue = blue;
-+
-+ for (i = 0; i < cmap.len; i++) {
-+ red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
-+ }
-+
-+ h = fb_set_cmap(&cmap, info);
-+ fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
-+ kfree(red);
-+
-+ return h;
-+
-+ } else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
-+ info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
-+ fb_set_cmap(&info->bgdecor.cmap, info);
-+
-+out: return fb_set_cmap(&palette_cmap, info);
- }
-
- static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
- {
- unsigned long p;
- int line;
--
-+
- if (vc->vc_num != fg_console || !softback_lines)
- return (u16 *) (vc->vc_origin + offset);
- line = offset / vc->vc_size_row;
-@@ -2909,7 +3027,14 @@ static void fbcon_modechanged(struct fb_info *info)
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
- cols /= vc->vc_font.width;
- rows /= vc->vc_font.height;
-- vc_resize(vc, cols, rows);
-+
-+ if (!fbcon_decor_active_nores(info, vc)) {
-+ vc_resize(vc, cols, rows);
-+ } else {
-+ fbcon_decor_disable(vc, 0);
-+ fbcon_decor_call_helper("modechange", vc->vc_num);
-+ }
-+
- updatescrollmode(p, info, vc);
- scrollback_max = 0;
- scrollback_current = 0;
-@@ -2954,7 +3079,9 @@ static void fbcon_set_all_vcs(struct fb_info *info)
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
- cols /= vc->vc_font.width;
- rows /= vc->vc_font.height;
-- vc_resize(vc, cols, rows);
-+ if (!fbcon_decor_active_nores(info, vc)) {
-+ vc_resize(vc, cols, rows);
-+ }
- }
-
- if (fg != -1)
-@@ -3596,6 +3723,7 @@ static void fbcon_exit(void)
- }
- }
-
-+ fbcon_decor_exit();
- fbcon_has_exited = 1;
- }
-
-diff --git a/drivers/video/console/fbcondecor.c b/drivers/video/console/fbcondecor.c
-new file mode 100644
-index 0000000..babc8c5
---- /dev/null
-+++ b/drivers/video/console/fbcondecor.c
-@@ -0,0 +1,555 @@
-+/*
-+ * linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
-+ *
-+ * Copyright (C) 2004-2009 Michal Januszewski <michalj+fbcondecor@gmail.com>
-+ *
-+ * Code based upon "Bootsplash" (C) 2001-2003
-+ * Volker Poplawski <volker@poplawski.de>,
-+ * Stefan Reinauer <stepan@suse.de>,
-+ * Steffen Winterfeldt <snwint@suse.de>,
-+ * Michael Schroeder <mls@suse.de>,
-+ * Ken Wimer <wimer@suse.de>.
-+ *
-+ * Compat ioctl support by Thorsten Klein <TK@Thorsten-Klein.de>.
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License. See the file COPYING in the main directory of this archive for
-+ * more details.
-+ *
-+ */
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/fb.h>
-+#include <linux/vt_kern.h>
-+#include <linux/vmalloc.h>
-+#include <linux/unistd.h>
-+#include <linux/syscalls.h>
-+#include <linux/init.h>
-+#include <linux/proc_fs.h>
-+#include <linux/workqueue.h>
-+#include <linux/kmod.h>
-+#include <linux/miscdevice.h>
-+#include <linux/device.h>
-+#include <linux/fs.h>
-+#include <linux/compat.h>
-+#include <linux/console.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/irq.h>
-+
-+#include "fbcon.h"
-+#include "fbcondecor.h"
-+
-+extern signed char con2fb_map[];
-+static int fbcon_decor_enable(struct vc_data *vc);
-+char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
-+static int initialized = 0;
-+
-+int fbcon_decor_call_helper(char* cmd, unsigned short vc)
-+{
-+ char *envp[] = {
-+ "HOME=/",
-+ "PATH=/sbin:/bin",
-+ NULL
-+ };
-+
-+ char tfb[5];
-+ char tcons[5];
-+ unsigned char fb = (int) con2fb_map[vc];
-+
-+ char *argv[] = {
-+ fbcon_decor_path,
-+ "2",
-+ cmd,
-+ tcons,
-+ tfb,
-+ vc_cons[vc].d->vc_decor.theme,
-+ NULL
-+ };
-+
-+ snprintf(tfb,5,"%d",fb);
-+ snprintf(tcons,5,"%d",vc);
-+
-+ return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
-+}
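fbcon_decor_call_helper() execs the userspace helper with a fixed argv layout: argv[1] is the protocol version "2", followed by the command, console number, framebuffer number, and theme name. A throwaway stand-in helper like the one below (hypothetical, not shipped with the patch) can be installed at fbcon_decor_path to inspect exactly what the kernel passes:

```c
#include <stdio.h>

int main(int argc, char **argv)
{
	/* Log the arguments the kernel handed us; the helper runs with
	 * no controlling terminal, so append to a file instead. */
	FILE *log = fopen("/tmp/fbcondecor.log", "a");

	if (!log)
		return 1;
	for (int i = 0; i < argc; i++)
		fprintf(log, "argv[%d]=%s\n", i, argv[i]);
	fclose(log);
	return 0;
}
```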
-+
-+/* Disables fbcondecor on a virtual console; called with console sem held. */
-+int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
-+{
-+ struct fb_info* info;
-+
-+ if (!vc->vc_decor.state)
-+ return -EINVAL;
-+
-+ info = registered_fb[(int) con2fb_map[vc->vc_num]];
-+
-+ if (info == NULL)
-+ return -EINVAL;
-+
-+ vc->vc_decor.state = 0;
-+ vc_resize(vc, info->var.xres / vc->vc_font.width,
-+ info->var.yres / vc->vc_font.height);
-+
-+ if (fg_console == vc->vc_num && redraw) {
-+ redraw_screen(vc, 0);
-+ update_region(vc, vc->vc_origin +
-+ vc->vc_size_row * vc->vc_top,
-+ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
-+ }
-+
-+ printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
-+ vc->vc_num);
-+
-+ return 0;
-+}
-+
-+/* Enables fbcondecor on a virtual console; called with console sem held. */
-+static int fbcon_decor_enable(struct vc_data *vc)
-+{
-+ struct fb_info* info;
-+
-+ info = registered_fb[(int) con2fb_map[vc->vc_num]];
-+
-+ if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
-+ info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
-+ vc->vc_num == fg_console))
-+ return -EINVAL;
-+
-+ vc->vc_decor.state = 1;
-+ vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
-+ vc->vc_decor.theight / vc->vc_font.height);
-+
-+ if (fg_console == vc->vc_num) {
-+ redraw_screen(vc, 0);
-+ update_region(vc, vc->vc_origin +
-+ vc->vc_size_row * vc->vc_top,
-+ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
-+ fbcon_decor_clear_margins(vc, info, 0);
-+ }
-+
-+ printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
-+ vc->vc_num);
-+
-+ return 0;
-+}
-+
-+static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
-+{
-+ int ret;
-+
-+// if (origin == FBCON_DECOR_IO_ORIG_USER)
-+ console_lock();
-+ if (!state)
-+ ret = fbcon_decor_disable(vc, 1);
-+ else
-+ ret = fbcon_decor_enable(vc);
-+// if (origin == FBCON_DECOR_IO_ORIG_USER)
-+ console_unlock();
-+
-+ return ret;
-+}
-+
-+static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
-+{
-+ *state = vc->vc_decor.state;
-+}
-+
-+static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
-+{
-+ struct fb_info *info;
-+ int len;
-+ char *tmp;
-+
-+ info = registered_fb[(int) con2fb_map[vc->vc_num]];
-+
-+ if (info == NULL || !cfg->twidth || !cfg->theight ||
-+ cfg->tx + cfg->twidth > info->var.xres ||
-+ cfg->ty + cfg->theight > info->var.yres)
-+ return -EINVAL;
-+
-+ len = strlen_user(cfg->theme);
-+ if (!len || len > FBCON_DECOR_THEME_LEN)
-+ return -EINVAL;
-+ tmp = kmalloc(len, GFP_KERNEL);
-+ if (!tmp)
-+ return -ENOMEM;
-+	if (copy_from_user(tmp, (void __user *)cfg->theme, len)) {
-+		kfree(tmp);
-+		return -EFAULT;
-+	}
-+ cfg->theme = tmp;
-+ cfg->state = 0;
-+
-+ /* If this ioctl is a response to a request from kernel, the console sem
-+ * is already held; we also don't need to disable decor because either the
-+ * new config and background picture will be successfully loaded, and the
-+ * decor will stay on, or in case of a failure it'll be turned off in fbcon. */
-+// if (origin == FBCON_DECOR_IO_ORIG_USER) {
-+ console_lock();
-+ if (vc->vc_decor.state)
-+ fbcon_decor_disable(vc, 1);
-+// }
-+
-+ if (vc->vc_decor.theme)
-+ kfree(vc->vc_decor.theme);
-+
-+ vc->vc_decor = *cfg;
-+
-+// if (origin == FBCON_DECOR_IO_ORIG_USER)
-+ console_unlock();
-+
-+ printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
-+ vc->vc_num, vc->vc_decor.theme);
-+ return 0;
-+}
-+
-+static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc, struct vc_decor *decor)
-+{
-+ char __user *tmp;
-+
-+ tmp = decor->theme;
-+ *decor = vc->vc_decor;
-+ decor->theme = tmp;
-+
-+ if (vc->vc_decor.theme) {
-+ if (copy_to_user(tmp, vc->vc_decor.theme, strlen(vc->vc_decor.theme) + 1))
-+ return -EFAULT;
-+ } else
-+ if (put_user(0, tmp))
-+ return -EFAULT;
-+
-+ return 0;
-+}
-+
-+static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img, unsigned char origin)
-+{
-+ struct fb_info *info;
-+ int len;
-+ u8 *tmp;
-+
-+ if (vc->vc_num != fg_console)
-+ return -EINVAL;
-+
-+ info = registered_fb[(int) con2fb_map[vc->vc_num]];
-+
-+ if (info == NULL)
-+ return -EINVAL;
-+
-+ if (img->width != info->var.xres || img->height != info->var.yres) {
-+ printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
-+ printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height, info->var.xres, info->var.yres);
-+ return -EINVAL;
-+ }
-+
-+ if (img->depth != info->var.bits_per_pixel) {
-+ printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
-+ return -EINVAL;
-+ }
-+
-+ if (img->depth == 8) {
-+ if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
-+ !img->cmap.blue)
-+ return -EINVAL;
-+
-+ tmp = vmalloc(img->cmap.len * 3 * 2);
-+ if (!tmp)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmp,
-+ (void __user*)img->cmap.red, (img->cmap.len << 1)) ||
-+ copy_from_user(tmp + (img->cmap.len << 1),
-+ (void __user*)img->cmap.green, (img->cmap.len << 1)) ||
-+ copy_from_user(tmp + (img->cmap.len << 2),
-+ (void __user*)img->cmap.blue, (img->cmap.len << 1))) {
-+ vfree(tmp);
-+ return -EFAULT;
-+ }
-+
-+ img->cmap.transp = NULL;
-+ img->cmap.red = (u16*)tmp;
-+ img->cmap.green = img->cmap.red + img->cmap.len;
-+ img->cmap.blue = img->cmap.green + img->cmap.len;
-+ } else {
-+ img->cmap.red = NULL;
-+ }
-+
-+ len = ((img->depth + 7) >> 3) * img->width * img->height;
-+
-+ /*
-+ * Allocate an additional byte so that we never go outside of the
-+ * buffer boundaries in the rendering functions in a 24 bpp mode.
-+ */
-+ tmp = vmalloc(len + 1);
-+
-+ if (!tmp)
-+ goto out;
-+
-+ if (copy_from_user(tmp, (void __user*)img->data, len))
-+ goto out;
-+
-+ img->data = tmp;
-+
-+ /* If this ioctl is a response to a request from kernel, the console sem
-+ * is already held. */
-+// if (origin == FBCON_DECOR_IO_ORIG_USER)
-+ console_lock();
-+
-+ if (info->bgdecor.data)
-+ vfree((u8*)info->bgdecor.data);
-+ if (info->bgdecor.cmap.red)
-+ vfree(info->bgdecor.cmap.red);
-+
-+ info->bgdecor = *img;
-+
-+ if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
-+ redraw_screen(vc, 0);
-+ update_region(vc, vc->vc_origin +
-+ vc->vc_size_row * vc->vc_top,
-+ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
-+ fbcon_decor_clear_margins(vc, info, 0);
-+ }
-+
-+// if (origin == FBCON_DECOR_IO_ORIG_USER)
-+ console_unlock();
-+
-+ return 0;
-+
-+out: if (img->cmap.red)
-+ vfree(img->cmap.red);
-+
-+ if (tmp)
-+ vfree(tmp);
-+ return -ENOMEM;
-+}
-+
-+static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
-+{
-+ struct fbcon_decor_iowrapper __user *wrapper = (void __user*) arg;
-+ struct vc_data *vc = NULL;
-+ unsigned short vc_num = 0;
-+ unsigned char origin = 0;
-+ void __user *data = NULL;
-+
-+ if (!access_ok(VERIFY_READ, wrapper,
-+ sizeof(struct fbcon_decor_iowrapper)))
-+ return -EFAULT;
-+
-+ __get_user(vc_num, &wrapper->vc);
-+ __get_user(origin, &wrapper->origin);
-+ __get_user(data, &wrapper->data);
-+
-+ if (!vc_cons_allocated(vc_num))
-+ return -EINVAL;
-+
-+ vc = vc_cons[vc_num].d;
-+
-+ switch (cmd) {
-+ case FBIOCONDECOR_SETPIC:
-+ {
-+ struct fb_image img;
-+ if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
-+ return -EFAULT;
-+
-+ return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
-+ }
-+ case FBIOCONDECOR_SETCFG:
-+ {
-+ struct vc_decor cfg;
-+ if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
-+ return -EFAULT;
-+
-+ return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
-+ }
-+ case FBIOCONDECOR_GETCFG:
-+ {
-+ int rval;
-+ struct vc_decor cfg;
-+
-+ if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
-+ return -EFAULT;
-+
-+ rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
-+
-+ if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
-+ return -EFAULT;
-+ return rval;
-+ }
-+ case FBIOCONDECOR_SETSTATE:
-+ {
-+ unsigned int state = 0;
-+ if (get_user(state, (unsigned int __user *)data))
-+ return -EFAULT;
-+ return fbcon_decor_ioctl_dosetstate(vc, state, origin);
-+ }
-+ case FBIOCONDECOR_GETSTATE:
-+ {
-+ unsigned int state = 0;
-+ fbcon_decor_ioctl_dogetstate(vc, &state);
-+ return put_user(state, (unsigned int __user *)data);
-+ }
-+
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
-+
-+#ifdef CONFIG_COMPAT
-+
-+static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) {
-+
-+ struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
-+ struct vc_data *vc = NULL;
-+ unsigned short vc_num = 0;
-+ unsigned char origin = 0;
-+ compat_uptr_t data_compat = 0;
-+ void __user *data = NULL;
-+
-+ if (!access_ok(VERIFY_READ, wrapper,
-+ sizeof(struct fbcon_decor_iowrapper32)))
-+ return -EFAULT;
-+
-+ __get_user(vc_num, &wrapper->vc);
-+ __get_user(origin, &wrapper->origin);
-+ __get_user(data_compat, &wrapper->data);
-+ data = compat_ptr(data_compat);
-+
-+ if (!vc_cons_allocated(vc_num))
-+ return -EINVAL;
-+
-+ vc = vc_cons[vc_num].d;
-+
-+ switch (cmd) {
-+ case FBIOCONDECOR_SETPIC32:
-+ {
-+ struct fb_image32 img_compat;
-+ struct fb_image img;
-+
-+ if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
-+ return -EFAULT;
-+
-+ fb_image_from_compat(img, img_compat);
-+
-+ return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
-+ }
-+
-+ case FBIOCONDECOR_SETCFG32:
-+ {
-+ struct vc_decor32 cfg_compat;
-+ struct vc_decor cfg;
-+
-+ if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
-+ return -EFAULT;
-+
-+ vc_decor_from_compat(cfg, cfg_compat);
-+
-+ return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
-+ }
-+
-+ case FBIOCONDECOR_GETCFG32:
-+ {
-+ int rval;
-+ struct vc_decor32 cfg_compat;
-+ struct vc_decor cfg;
-+
-+ if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
-+ return -EFAULT;
-+ cfg.theme = compat_ptr(cfg_compat.theme);
-+
-+ rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
-+
-+ vc_decor_to_compat(cfg_compat, cfg);
-+
-+ if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
-+ return -EFAULT;
-+ return rval;
-+ }
-+
-+ case FBIOCONDECOR_SETSTATE32:
-+ {
-+ compat_uint_t state_compat = 0;
-+ unsigned int state = 0;
-+
-+ if (get_user(state_compat, (compat_uint_t __user *)data))
-+ return -EFAULT;
-+
-+ state = (unsigned int)state_compat;
-+
-+ return fbcon_decor_ioctl_dosetstate(vc, state, origin);
-+ }
-+
-+ case FBIOCONDECOR_GETSTATE32:
-+ {
-+ compat_uint_t state_compat = 0;
-+ unsigned int state = 0;
-+
-+ fbcon_decor_ioctl_dogetstate(vc, &state);
-+ state_compat = (compat_uint_t)state;
-+
-+ return put_user(state_compat, (compat_uint_t __user *)data);
-+ }
-+
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
-+#else
-+ #define fbcon_decor_compat_ioctl NULL
-+#endif
-+
-+static struct file_operations fbcon_decor_ops = {
-+ .owner = THIS_MODULE,
-+ .unlocked_ioctl = fbcon_decor_ioctl,
-+ .compat_ioctl = fbcon_decor_compat_ioctl
-+};
-+
-+static struct miscdevice fbcon_decor_dev = {
-+ .minor = MISC_DYNAMIC_MINOR,
-+ .name = "fbcondecor",
-+ .fops = &fbcon_decor_ops
-+};
-+
-+void fbcon_decor_reset(void)
-+{
-+ int i;
-+
-+ for (i = 0; i < num_registered_fb; i++) {
-+ registered_fb[i]->bgdecor.data = NULL;
-+ registered_fb[i]->bgdecor.cmap.red = NULL;
-+ }
-+
-+ for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
-+ vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
-+ vc_cons[i].d->vc_decor.theight = 0;
-+ vc_cons[i].d->vc_decor.theme = NULL;
-+ }
-+
-+ return;
-+}
-+
-+int fbcon_decor_init(void)
-+{
-+ int i;
-+
-+ fbcon_decor_reset();
-+
-+ if (initialized)
-+ return 0;
-+
-+ i = misc_register(&fbcon_decor_dev);
-+ if (i) {
-+ printk(KERN_ERR "fbcondecor: failed to register device\n");
-+ return i;
-+ }
-+
-+ fbcon_decor_call_helper("init", 0);
-+ initialized = 1;
-+ return 0;
-+}
-+
-+int fbcon_decor_exit(void)
-+{
-+ fbcon_decor_reset();
-+ return 0;
-+}
-+
-+EXPORT_SYMBOL(fbcon_decor_path);
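With the misc device registered under the name "fbcondecor", userspace reaches these ioctls through /dev/fbcondecor using the fbcon_decor_iowrapper structure added to the uapi headers further down in this patch. A hedged usage sketch, assuming a kernel and headers built with this patch applied:

```c
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fb.h>	/* fbcon_decor_iowrapper, FBIOCONDECOR_* (patched) */

int main(void)
{
	unsigned int state = 0;
	struct fbcon_decor_iowrapper wrap = {
		.vc = 1,			/* virtual console to query */
		.origin = FBCON_DECOR_IO_ORIG_USER,
		.data = &state,
	};
	int fd = open("/dev/fbcondecor", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FBIOCONDECOR_GETSTATE, &wrap) < 0)
		perror("ioctl");
	else
		printf("decor state on vc1: %u\n", state);
	close(fd);
	return 0;
}
```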
-diff --git a/drivers/video/console/fbcondecor.h b/drivers/video/console/fbcondecor.h
-new file mode 100644
-index 0000000..3b3724b
---- /dev/null
-+++ b/drivers/video/console/fbcondecor.h
-@@ -0,0 +1,78 @@
-+/*
-+ * linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
-+ *
-+ * Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
-+ *
-+ */
-+
-+#ifndef __FBCON_DECOR_H
-+#define __FBCON_DECOR_H
-+
-+#ifndef _LINUX_FB_H
-+#include <linux/fb.h>
-+#endif
-+
-+/* This is needed for vc_cons in fbcmap.c */
-+#include <linux/vt_kern.h>
-+
-+struct fb_cursor;
-+struct fb_info;
-+struct vc_data;
-+
-+#ifdef CONFIG_FB_CON_DECOR
-+/* fbcondecor.c */
-+int fbcon_decor_init(void);
-+int fbcon_decor_exit(void);
-+int fbcon_decor_call_helper(char* cmd, unsigned short cons);
-+int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
-+
-+/* cfbcondecor.c */
-+void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
-+void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
-+void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
-+void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
-+void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
-+void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
-+void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinesbytes, int bpp);
-+void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
-+
-+/* vt.c */
-+void acquire_console_sem(void);
-+void release_console_sem(void);
-+void do_unblank_screen(int entering_gfx);
-+
-+/* struct vc_data *y */
-+#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme)
-+
-+/* struct fb_info *x, struct vc_data *y */
-+#define fbcon_decor_active_nores(x,y) (x->bgdecor.data && fbcon_decor_active_vc(y))
-+
-+/* struct fb_info *x, struct vc_data *y */
-+#define fbcon_decor_active(x,y) (fbcon_decor_active_nores(x,y) && \
-+ x->bgdecor.width == x->var.xres && \
-+ x->bgdecor.height == x->var.yres && \
-+ x->bgdecor.depth == x->var.bits_per_pixel)
-+
-+
-+#else /* CONFIG_FB_CON_DECOR */
-+
-+static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
-+static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
-+static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
-+static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
-+static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
-+static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
-+static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
-+static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
-+static inline int fbcon_decor_call_helper(char* cmd, unsigned short cons) { return 0; }
-+static inline int fbcon_decor_init(void) { return 0; }
-+static inline int fbcon_decor_exit(void) { return 0; }
-+static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
-+
-+#define fbcon_decor_active_vc(y) (0)
-+#define fbcon_decor_active_nores(x,y) (0)
-+#define fbcon_decor_active(x,y) (0)
-+
-+#endif /* CONFIG_FB_CON_DECOR */
-+
-+#endif /* __FBCON_DECOR_H */
-diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
-index e1f4727..2952e33 100644
---- a/drivers/video/fbdev/Kconfig
-+++ b/drivers/video/fbdev/Kconfig
-@@ -1204,7 +1204,6 @@ config FB_MATROX
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
-- select FB_TILEBLITTING
- select FB_MACMODES if PPC_PMAC
- ---help---
- Say Y here if you have a Matrox Millennium, Matrox Millennium II,
-diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
-index f89245b..05e036c 100644
---- a/drivers/video/fbdev/core/fbcmap.c
-+++ b/drivers/video/fbdev/core/fbcmap.c
-@@ -17,6 +17,8 @@
- #include <linux/slab.h>
- #include <linux/uaccess.h>
-
-+#include "../../console/fbcondecor.h"
-+
- static u16 red2[] __read_mostly = {
- 0x0000, 0xaaaa
- };
-@@ -249,14 +251,17 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
- if (transp)
- htransp = *transp++;
- if (info->fbops->fb_setcolreg(start++,
-- hred, hgreen, hblue,
-+ hred, hgreen, hblue,
- htransp, info))
- break;
- }
- }
-- if (rc == 0)
-+ if (rc == 0) {
- fb_copy_cmap(cmap, &info->cmap);
--
-+ if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
-+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
-+ fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
-+ }
- return rc;
- }
-
-diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
-index b6d5008..d6703f2 100644
---- a/drivers/video/fbdev/core/fbmem.c
-+++ b/drivers/video/fbdev/core/fbmem.c
-@@ -1250,15 +1250,6 @@ struct fb_fix_screeninfo32 {
- u16 reserved[3];
- };
-
--struct fb_cmap32 {
-- u32 start;
-- u32 len;
-- compat_caddr_t red;
-- compat_caddr_t green;
-- compat_caddr_t blue;
-- compat_caddr_t transp;
--};
--
- static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
- {
-diff --git a/include/linux/console_decor.h b/include/linux/console_decor.h
-new file mode 100644
-index 0000000..04b8d80
---- /dev/null
-+++ b/include/linux/console_decor.h
-@@ -0,0 +1,46 @@
-+#ifndef _LINUX_CONSOLE_DECOR_H_
-+#define _LINUX_CONSOLE_DECOR_H_ 1
-+
-+/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
-+struct vc_decor {
-+ __u8 bg_color; /* The color that is to be treated as transparent */
-+ __u8 state; /* Current decor state: 0 = off, 1 = on */
-+ __u16 tx, ty; /* Top left corner coordinates of the text field */
-+ __u16 twidth, theight; /* Width and height of the text field */
-+ char* theme;
-+};
-+
-+#ifdef __KERNEL__
-+#ifdef CONFIG_COMPAT
-+#include <linux/compat.h>
-+
-+struct vc_decor32 {
-+ __u8 bg_color; /* The color that is to be treated as transparent */
-+ __u8 state; /* Current decor state: 0 = off, 1 = on */
-+ __u16 tx, ty; /* Top left corner coordinates of the text field */
-+ __u16 twidth, theight; /* Width and height of the text field */
-+ compat_uptr_t theme;
-+};
-+
-+#define vc_decor_from_compat(to, from) \
-+ (to).bg_color = (from).bg_color; \
-+ (to).state = (from).state; \
-+ (to).tx = (from).tx; \
-+ (to).ty = (from).ty; \
-+ (to).twidth = (from).twidth; \
-+ (to).theight = (from).theight; \
-+ (to).theme = compat_ptr((from).theme)
-+
-+#define vc_decor_to_compat(to, from) \
-+ (to).bg_color = (from).bg_color; \
-+ (to).state = (from).state; \
-+ (to).tx = (from).tx; \
-+ (to).ty = (from).ty; \
-+ (to).twidth = (from).twidth; \
-+ (to).theight = (from).theight; \
-+ (to).theme = ptr_to_compat((from).theme)
-+
-+#endif /* CONFIG_COMPAT */
-+#endif /* __KERNEL__ */
-+
-+#endif
-diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
-index 7f0c329..98f5d60 100644
---- a/include/linux/console_struct.h
-+++ b/include/linux/console_struct.h
-@@ -19,6 +19,7 @@
- struct vt_struct;
-
- #define NPAR 16
-+#include <linux/console_decor.h>
-
- struct vc_data {
- struct tty_port port; /* Upper level data */
-@@ -107,6 +108,8 @@ struct vc_data {
- unsigned long vc_uni_pagedir;
- unsigned long *vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
- bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
-+
-+ struct vc_decor vc_decor;
- /* additional information is in vt_kern.h */
- };
-
-diff --git a/include/linux/fb.h b/include/linux/fb.h
-index fe6ac95..1e36b03 100644
---- a/include/linux/fb.h
-+++ b/include/linux/fb.h
-@@ -219,6 +219,34 @@ struct fb_deferred_io {
- };
- #endif
-
-+#ifdef __KERNEL__
-+#ifdef CONFIG_COMPAT
-+struct fb_image32 {
-+ __u32 dx; /* Where to place image */
-+ __u32 dy;
-+ __u32 width; /* Size of image */
-+ __u32 height;
-+ __u32 fg_color; /* Only used when a mono bitmap */
-+ __u32 bg_color;
-+ __u8 depth; /* Depth of the image */
-+ const compat_uptr_t data; /* Pointer to image data */
-+ struct fb_cmap32 cmap; /* color map info */
-+};
-+
-+#define fb_image_from_compat(to, from) \
-+ (to).dx = (from).dx; \
-+ (to).dy = (from).dy; \
-+ (to).width = (from).width; \
-+ (to).height = (from).height; \
-+ (to).fg_color = (from).fg_color; \
-+ (to).bg_color = (from).bg_color; \
-+ (to).depth = (from).depth; \
-+ (to).data = compat_ptr((from).data); \
-+ fb_cmap_from_compat((to).cmap, (from).cmap)
-+
-+#endif /* CONFIG_COMPAT */
-+#endif /* __KERNEL__ */
-+
- /*
- * Frame buffer operations
- *
-@@ -489,6 +517,9 @@ struct fb_info {
- #define FBINFO_STATE_SUSPENDED 1
- u32 state; /* Hardware state i.e suspend */
- void *fbcon_par; /* fbcon use-only private area */
-+
-+ struct fb_image bgdecor;
-+
- /* From here on everything is device dependent */
- void *par;
- /* we need the PCI or similar aperture base/size not
-diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
-index fb795c3..dc77a03 100644
---- a/include/uapi/linux/fb.h
-+++ b/include/uapi/linux/fb.h
-@@ -8,6 +8,25 @@
-
- #define FB_MAX 32 /* sufficient for now */
-
-+struct fbcon_decor_iowrapper
-+{
-+ unsigned short vc; /* Virtual console */
-+ unsigned char origin; /* Point of origin of the request */
-+ void *data;
-+};
-+
-+#ifdef __KERNEL__
-+#ifdef CONFIG_COMPAT
-+#include <linux/compat.h>
-+struct fbcon_decor_iowrapper32
-+{
-+ unsigned short vc; /* Virtual console */
-+ unsigned char origin; /* Point of origin of the request */
-+ compat_uptr_t data;
-+};
-+#endif /* CONFIG_COMPAT */
-+#endif /* __KERNEL__ */
-+
- /* ioctls
- 0x46 is 'F' */
- #define FBIOGET_VSCREENINFO 0x4600
-@@ -35,6 +54,25 @@
- #define FBIOGET_DISPINFO 0x4618
- #define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
-
-+#define FBIOCONDECOR_SETCFG _IOWR('F', 0x19, struct fbcon_decor_iowrapper)
-+#define FBIOCONDECOR_GETCFG _IOR('F', 0x1A, struct fbcon_decor_iowrapper)
-+#define FBIOCONDECOR_SETSTATE _IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
-+#define FBIOCONDECOR_GETSTATE _IOR('F', 0x1C, struct fbcon_decor_iowrapper)
-+#define FBIOCONDECOR_SETPIC _IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
-+#ifdef __KERNEL__
-+#ifdef CONFIG_COMPAT
-+#define FBIOCONDECOR_SETCFG32 _IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
-+#define FBIOCONDECOR_GETCFG32 _IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
-+#define FBIOCONDECOR_SETSTATE32 _IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
-+#define FBIOCONDECOR_GETSTATE32 _IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
-+#define FBIOCONDECOR_SETPIC32 _IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
-+#endif /* CONFIG_COMPAT */
-+#endif /* __KERNEL__ */
-+
-+#define FBCON_DECOR_THEME_LEN	128	/* Maximum length of a theme name */
-+#define FBCON_DECOR_IO_ORIG_KERNEL 0 /* Kernel ioctl origin */
-+#define FBCON_DECOR_IO_ORIG_USER 1 /* User ioctl origin */
-+
- #define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */
- #define FB_TYPE_PLANES 1 /* Non interleaved planes */
- #define FB_TYPE_INTERLEAVED_PLANES 2 /* Interleaved planes */
-@@ -277,6 +315,29 @@ struct fb_var_screeninfo {
- __u32 reserved[4]; /* Reserved for future compatibility */
- };
-
-+#ifdef __KERNEL__
-+#ifdef CONFIG_COMPAT
-+struct fb_cmap32 {
-+ __u32 start;
-+ __u32 len; /* Number of entries */
-+ compat_uptr_t red; /* Red values */
-+ compat_uptr_t green;
-+ compat_uptr_t blue;
-+ compat_uptr_t transp; /* transparency, can be NULL */
-+};
-+
-+#define fb_cmap_from_compat(to, from) \
-+ (to).start = (from).start; \
-+ (to).len = (from).len; \
-+ (to).red = compat_ptr((from).red); \
-+ (to).green = compat_ptr((from).green); \
-+ (to).blue = compat_ptr((from).blue); \
-+ (to).transp = compat_ptr((from).transp)
-+
-+#endif /* CONFIG_COMPAT */
-+#endif /* __KERNEL__ */
-+
-+
- struct fb_cmap {
- __u32 start; /* First entry */
- __u32 len; /* Number of entries */
-diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 74f5b58..6386ab0 100644
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -146,6 +146,10 @@ static const int cap_last_cap = CAP_LAST_CAP;
- static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
- #endif
-
-+#ifdef CONFIG_FB_CON_DECOR
-+extern char fbcon_decor_path[];
-+#endif
-+
- #ifdef CONFIG_INOTIFY_USER
- #include <linux/inotify.h>
- #endif
-@@ -255,6 +259,15 @@ static struct ctl_table sysctl_base_table[] = {
- .mode = 0555,
- .child = dev_table,
- },
-+#ifdef CONFIG_FB_CON_DECOR
-+ {
-+ .procname = "fbcondecor",
-+ .data = &fbcon_decor_path,
-+ .maxlen = KMOD_PATH_LEN,
-+ .mode = 0644,
-+ .proc_handler = &proc_dostring,
-+ },
-+#endif
- { }
- };
-
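Because the entry is added to sysctl_base_table, the helper path is exposed at the top level as /proc/sys/fbcondecor. A minimal sketch for retargeting the helper from userspace (the path written below is illustrative):

```c
#include <stdio.h>

int main(void)
{
	/* Point fbcon_decor_path at a different helper binary. */
	FILE *f = fopen("/proc/sys/fbcondecor", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("/sbin/fbcondecor_helper\n", f);
	fclose(f);
	return 0;
}
```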
diff --git a/5000_enable-additional-cpu-optimizations-for-gcc.patch b/5000_enable-additional-cpu-optimizations-for-gcc.patch
deleted file mode 100644
index f7ab6f0f..00000000
--- a/5000_enable-additional-cpu-optimizations-for-gcc.patch
+++ /dev/null
@@ -1,327 +0,0 @@
-This patch has been tested on and known to work with kernel versions from 3.2
-up to the latest git version (pulled on 12/14/2013).
-
-This patch will expand the number of microarchitectures to include new
-processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
-14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD
-Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 (Nehalem), Intel 2nd Gen Core
-i3/i5/i7 (Sandybridge), Intel 3rd Gen Core i3/i5/i7 (Ivybridge), and Intel 4th
-Gen Core i3/i5/i7 (Haswell). It also offers the compiler the 'native' flag.
-
-Small but real speed increases are measurable when a kernel make is timed as a
-benchmark endpoint, comparing a generic kernel to one built with one of the
-respective microarchitectures.
-
-See the following experimental evidence supporting this statement:
-https://github.com/graysky2/kernel_gcc_patch
-
-REQUIREMENTS
-linux version >=3.15
-gcc version <4.9
-
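Before selecting one of the new microarchitecture options, it can be worth confirming that the build host's CPU actually has the features the chosen -march target assumes. A quick hedged check using GCC's CPU-detection builtins (available since roughly GCC 4.8):

```c
#include <stdio.h>

int main(void)
{
	/* Must be called before any other __builtin_cpu_* query. */
	__builtin_cpu_init();

	/* e.g. AVX2 is what MCOREAVX2 (-march=core-avx2) assumes. */
	printf("sse4.2: %d\n", __builtin_cpu_supports("sse4.2"));
	printf("avx:    %d\n", __builtin_cpu_supports("avx"));
	printf("avx2:   %d\n", __builtin_cpu_supports("avx2"));
	return 0;
}
```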
----
-diff -uprN a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
---- a/arch/x86/include/asm/module.h 2013-11-03 18:41:51.000000000 -0500
-+++ b/arch/x86/include/asm/module.h 2013-12-15 06:21:24.351122516 -0500
-@@ -15,6 +15,16 @@
- #define MODULE_PROC_FAMILY "586MMX "
- #elif defined CONFIG_MCORE2
- #define MODULE_PROC_FAMILY "CORE2 "
-+#elif defined CONFIG_MNATIVE
-+#define MODULE_PROC_FAMILY "NATIVE "
-+#elif defined CONFIG_MCOREI7
-+#define MODULE_PROC_FAMILY "COREI7 "
-+#elif defined CONFIG_MCOREI7AVX
-+#define MODULE_PROC_FAMILY "COREI7AVX "
-+#elif defined CONFIG_MCOREAVXI
-+#define MODULE_PROC_FAMILY "COREAVXI "
-+#elif defined CONFIG_MCOREAVX2
-+#define MODULE_PROC_FAMILY "COREAVX2 "
- #elif defined CONFIG_MATOM
- #define MODULE_PROC_FAMILY "ATOM "
- #elif defined CONFIG_M686
-@@ -33,6 +43,18 @@
- #define MODULE_PROC_FAMILY "K7 "
- #elif defined CONFIG_MK8
- #define MODULE_PROC_FAMILY "K8 "
-+#elif defined CONFIG_MK10
-+#define MODULE_PROC_FAMILY "K10 "
-+#elif defined CONFIG_MBARCELONA
-+#define MODULE_PROC_FAMILY "BARCELONA "
-+#elif defined CONFIG_MBOBCAT
-+#define MODULE_PROC_FAMILY "BOBCAT "
-+#elif defined CONFIG_MBULLDOZER
-+#define MODULE_PROC_FAMILY "BULLDOZER "
-+#elif defined CONFIG_MPILEDRIVER
-+#define MODULE_PROC_FAMILY "PILEDRIVER "
-+#elif defined CONFIG_MJAGUAR
-+#define MODULE_PROC_FAMILY "JAGUAR "
- #elif defined CONFIG_MELAN
- #define MODULE_PROC_FAMILY "ELAN "
- #elif defined CONFIG_MCRUSOE
-diff -uprN a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
---- a/arch/x86/Kconfig.cpu 2013-11-03 18:41:51.000000000 -0500
-+++ b/arch/x86/Kconfig.cpu 2013-12-15 06:21:24.351122516 -0500
-@@ -139,7 +139,7 @@ config MPENTIUM4
-
-
- config MK6
-- bool "K6/K6-II/K6-III"
-+ bool "AMD K6/K6-II/K6-III"
- depends on X86_32
- ---help---
- Select this for an AMD K6-family processor. Enables use of
-@@ -147,7 +147,7 @@ config MK6
- flags to GCC.
-
- config MK7
-- bool "Athlon/Duron/K7"
-+ bool "AMD Athlon/Duron/K7"
- depends on X86_32
- ---help---
- Select this for an AMD Athlon K7-family processor. Enables use of
-@@ -155,12 +155,55 @@ config MK7
- flags to GCC.
-
- config MK8
-- bool "Opteron/Athlon64/Hammer/K8"
-+ bool "AMD Opteron/Athlon64/Hammer/K8"
- ---help---
- Select this for an AMD Opteron or Athlon64 Hammer-family processor.
- Enables use of some extended instructions, and passes appropriate
- optimization flags to GCC.
-
-+config MK10
-+ bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
-+ ---help---
-+ Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
-+ Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
-+ Enables use of some extended instructions, and passes appropriate
-+ optimization flags to GCC.
-+
-+config MBARCELONA
-+ bool "AMD Barcelona"
-+ ---help---
-+ Select this for AMD Barcelona and newer processors.
-+
-+ Enables -march=barcelona
-+
-+config MBOBCAT
-+ bool "AMD Bobcat"
-+ ---help---
-+ Select this for AMD Bobcat processors.
-+
-+ Enables -march=btver1
-+
-+config MBULLDOZER
-+ bool "AMD Bulldozer"
-+ ---help---
-+ Select this for AMD Bulldozer processors.
-+
-+ Enables -march=bdver1
-+
-+config MPILEDRIVER
-+ bool "AMD Piledriver"
-+ ---help---
-+ Select this for AMD Piledriver processors.
-+
-+ Enables -march=bdver2
-+
-+config MJAGUAR
-+ bool "AMD Jaguar"
-+ ---help---
-+ Select this for AMD Jaguar processors.
-+
-+ Enables -march=btver2
-+
- config MCRUSOE
- bool "Crusoe"
- depends on X86_32
-@@ -251,8 +294,17 @@ config MPSC
- using the cpu family field
- in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
-
-+config MATOM
-+ bool "Intel Atom"
-+ ---help---
-+
-+ Select this for the Intel Atom platform. Intel Atom CPUs have an
-+ in-order pipelining architecture and thus can benefit from
-+ accordingly optimized code. Use a recent GCC with specific Atom
-+ support in order to fully benefit from selecting this option.
-+
- config MCORE2
-- bool "Core 2/newer Xeon"
-+ bool "Intel Core 2"
- ---help---
-
- Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -260,14 +312,40 @@ config MCORE2
- family in /proc/cpuinfo. Newer ones have 6 and older ones 15
- (not a typo)
-
--config MATOM
-- bool "Intel Atom"
-+ Enables -march=core2
-+
-+config MCOREI7
-+ bool "Intel Core i7"
- ---help---
-
-- Select this for the Intel Atom platform. Intel Atom CPUs have an
-- in-order pipelining architecture and thus can benefit from
-- accordingly optimized code. Use a recent GCC with specific Atom
-- support in order to fully benefit from selecting this option.
-+	  Select this for the Intel Nehalem platform. Intel Nehalem processors
-+ include Core i3, i5, i7, Xeon: 34xx, 35xx, 55xx, 56xx, 75xx processors.
-+
-+ Enables -march=corei7
-+
-+config MCOREI7AVX
-+ bool "Intel Core 2nd Gen AVX"
-+ ---help---
-+
-+ Select this for 2nd Gen Core processors including Sandy Bridge.
-+
-+ Enables -march=corei7-avx
-+
-+config MCOREAVXI
-+ bool "Intel Core 3rd Gen AVX"
-+ ---help---
-+
-+ Select this for 3rd Gen Core processors including Ivy Bridge.
-+
-+ Enables -march=core-avx-i
-+
-+config MCOREAVX2
-+ bool "Intel Core AVX2"
-+ ---help---
-+
-+ Select this for AVX2 enabled processors including Haswell.
-+
-+ Enables -march=core-avx2
-
- config GENERIC_CPU
- bool "Generic-x86-64"
-@@ -276,6 +354,19 @@ config GENERIC_CPU
- Generic x86-64 CPU.
- Run equally well on all x86-64 CPUs.
-
-+config MNATIVE
-+ bool "Native optimizations autodetected by GCC"
-+ ---help---
-+
-+ GCC 4.2 and above support -march=native, which automatically detects
-+ the optimum settings to use based on your processor. -march=native
-+ also detects and applies additional settings beyond -march specific
-+	  to your CPU (e.g. -msse4). Unless you have a specific reason not to
-+ (e.g. distcc cross-compiling), you should probably be using
-+ -march=native rather than anything listed below.
-+
-+ Enables -march=native
-+
- endchoice
-
- config X86_GENERIC
-@@ -300,7 +391,7 @@ config X86_INTERNODE_CACHE_SHIFT
- config X86_L1_CACHE_SHIFT
- int
- default "7" if MPENTIUM4 || MPSC
-- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+ default "6" if MK7 || MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MATOM || MVIAC7 || X86_GENERIC || MNATIVE || GENERIC_CPU
- default "4" if MELAN || M486 || MGEODEGX1
- default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-
-@@ -331,11 +422,11 @@ config X86_ALIGNMENT_16
-
- config X86_INTEL_USERCOPY
- def_bool y
-- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || MNATIVE || X86_GENERIC || MK8 || MK7 || MK10 || MBARCELONA || MEFFICEON || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2
-
- config X86_USE_PPRO_CHECKSUM
- def_bool y
-- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MATOM || MNATIVE
-
- config X86_USE_3DNOW
- def_bool y
-@@ -363,17 +454,17 @@ config X86_P6_NOP
-
- config X86_TSC
- def_bool y
-- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
-+	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MCOREI7 || MCOREI7AVX || MATOM) || X86_64 || MNATIVE
-
- config X86_CMPXCHG64
- def_bool y
-- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
-+ depends on X86_PAE || X86_64 || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
-
- # this should be set for all -march=.. options where the compiler
- # generates cmov.
- config X86_CMOV
- def_bool y
-- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+ depends on (MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
-
- config X86_MINIMUM_CPU_FAMILY
- int
-diff -uprN a/arch/x86/Makefile b/arch/x86/Makefile
---- a/arch/x86/Makefile 2013-11-03 18:41:51.000000000 -0500
-+++ b/arch/x86/Makefile 2013-12-15 06:21:24.354455723 -0500
-@@ -61,11 +61,26 @@ else
- KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3)
-
- # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
-+ cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
- cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
-+ cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
-+ cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
-+ cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
-+ cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
-+ cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
-+ cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
- cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
-
- cflags-$(CONFIG_MCORE2) += \
-- $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
-+ $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
-+ cflags-$(CONFIG_MCOREI7) += \
-+ $(call cc-option,-march=corei7,$(call cc-option,-mtune=corei7))
-+ cflags-$(CONFIG_MCOREI7AVX) += \
-+ $(call cc-option,-march=corei7-avx,$(call cc-option,-mtune=corei7-avx))
-+ cflags-$(CONFIG_MCOREAVXI) += \
-+ $(call cc-option,-march=core-avx-i,$(call cc-option,-mtune=core-avx-i))
-+ cflags-$(CONFIG_MCOREAVX2) += \
-+ $(call cc-option,-march=core-avx2,$(call cc-option,-mtune=core-avx2))
- cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
- cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
-diff -uprN a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
---- a/arch/x86/Makefile_32.cpu 2013-11-03 18:41:51.000000000 -0500
-+++ b/arch/x86/Makefile_32.cpu 2013-12-15 06:21:24.354455723 -0500
-@@ -23,7 +23,14 @@ cflags-$(CONFIG_MK6) += -march=k6
- # Please note, that patches that add -march=athlon-xp and friends are pointless.
- # They make zero difference whatsoever to performance at this time.
- cflags-$(CONFIG_MK7) += -march=athlon
-+cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
- cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
-+cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10,-march=athlon)
-+cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon)
-+cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1,-march=athlon)
-+cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon)
-+cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2,-march=athlon)
-+cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2,-march=athlon)
- cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
- cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
- cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
-@@ -32,6 +39,10 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-
- cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
- cflags-$(CONFIG_MVIAC7) += -march=i686
- cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
-+cflags-$(CONFIG_MCOREI7) += -march=i686 $(call tune,corei7)
-+cflags-$(CONFIG_MCOREI7AVX) += -march=i686 $(call tune,corei7-avx)
-+cflags-$(CONFIG_MCOREAVXI) += -march=i686 $(call tune,core-avx-i)
-+cflags-$(CONFIG_MCOREAVX2) += -march=i686 $(call tune,core-avx2)
- cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
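-+
-+# Illustrative note, not part of the pattern above: cc-option is the
-+# Kbuild helper that probes whether $(CC) accepts a flag, so e.g.
-+#   $(call cc-option,-march=btver2,-march=athlon)
-+# resolves to -march=btver2 on compilers that know the Jaguar target
-+# and falls back to -march=athlon on older ones, keeping the kernel
-+# buildable with toolchains that predate these -march values.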
diff --git a/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-4.0.patch b/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-4.0.patch
deleted file mode 100644
index 468d1573..00000000
--- a/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-4.0.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-From 63e26848e2df36a3c29d2d38ce8b008539d64a5d Mon Sep 17 00:00:00 2001
-From: Paolo Valente <paolo.valente@unimore.it>
-Date: Tue, 7 Apr 2015 13:39:12 +0200
-Subject: [PATCH 1/3] block: cgroups, kconfig, build bits for BFQ-v7r7-4.0
-
-Update Kconfig.iosched and do the related Makefile changes to include
-kernel configuration options for BFQ. Also add the bfqio controller
-to the cgroups subsystem.
-
-Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
-Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
----
- block/Kconfig.iosched | 32 ++++++++++++++++++++++++++++++++
- block/Makefile | 1 +
- include/linux/cgroup_subsys.h | 4 ++++
- 3 files changed, 37 insertions(+)
-
-diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
-index 421bef9..0ee5f0f 100644
---- a/block/Kconfig.iosched
-+++ b/block/Kconfig.iosched
-@@ -39,6 +39,27 @@ config CFQ_GROUP_IOSCHED
- ---help---
- Enable group IO scheduling in CFQ.
-
-+config IOSCHED_BFQ
-+ tristate "BFQ I/O scheduler"
-+ default n
-+ ---help---
-+ The BFQ I/O scheduler tries to distribute bandwidth among
-+ all processes according to their weights.
-+ It aims at distributing the bandwidth as desired, independently of
-+ the disk parameters and with any workload. It also tries to
-+ guarantee low latency to interactive and soft real-time
-+ applications. If compiled built-in (saying Y here), BFQ can
-+ be configured to support hierarchical scheduling.
-+
-+config CGROUP_BFQIO
-+ bool "BFQ hierarchical scheduling support"
-+ depends on CGROUPS && IOSCHED_BFQ=y
-+ default n
-+ ---help---
-+ Enable hierarchical scheduling in BFQ, using the cgroups
-+ filesystem interface. The name of the subsystem will be
-+ bfqio.
-+
- choice
- prompt "Default I/O scheduler"
- default DEFAULT_CFQ
-@@ -52,6 +73,16 @@ choice
- config DEFAULT_CFQ
- bool "CFQ" if IOSCHED_CFQ=y
-
-+ config DEFAULT_BFQ
-+ bool "BFQ" if IOSCHED_BFQ=y
-+ help
-+ Selects BFQ as the default I/O scheduler which will be
-+ used by default for all block devices.
-+ The BFQ I/O scheduler aims at distributing the bandwidth
-+ as desired, independently of the disk parameters and with
-+ any workload. It also tries to guarantee low latency to
-+ interactive and soft real-time applications.
-+
- config DEFAULT_NOOP
- bool "No-op"
-
-@@ -61,6 +92,7 @@ config DEFAULT_IOSCHED
- string
- default "deadline" if DEFAULT_DEADLINE
- default "cfq" if DEFAULT_CFQ
-+ default "bfq" if DEFAULT_BFQ
- default "noop" if DEFAULT_NOOP
-
- endmenu
-diff --git a/block/Makefile b/block/Makefile
-index 00ecc97..1ed86d5 100644
---- a/block/Makefile
-+++ b/block/Makefile
-@@ -18,6 +18,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
- obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
- obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
- obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
-+obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
-
- obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
- obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
-diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
-index e4a96fb..267d681 100644
---- a/include/linux/cgroup_subsys.h
-+++ b/include/linux/cgroup_subsys.h
-@@ -35,6 +35,10 @@ SUBSYS(freezer)
- SUBSYS(net_cls)
- #endif
-
-+#if IS_ENABLED(CONFIG_CGROUP_BFQIO)
-+SUBSYS(bfqio)
-+#endif
-+
- #if IS_ENABLED(CONFIG_CGROUP_PERF)
- SUBSYS(perf_event)
- #endif
---
-2.1.0
-
diff --git a/5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-4.0.patch1 b/5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-4.0.patch1
deleted file mode 100644
index a6cfc585..00000000
--- a/5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-4.0.patch1
+++ /dev/null
@@ -1,6966 +0,0 @@
-From 8cdf2dae6ee87049c7bb086d34e2ce981b545813 Mon Sep 17 00:00:00 2001
-From: Paolo Valente <paolo.valente@unimore.it>
-Date: Thu, 9 May 2013 19:10:02 +0200
-Subject: [PATCH 2/3] block: introduce the BFQ-v7r7 I/O sched for 4.0
-
-Add the BFQ-v7r7 I/O scheduler to 4.0.
-The general structure is borrowed from CFQ, as is much of the code for
-handling I/O contexts. Over time, several useful features have been
-ported from CFQ as well (details in the changelog in README.BFQ). A
-(bfq_)queue is associated to each task doing I/O on a device, and each
-time a scheduling decision has to be made a queue is selected and served
-until it expires.
-
- - Slices are given in the service domain: tasks are assigned
- budgets, measured in number of sectors. Once granted the disk, a task
- must, however, consume its assigned budget within a configurable
- maximum time (by default, the maximum possible value of the
- budgets is automatically computed to comply with this timeout).
- This allows the desired latency vs "throughput boosting" tradeoff
- to be set.
-
- - Budgets are scheduled according to a variant of WF2Q+, implemented
- using an augmented rb-tree to take eligibility into account while
- preserving an O(log N) overall complexity.
-
- - A low-latency tunable is provided; if enabled, both interactive
- and soft real-time applications are guaranteed a very low latency.
-
- - Latency guarantees are preserved also in the presence of NCQ.
-
- - Also with flash-based devices, a high throughput is achieved
- while still preserving latency guarantees.
-
- - BFQ features Early Queue Merge (EQM), a sort of fusion of the
- cooperating-queue-merging and the preemption mechanisms present
- in CFQ. EQM is in fact a unified mechanism that tries to get a
- sequential read pattern, and hence a high throughput, with any
- set of processes performing interleaved I/O over a contiguous
- sequence of sectors.
-
- - BFQ supports full hierarchical scheduling, exporting a cgroups
- interface. Since each node has a full scheduler, each group can
- be assigned its own weight.
-
- - If the cgroups interface is not used, only I/O priorities can be
- assigned to processes, with ioprio values mapped to weights
- with the relation weight = IOPRIO_BE_NR - ioprio (sketched below).
-
- - ioprio classes are served in strict priority order, i.e., lower
- priority queues are not served as long as there are higher
- priority queues. Among queues in the same class the bandwidth is
- distributed in proportion to the weight of each queue. A very
- thin extra bandwidth is however guaranteed to the Idle class, to
- prevent it from starving.
-
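-A short illustrative sketch of the ioprio-to-weight relation mentioned
-in the list above (weight = IOPRIO_BE_NR - ioprio). IOPRIO_BE_NR == 8
-matches the mainline ioprio headers; the program itself is a toy
-standalone example, not code from this patch:
-
- #include <stdio.h>
-
- #define IOPRIO_BE_NR 8 /* number of best-effort ioprio levels */
-
- /* a lower ioprio value (= higher priority) maps to a larger weight */
- static int ioprio_to_weight(int ioprio)
- {
- 	return IOPRIO_BE_NR - ioprio;
- }
-
- int main(void)
- {
- 	int p;
-
- 	for (p = 0; p < IOPRIO_BE_NR; p++)
- 		printf("ioprio %d -> weight %d\n", p, ioprio_to_weight(p));
- 	return 0; /* prints weights 8 down to 1 */
- }
-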
-Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
-Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
----
- block/bfq-cgroup.c | 936 ++++++++++++
- block/bfq-ioc.c | 36 +
- block/bfq-iosched.c | 3902 +++++++++++++++++++++++++++++++++++++++++++++++++++
- block/bfq-sched.c | 1214 ++++++++++++++++
- block/bfq.h | 775 ++++++++++
- 5 files changed, 6863 insertions(+)
- create mode 100644 block/bfq-cgroup.c
- create mode 100644 block/bfq-ioc.c
- create mode 100644 block/bfq-iosched.c
- create mode 100644 block/bfq-sched.c
- create mode 100644 block/bfq.h
-
-diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
-new file mode 100644
-index 0000000..11e2f1d
---- /dev/null
-+++ b/block/bfq-cgroup.c
-@@ -0,0 +1,936 @@
-+/*
-+ * BFQ: CGROUPS support.
-+ *
-+ * Based on ideas and code from CFQ:
-+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
-+ *
-+ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
-+ * Paolo Valente <paolo.valente@unimore.it>
-+ *
-+ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
-+ *
-+ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
-+ * file.
-+ */
-+
-+#ifdef CONFIG_CGROUP_BFQIO
-+
-+static DEFINE_MUTEX(bfqio_mutex);
-+
-+static bool bfqio_is_removed(struct bfqio_cgroup *bgrp)
-+{
-+ return bgrp ? !bgrp->online : false;
-+}
-+
-+static struct bfqio_cgroup bfqio_root_cgroup = {
-+ .weight = BFQ_DEFAULT_GRP_WEIGHT,
-+ .ioprio = BFQ_DEFAULT_GRP_IOPRIO,
-+ .ioprio_class = BFQ_DEFAULT_GRP_CLASS,
-+};
-+
-+static inline void bfq_init_entity(struct bfq_entity *entity,
-+ struct bfq_group *bfqg)
-+{
-+ entity->weight = entity->new_weight;
-+ entity->orig_weight = entity->new_weight;
-+ entity->ioprio = entity->new_ioprio;
-+ entity->ioprio_class = entity->new_ioprio_class;
-+ entity->parent = bfqg->my_entity;
-+ entity->sched_data = &bfqg->sched_data;
-+}
-+
-+static struct bfqio_cgroup *css_to_bfqio(struct cgroup_subsys_state *css)
-+{
-+ return css ? container_of(css, struct bfqio_cgroup, css) : NULL;
-+}
-+
-+/*
-+ * Search for the bfq_group associated with bfqd in the hash table
-+ * (for now just a list) of bgrp. Must be called under rcu_read_lock().
-+ */
-+static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp,
-+ struct bfq_data *bfqd)
-+{
-+ struct bfq_group *bfqg;
-+ void *key;
-+
-+ hlist_for_each_entry_rcu(bfqg, &bgrp->group_data, group_node) {
-+ key = rcu_dereference(bfqg->bfqd);
-+ if (key == bfqd)
-+ return bfqg;
-+ }
-+
-+ return NULL;
-+}
-+
-+static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp,
-+ struct bfq_group *bfqg)
-+{
-+ struct bfq_entity *entity = &bfqg->entity;
-+
-+ /*
-+ * If the weight of the entity has never been set via the sysfs
-+ * interface, then bgrp->weight == 0. In this case we initialize
-+ * the weight from the current ioprio value. Otherwise, the group
-+ * weight, if set, has priority over the ioprio value.
-+ */
-+ if (bgrp->weight == 0) {
-+ entity->new_weight = bfq_ioprio_to_weight(bgrp->ioprio);
-+ entity->new_ioprio = bgrp->ioprio;
-+ } else {
-+ if (bgrp->weight < BFQ_MIN_WEIGHT ||
-+ bgrp->weight > BFQ_MAX_WEIGHT) {
-+ printk(KERN_CRIT "bfq_group_init_entity: "
-+ "bgrp->weight %d\n", bgrp->weight);
-+ BUG();
-+ }
-+ entity->new_weight = bgrp->weight;
-+ entity->new_ioprio = bfq_weight_to_ioprio(bgrp->weight);
-+ }
-+ entity->orig_weight = entity->weight = entity->new_weight;
-+ entity->ioprio = entity->new_ioprio;
-+ entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class;
-+ entity->my_sched_data = &bfqg->sched_data;
-+ bfqg->active_entities = 0;
-+}
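-+
-+/*
-+ * Illustration of the precedence rule above (the numbers are made up
-+ * for the example, not taken from this patch):
-+ *
-+ *   bgrp->weight == 0,   bgrp->ioprio == 4:
-+ *       new_weight = bfq_ioprio_to_weight(4), new_ioprio = 4
-+ *   bgrp->weight == 500, bgrp->ioprio == 4:
-+ *       new_weight = 500, new_ioprio = bfq_weight_to_ioprio(500)
-+ *
-+ * i.e. an explicitly-set group weight wins over the ioprio, and the
-+ * other attribute is re-derived from whichever one was set.
-+ */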
-+
-+static inline void bfq_group_set_parent(struct bfq_group *bfqg,
-+ struct bfq_group *parent)
-+{
-+ struct bfq_entity *entity;
-+
-+ BUG_ON(parent == NULL);
-+ BUG_ON(bfqg == NULL);
-+
-+ entity = &bfqg->entity;
-+ entity->parent = parent->my_entity;
-+ entity->sched_data = &parent->sched_data;
-+}
-+
-+/**
-+ * bfq_group_chain_alloc - allocate a chain of groups.
-+ * @bfqd: queue descriptor.
-+ * @css: the leaf cgroup_subsys_state this chain starts from.
-+ *
-+ * Allocate a chain of groups starting from the one belonging to
-+ * @cgroup up to the root cgroup. Stop if a cgroup on the chain
-+ * to the root has already an allocated group on @bfqd.
-+ */
-+static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd,
-+ struct cgroup_subsys_state *css)
-+{
-+ struct bfqio_cgroup *bgrp;
-+ struct bfq_group *bfqg, *prev = NULL, *leaf = NULL;
-+
-+ for (; css != NULL; css = css->parent) {
-+ bgrp = css_to_bfqio(css);
-+
-+ bfqg = bfqio_lookup_group(bgrp, bfqd);
-+ if (bfqg != NULL) {
-+ /*
-+ * All the cgroups in the path from there to the
-+ * root must have a bfq_group for bfqd, so we don't
-+ * need any more allocations.
-+ */
-+ break;
-+ }
-+
-+ bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC);
-+ if (bfqg == NULL)
-+ goto cleanup;
-+
-+ bfq_group_init_entity(bgrp, bfqg);
-+ bfqg->my_entity = &bfqg->entity;
-+
-+ if (leaf == NULL) {
-+ leaf = bfqg;
-+ prev = leaf;
-+ } else {
-+ bfq_group_set_parent(prev, bfqg);
-+ /*
-+ * Build a list of allocated nodes using the bfqd
-+ * field, which is still unused and will be
-+ * initialized only after the node is
-+ * connected.
-+ */
-+ prev->bfqd = bfqg;
-+ prev = bfqg;
-+ }
-+ }
-+
-+ return leaf;
-+
-+cleanup:
-+ while (leaf != NULL) {
-+ prev = leaf;
-+ leaf = leaf->bfqd;
-+ kfree(prev);
-+ }
-+
-+ return NULL;
-+}
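-+
-+/*
-+ * Illustration of the temporary chain built above, assuming three
-+ * cgroup levels (leaf, middle, top) with no pre-existing groups:
-+ *
-+ *   leaf --bfqd--> middle --bfqd--> top
-+ *
-+ * The still-unused bfqd field doubles as a "next" link, so the
-+ * cleanup path can free every node on allocation failure; the links
-+ * are replaced with real bfqd pointers only later, by
-+ * bfq_group_chain_link(), once all allocations have succeeded.
-+ */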
-+
-+/**
-+ * bfq_group_chain_link - link an allocated group chain to a cgroup
-+ * hierarchy.
-+ * @bfqd: the queue descriptor.
-+ * @css: the leaf cgroup_subsys_state to start from.
-+ * @leaf: the leaf group (to be associated to @cgroup).
-+ *
-+ * Try to link a chain of groups to a cgroup hierarchy, connecting the
-+ * nodes bottom-up, so we can be sure that when we find a cgroup in the
-+ * hierarchy that already has a group associated to @bfqd all the nodes
-+ * in the path to the root cgroup have one too.
-+ *
-+ * On locking: the queue lock protects the hierarchy (there is a hierarchy
-+ * per device) while the bfqio_cgroup lock protects the list of groups
-+ * belonging to the same cgroup.
-+ */
-+static void bfq_group_chain_link(struct bfq_data *bfqd,
-+ struct cgroup_subsys_state *css,
-+ struct bfq_group *leaf)
-+{
-+ struct bfqio_cgroup *bgrp;
-+ struct bfq_group *bfqg, *next, *prev = NULL;
-+ unsigned long flags;
-+
-+ assert_spin_locked(bfqd->queue->queue_lock);
-+
-+ for (; css != NULL && leaf != NULL; css = css->parent) {
-+ bgrp = css_to_bfqio(css);
-+ next = leaf->bfqd;
-+
-+ bfqg = bfqio_lookup_group(bgrp, bfqd);
-+ BUG_ON(bfqg != NULL);
-+
-+ spin_lock_irqsave(&bgrp->lock, flags);
-+
-+ rcu_assign_pointer(leaf->bfqd, bfqd);
-+ hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data);
-+ hlist_add_head(&leaf->bfqd_node, &bfqd->group_list);
-+
-+ spin_unlock_irqrestore(&bgrp->lock, flags);
-+
-+ prev = leaf;
-+ leaf = next;
-+ }
-+
-+ BUG_ON(css == NULL && leaf != NULL);
-+ if (css != NULL && prev != NULL) {
-+ bgrp = css_to_bfqio(css);
-+ bfqg = bfqio_lookup_group(bgrp, bfqd);
-+ bfq_group_set_parent(prev, bfqg);
-+ }
-+}
-+
-+/**
-+ * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup.
-+ * @bfqd: queue descriptor.
-+ * @cgroup: cgroup being searched for.
-+ *
-+ * Return a group associated to @bfqd in @cgroup, allocating one if
-+ * necessary. When a group is returned all the cgroups in the path
-+ * to the root have a group associated to @bfqd.
-+ *
-+ * If the allocation fails, return the root group: this breaks guarantees
-+ * but is a safe fallback. If this loss becomes a problem it can be
-+ * mitigated using the equivalent weight (given by the product of the
-+ * weights of the groups in the path from @group to the root) in the
-+ * root scheduler.
-+ *
-+ * We allocate all the missing nodes in the path from the leaf cgroup
-+ * to the root and we connect the nodes only after all the allocations
-+ * have been successful.
-+ */
-+static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
-+ struct cgroup_subsys_state *css)
-+{
-+ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
-+ struct bfq_group *bfqg;
-+
-+ bfqg = bfqio_lookup_group(bgrp, bfqd);
-+ if (bfqg != NULL)
-+ return bfqg;
-+
-+ bfqg = bfq_group_chain_alloc(bfqd, css);
-+ if (bfqg != NULL)
-+ bfq_group_chain_link(bfqd, css, bfqg);
-+ else
-+ bfqg = bfqd->root_group;
-+
-+ return bfqg;
-+}
-+
-+/**
-+ * bfq_bfqq_move - migrate @bfqq to @bfqg.
-+ * @bfqd: queue descriptor.
-+ * @bfqq: the queue to move.
-+ * @entity: @bfqq's entity.
-+ * @bfqg: the group to move to.
-+ *
-+ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
-+ * it on the new one. Avoid putting the entity on the old group idle tree.
-+ *
-+ * Must be called under the queue lock; the cgroup owning @bfqg must
-+ * not disappear (by now this just means that we are called under
-+ * rcu_read_lock()).
-+ */
-+static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-+ struct bfq_entity *entity, struct bfq_group *bfqg)
-+{
-+ int busy, resume;
-+
-+ busy = bfq_bfqq_busy(bfqq);
-+ resume = !RB_EMPTY_ROOT(&bfqq->sort_list);
-+
-+ BUG_ON(resume && !entity->on_st);
-+ BUG_ON(busy && !resume && entity->on_st &&
-+ bfqq != bfqd->in_service_queue);
-+
-+ if (busy) {
-+ BUG_ON(atomic_read(&bfqq->ref) < 2);
-+
-+ if (!resume)
-+ bfq_del_bfqq_busy(bfqd, bfqq, 0);
-+ else
-+ bfq_deactivate_bfqq(bfqd, bfqq, 0);
-+ } else if (entity->on_st)
-+ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
-+
-+ /*
-+ * Here we use a reference to bfqg. We don't need a refcounter
-+ * as the cgroup reference will not be dropped, so that its
-+ * destroy() callback will not be invoked.
-+ */
-+ entity->parent = bfqg->my_entity;
-+ entity->sched_data = &bfqg->sched_data;
-+
-+ if (busy && resume)
-+ bfq_activate_bfqq(bfqd, bfqq);
-+
-+ if (bfqd->in_service_queue == NULL && !bfqd->rq_in_driver)
-+ bfq_schedule_dispatch(bfqd);
-+}
-+
-+/**
-+ * __bfq_bic_change_cgroup - move @bic to @cgroup.
-+ * @bfqd: the queue descriptor.
-+ * @bic: the bic to move.
-+ * @cgroup: the cgroup to move to.
-+ *
-+ * Move bic to cgroup, assuming that bfqd->queue is locked; the caller
-+ * has to make sure that the reference to cgroup is valid across the call.
-+ *
-+ * NOTE: an alternative approach might have been to store the current
-+ * cgroup in bfqq and get a reference to it, reducing the lookup
-+ * time here, at the price of slightly more complex code.
-+ */
-+static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
-+ struct bfq_io_cq *bic,
-+ struct cgroup_subsys_state *css)
-+{
-+ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
-+ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
-+ struct bfq_entity *entity;
-+ struct bfq_group *bfqg;
-+ struct bfqio_cgroup *bgrp;
-+
-+ bgrp = css_to_bfqio(css);
-+
-+ bfqg = bfq_find_alloc_group(bfqd, css);
-+ if (async_bfqq != NULL) {
-+ entity = &async_bfqq->entity;
-+
-+ if (entity->sched_data != &bfqg->sched_data) {
-+ bic_set_bfqq(bic, NULL, 0);
-+ bfq_log_bfqq(bfqd, async_bfqq,
-+ "bic_change_group: %p %d",
-+ async_bfqq, atomic_read(&async_bfqq->ref));
-+ bfq_put_queue(async_bfqq);
-+ }
-+ }
-+
-+ if (sync_bfqq != NULL) {
-+ entity = &sync_bfqq->entity;
-+ if (entity->sched_data != &bfqg->sched_data)
-+ bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg);
-+ }
-+
-+ return bfqg;
-+}
-+
-+/**
-+ * bfq_bic_change_cgroup - move @bic to @cgroup.
-+ * @bic: the bic being migrated.
-+ * @cgroup: the destination cgroup.
-+ *
-+ * When the task owning @bic is moved to @cgroup, @bic is immediately
-+ * moved into its new parent group.
-+ */
-+static void bfq_bic_change_cgroup(struct bfq_io_cq *bic,
-+ struct cgroup_subsys_state *css)
-+{
-+ struct bfq_data *bfqd;
-+ unsigned long uninitialized_var(flags);
-+
-+ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
-+ &flags);
-+ if (bfqd != NULL) {
-+ __bfq_bic_change_cgroup(bfqd, bic, css);
-+ bfq_put_bfqd_unlock(bfqd, &flags);
-+ }
-+}
-+
-+/**
-+ * bfq_bic_update_cgroup - update the cgroup of @bic.
-+ * @bic: the @bic to update.
-+ *
-+ * Make sure that @bic is enqueued in the cgroup of the current task.
-+ * We need this in addition to moving bics during the cgroup attach
-+ * phase because the task owning @bic could be at its first disk
-+ * access or we may end up in the root cgroup as the result of a
-+ * memory allocation failure and here we try to move to the right
-+ * group.
-+ *
-+ * Must be called under the queue lock. It is safe to use the returned
-+ * value even after the rcu_read_unlock() as the migration/destruction
-+ * paths act under the queue lock too. IOW it is impossible to race with
-+ * group migration/destruction and end up with an invalid group as:
-+ * a) here cgroup has not yet been destroyed, nor its destroy callback
-+ * has started execution, as current holds a reference to it,
-+ * b) if it is destroyed after rcu_read_unlock() [after current is
-+ * migrated to a different cgroup] its attach() callback will have
-+ * taken care of removing all the references to the old cgroup data.
-+ */
-+static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic)
-+{
-+ struct bfq_data *bfqd = bic_to_bfqd(bic);
-+ struct bfq_group *bfqg;
-+ struct cgroup_subsys_state *css;
-+
-+ BUG_ON(bfqd == NULL);
-+
-+ rcu_read_lock();
-+ css = task_css(current, bfqio_cgrp_id);
-+ bfqg = __bfq_bic_change_cgroup(bfqd, bic, css);
-+ rcu_read_unlock();
-+
-+ return bfqg;
-+}
-+
-+/**
-+ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
-+ * @st: the service tree being flushed.
-+ */
-+static inline void bfq_flush_idle_tree(struct bfq_service_tree *st)
-+{
-+ struct bfq_entity *entity = st->first_idle;
-+
-+ for (; entity != NULL; entity = st->first_idle)
-+ __bfq_deactivate_entity(entity, 0);
-+}
-+
-+/**
-+ * bfq_reparent_leaf_entity - move leaf entity to the root_group.
-+ * @bfqd: the device data structure with the root group.
-+ * @entity: the entity to move.
-+ */
-+static inline void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
-+ struct bfq_entity *entity)
-+{
-+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-+
-+ BUG_ON(bfqq == NULL);
-+ bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group);
-+ return;
-+}
-+
-+/**
-+ * bfq_reparent_active_entities - move to the root group all active
-+ * entities.
-+ * @bfqd: the device data structure with the root group.
-+ * @bfqg: the group to move from.
-+ * @st: the service tree with the entities.
-+ *
-+ * Needs queue_lock to be taken and reference to be valid over the call.
-+ */
-+static inline void bfq_reparent_active_entities(struct bfq_data *bfqd,
-+ struct bfq_group *bfqg,
-+ struct bfq_service_tree *st)
-+{
-+ struct rb_root *active = &st->active;
-+ struct bfq_entity *entity = NULL;
-+
-+ if (!RB_EMPTY_ROOT(&st->active))
-+ entity = bfq_entity_of(rb_first(active));
-+
-+ for (; entity != NULL; entity = bfq_entity_of(rb_first(active)))
-+ bfq_reparent_leaf_entity(bfqd, entity);
-+
-+ if (bfqg->sched_data.in_service_entity != NULL)
-+ bfq_reparent_leaf_entity(bfqd,
-+ bfqg->sched_data.in_service_entity);
-+
-+ return;
-+}
-+
-+/**
-+ * bfq_destroy_group - destroy @bfqg.
-+ * @bgrp: the bfqio_cgroup containing @bfqg.
-+ * @bfqg: the group being destroyed.
-+ *
-+ * Destroy @bfqg, making sure that it is not referenced from its parent.
-+ */
-+static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg)
-+{
-+ struct bfq_data *bfqd;
-+ struct bfq_service_tree *st;
-+ struct bfq_entity *entity = bfqg->my_entity;
-+ unsigned long uninitialized_var(flags);
-+ int i;
-+
-+ hlist_del(&bfqg->group_node);
-+
-+ /*
-+ * Empty all service_trees belonging to this group before
-+ * deactivating the group itself.
-+ */
-+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
-+ st = bfqg->sched_data.service_tree + i;
-+
-+ /*
-+ * The idle tree may still contain bfq_queues belonging
-+ * to exited tasks, because they never migrated to a different
-+ * cgroup from the one being destroyed now. No one else
-+ * can access them so it's safe to act without any lock.
-+ */
-+ bfq_flush_idle_tree(st);
-+
-+ /*
-+ * It may happen that some queues are still active
-+ * (busy) upon group destruction (if the corresponding
-+ * processes have been forced to terminate). We move
-+ * all the leaf entities corresponding to these queues
-+ * to the root_group.
-+ * Also, it may happen that the group has an entity
-+ * in service, which is disconnected from the active
-+ * tree: it must be moved, too.
-+ * There is no need to put the sync queues, as the
-+ * scheduler has taken no reference.
-+ */
-+ bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
-+ if (bfqd != NULL) {
-+ bfq_reparent_active_entities(bfqd, bfqg, st);
-+ bfq_put_bfqd_unlock(bfqd, &flags);
-+ }
-+ BUG_ON(!RB_EMPTY_ROOT(&st->active));
-+ BUG_ON(!RB_EMPTY_ROOT(&st->idle));
-+ }
-+ BUG_ON(bfqg->sched_data.next_in_service != NULL);
-+ BUG_ON(bfqg->sched_data.in_service_entity != NULL);
-+
-+ /*
-+ * We may race with device destruction, take extra care when
-+ * dereferencing bfqg->bfqd.
-+ */
-+ bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
-+ if (bfqd != NULL) {
-+ hlist_del(&bfqg->bfqd_node);
-+ __bfq_deactivate_entity(entity, 0);
-+ bfq_put_async_queues(bfqd, bfqg);
-+ bfq_put_bfqd_unlock(bfqd, &flags);
-+ }
-+ BUG_ON(entity->tree != NULL);
-+
-+ /*
-+ * No need to defer the kfree() to the end of the RCU grace
-+ * period: we are called from the destroy() callback of our
-+ * cgroup, so we can be sure that no one is a) still using
-+ * this cgroup or b) doing lookups in it.
-+ */
-+ kfree(bfqg);
-+}
-+
-+static void bfq_end_wr_async(struct bfq_data *bfqd)
-+{
-+ struct hlist_node *tmp;
-+ struct bfq_group *bfqg;
-+
-+ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node)
-+ bfq_end_wr_async_queues(bfqd, bfqg);
-+ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
-+}
-+
-+/**
-+ * bfq_disconnect_groups - disconnect @bfqd from all its groups.
-+ * @bfqd: the device descriptor being exited.
-+ *
-+ * When the device exits we just make sure that no lookup can return
-+ * the now unused group structures. They will be deallocated on cgroup
-+ * destruction.
-+ */
-+static void bfq_disconnect_groups(struct bfq_data *bfqd)
-+{
-+ struct hlist_node *tmp;
-+ struct bfq_group *bfqg;
-+
-+ bfq_log(bfqd, "disconnect_groups beginning");
-+ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) {
-+ hlist_del(&bfqg->bfqd_node);
-+
-+ __bfq_deactivate_entity(bfqg->my_entity, 0);
-+
-+ /*
-+ * Don't remove from the group hash, just set an
-+ * invalid key. No lookups can race with the
-+ * assignment as bfqd is being destroyed; this
-+ * implies also that new elements cannot be added
-+ * to the list.
-+ */
-+ rcu_assign_pointer(bfqg->bfqd, NULL);
-+
-+ bfq_log(bfqd, "disconnect_groups: put async for group %p",
-+ bfqg);
-+ bfq_put_async_queues(bfqd, bfqg);
-+ }
-+}
-+
-+static inline void bfq_free_root_group(struct bfq_data *bfqd)
-+{
-+ struct bfqio_cgroup *bgrp = &bfqio_root_cgroup;
-+ struct bfq_group *bfqg = bfqd->root_group;
-+
-+ bfq_put_async_queues(bfqd, bfqg);
-+
-+ spin_lock_irq(&bgrp->lock);
-+ hlist_del_rcu(&bfqg->group_node);
-+ spin_unlock_irq(&bgrp->lock);
-+
-+ /*
-+ * No need to synchronize_rcu() here: since the device is gone
-+ * there cannot be any read-side access to its root_group.
-+ */
-+ kfree(bfqg);
-+}
-+
-+static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
-+{
-+ struct bfq_group *bfqg;
-+ struct bfqio_cgroup *bgrp;
-+ int i;
-+
-+ bfqg = kzalloc_node(sizeof(*bfqg), GFP_KERNEL, node);
-+ if (bfqg == NULL)
-+ return NULL;
-+
-+ bfqg->entity.parent = NULL;
-+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
-+ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
-+
-+ bgrp = &bfqio_root_cgroup;
-+ spin_lock_irq(&bgrp->lock);
-+ rcu_assign_pointer(bfqg->bfqd, bfqd);
-+ hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data);
-+ spin_unlock_irq(&bgrp->lock);
-+
-+ return bfqg;
-+}
-+
-+#define SHOW_FUNCTION(__VAR) \
-+static u64 bfqio_cgroup_##__VAR##_read(struct cgroup_subsys_state *css, \
-+ struct cftype *cftype) \
-+{ \
-+ struct bfqio_cgroup *bgrp = css_to_bfqio(css); \
-+ u64 ret = -ENODEV; \
-+ \
-+ mutex_lock(&bfqio_mutex); \
-+ if (bfqio_is_removed(bgrp)) \
-+ goto out_unlock; \
-+ \
-+ spin_lock_irq(&bgrp->lock); \
-+ ret = bgrp->__VAR; \
-+ spin_unlock_irq(&bgrp->lock); \
-+ \
-+out_unlock: \
-+ mutex_unlock(&bfqio_mutex); \
-+ return ret; \
-+}
-+
-+SHOW_FUNCTION(weight);
-+SHOW_FUNCTION(ioprio);
-+SHOW_FUNCTION(ioprio_class);
-+#undef SHOW_FUNCTION
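-+
-+/*
-+ * For reference, SHOW_FUNCTION(weight) above expands (reformatted) to
-+ * a read handler of this shape, and analogously for ioprio and
-+ * ioprio_class:
-+ *
-+ *   static u64 bfqio_cgroup_weight_read(struct cgroup_subsys_state *css,
-+ *                                       struct cftype *cftype)
-+ *   {
-+ *           ...returns bgrp->weight under bgrp->lock, or -ENODEV if
-+ *           the cgroup is being removed...
-+ *   }
-+ */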
-+
-+#define STORE_FUNCTION(__VAR, __MIN, __MAX) \
-+static int bfqio_cgroup_##__VAR##_write(struct cgroup_subsys_state *css,\
-+ struct cftype *cftype, \
-+ u64 val) \
-+{ \
-+ struct bfqio_cgroup *bgrp = css_to_bfqio(css); \
-+ struct bfq_group *bfqg; \
-+ int ret = -EINVAL; \
-+ \
-+ if (val < (__MIN) || val > (__MAX)) \
-+ return ret; \
-+ \
-+ ret = -ENODEV; \
-+ mutex_lock(&bfqio_mutex); \
-+ if (bfqio_is_removed(bgrp)) \
-+ goto out_unlock; \
-+ ret = 0; \
-+ \
-+ spin_lock_irq(&bgrp->lock); \
-+ bgrp->__VAR = (unsigned short)val; \
-+ hlist_for_each_entry(bfqg, &bgrp->group_data, group_node) { \
-+ /* \
-+ * Setting the ioprio_changed flag of the entity \
-+ * to 1 with new_##__VAR == ##__VAR would re-set \
-+ * the value of the weight to its ioprio mapping. \
-+ * Set the flag only if necessary. \
-+ */ \
-+ if ((unsigned short)val != bfqg->entity.new_##__VAR) { \
-+ bfqg->entity.new_##__VAR = (unsigned short)val; \
-+ /* \
-+ * Make sure that the above new value has been \
-+ * stored in bfqg->entity.new_##__VAR before \
-+ * setting the ioprio_changed flag. In fact, \
-+ * this flag may be read asynchronously (in \
-+ * critical sections protected by a different \
-+ * lock than that held here), and finding this \
-+ * flag set may cause the execution of the code \
-+ * for updating parameters whose value may \
-+ * depend also on bfqg->entity.new_##__VAR (in \
-+ * __bfq_entity_update_weight_prio). \
-+ * This barrier makes sure that the new value \
-+ * of bfqg->entity.new_##__VAR is correctly \
-+ * seen in that code. \
-+ */ \
-+ smp_wmb(); \
-+ bfqg->entity.ioprio_changed = 1; \
-+ } \
-+ } \
-+ spin_unlock_irq(&bgrp->lock); \
-+ \
-+out_unlock: \
-+ mutex_unlock(&bfqio_mutex); \
-+ return ret; \
-+}
-+
-+STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT);
-+STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1);
-+STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE);
-+#undef STORE_FUNCTION
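-+
-+/*
-+ * Hedged sketch of the reader side that the smp_wmb() in the macro
-+ * above pairs with. The real reader is __bfq_entity_update_weight_prio()
-+ * in bfq-sched.c, which is not part of this hunk, so the exact shape
-+ * below is only an assumption meant to illustrate the required
-+ * ordering:
-+ *
-+ *   if (entity->ioprio_changed) {
-+ *           // seeing the flag set must imply seeing new_weight
-+ *           entity->weight = entity->new_weight;
-+ *           ...
-+ *   }
-+ */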
-+
-+static struct cftype bfqio_files[] = {
-+ {
-+ .name = "weight",
-+ .read_u64 = bfqio_cgroup_weight_read,
-+ .write_u64 = bfqio_cgroup_weight_write,
-+ },
-+ {
-+ .name = "ioprio",
-+ .read_u64 = bfqio_cgroup_ioprio_read,
-+ .write_u64 = bfqio_cgroup_ioprio_write,
-+ },
-+ {
-+ .name = "ioprio_class",
-+ .read_u64 = bfqio_cgroup_ioprio_class_read,
-+ .write_u64 = bfqio_cgroup_ioprio_class_write,
-+ },
-+ { }, /* terminate */
-+};
-+
-+static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys_state
-+ *parent_css)
-+{
-+ struct bfqio_cgroup *bgrp;
-+
-+ if (parent_css != NULL) {
-+ bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL);
-+ if (bgrp == NULL)
-+ return ERR_PTR(-ENOMEM);
-+ } else
-+ bgrp = &bfqio_root_cgroup;
-+
-+ spin_lock_init(&bgrp->lock);
-+ INIT_HLIST_HEAD(&bgrp->group_data);
-+ bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO;
-+ bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS;
-+
-+ return &bgrp->css;
-+}
-+
-+/*
-+ * We cannot support shared io contexts, as we have no means to support
-+ * two tasks with the same ioc in two different groups without major rework
-+ * of the main bic/bfqq data structures. For now, we allow a task to change
-+ * its cgroup only if it's the only owner of its ioc; the drawback of this
-+ * behavior is that a group containing a task that forked using CLONE_IO
-+ * will not be destroyed until the tasks sharing the ioc die.
-+ */
-+static int bfqio_can_attach(struct cgroup_subsys_state *css,
-+ struct cgroup_taskset *tset)
-+{
-+ struct task_struct *task;
-+ struct io_context *ioc;
-+ int ret = 0;
-+
-+ cgroup_taskset_for_each(task, tset) {
-+ /*
-+ * task_lock() is needed to avoid races with
-+ * exit_io_context()
-+ */
-+ task_lock(task);
-+ ioc = task->io_context;
-+ if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1)
-+ /*
-+ * ioc == NULL means that the task is either too
-+ * young or exiting: if it still has no ioc, the
-+ * ioc can't be shared, if the task is exiting the
-+ * attach will fail anyway, no matter what we
-+ * return here.
-+ */
-+ ret = -EINVAL;
-+ task_unlock(task);
-+ if (ret)
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+static void bfqio_attach(struct cgroup_subsys_state *css,
-+ struct cgroup_taskset *tset)
-+{
-+ struct task_struct *task;
-+ struct io_context *ioc;
-+ struct io_cq *icq;
-+
-+ /*
-+ * IMPORTANT NOTE: The move of more than one process at a time to a
-+ * new group has not yet been tested.
-+ */
-+ cgroup_taskset_for_each(task, tset) {
-+ ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
-+ if (ioc) {
-+ /*
-+ * Handle cgroup change here.
-+ */
-+ rcu_read_lock();
-+ hlist_for_each_entry_rcu(icq, &ioc->icq_list, ioc_node)
-+ if (!strncmp(
-+ icq->q->elevator->type->elevator_name,
-+ "bfq", ELV_NAME_MAX))
-+ bfq_bic_change_cgroup(icq_to_bic(icq),
-+ css);
-+ rcu_read_unlock();
-+ put_io_context(ioc);
-+ }
-+ }
-+}
-+
-+static void bfqio_destroy(struct cgroup_subsys_state *css)
-+{
-+ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
-+ struct hlist_node *tmp;
-+ struct bfq_group *bfqg;
-+
-+ /*
-+ * Since we are destroying the cgroup, there are no more tasks
-+ * referencing it, and all the RCU grace periods that may have
-+ * referenced it are ended (as the destruction of the parent
-+ * cgroup is RCU-safe); bgrp->group_data will not be accessed by
-+ * anything else and we don't need any synchronization.
-+ */
-+ hlist_for_each_entry_safe(bfqg, tmp, &bgrp->group_data, group_node)
-+ bfq_destroy_group(bgrp, bfqg);
-+
-+ BUG_ON(!hlist_empty(&bgrp->group_data));
-+
-+ kfree(bgrp);
-+}
-+
-+static int bfqio_css_online(struct cgroup_subsys_state *css)
-+{
-+ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
-+
-+ mutex_lock(&bfqio_mutex);
-+ bgrp->online = true;
-+ mutex_unlock(&bfqio_mutex);
-+
-+ return 0;
-+}
-+
-+static void bfqio_css_offline(struct cgroup_subsys_state *css)
-+{
-+ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
-+
-+ mutex_lock(&bfqio_mutex);
-+ bgrp->online = false;
-+ mutex_unlock(&bfqio_mutex);
-+}
-+
-+struct cgroup_subsys bfqio_cgrp_subsys = {
-+ .css_alloc = bfqio_create,
-+ .css_online = bfqio_css_online,
-+ .css_offline = bfqio_css_offline,
-+ .can_attach = bfqio_can_attach,
-+ .attach = bfqio_attach,
-+ .css_free = bfqio_destroy,
-+ .legacy_cftypes = bfqio_files,
-+};
-+#else
-+static inline void bfq_init_entity(struct bfq_entity *entity,
-+ struct bfq_group *bfqg)
-+{
-+ entity->weight = entity->new_weight;
-+ entity->orig_weight = entity->new_weight;
-+ entity->ioprio = entity->new_ioprio;
-+ entity->ioprio_class = entity->new_ioprio_class;
-+ entity->sched_data = &bfqg->sched_data;
-+}
-+
-+static inline struct bfq_group *
-+bfq_bic_update_cgroup(struct bfq_io_cq *bic)
-+{
-+ struct bfq_data *bfqd = bic_to_bfqd(bic);
-+ return bfqd->root_group;
-+}
-+
-+static inline void bfq_bfqq_move(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq,
-+ struct bfq_entity *entity,
-+ struct bfq_group *bfqg)
-+{
-+}
-+
-+static void bfq_end_wr_async(struct bfq_data *bfqd)
-+{
-+ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
-+}
-+
-+static inline void bfq_disconnect_groups(struct bfq_data *bfqd)
-+{
-+ bfq_put_async_queues(bfqd, bfqd->root_group);
-+}
-+
-+static inline void bfq_free_root_group(struct bfq_data *bfqd)
-+{
-+ kfree(bfqd->root_group);
-+}
-+
-+static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
-+{
-+ struct bfq_group *bfqg;
-+ int i;
-+
-+ bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
-+ if (bfqg == NULL)
-+ return NULL;
-+
-+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
-+ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
-+
-+ return bfqg;
-+}
-+#endif
-diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c
-new file mode 100644
-index 0000000..7f6b000
---- /dev/null
-+++ b/block/bfq-ioc.c
-@@ -0,0 +1,36 @@
-+/*
-+ * BFQ: I/O context handling.
-+ *
-+ * Based on ideas and code from CFQ:
-+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
-+ *
-+ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
-+ * Paolo Valente <paolo.valente@unimore.it>
-+ *
-+ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
-+ */
-+
-+/**
-+ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
-+ * @icq: the iocontext queue.
-+ */
-+static inline struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
-+{
-+ /* bic->icq is the first member, %NULL will convert to %NULL */
-+ return container_of(icq, struct bfq_io_cq, icq);
-+}
-+
-+/**
-+ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
-+ * @bfqd: the lookup key.
-+ * @ioc: the io_context of the process doing I/O.
-+ *
-+ * Queue lock must be held.
-+ */
-+static inline struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
-+ struct io_context *ioc)
-+{
-+ if (ioc)
-+ return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue));
-+ return NULL;
-+}
-diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
-new file mode 100644
-index 0000000..97ee934
---- /dev/null
-+++ b/block/bfq-iosched.c
-@@ -0,0 +1,3902 @@
-+/*
-+ * Budget Fair Queueing (BFQ) disk scheduler.
-+ *
-+ * Based on ideas and code from CFQ:
-+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
-+ *
-+ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
-+ * Paolo Valente <paolo.valente@unimore.it>
-+ *
-+ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
-+ *
-+ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
-+ * file.
-+ *
-+ * BFQ is a proportional-share storage-I/O scheduling algorithm based on
-+ * the slice-by-slice service scheme of CFQ. But BFQ assigns budgets,
-+ * measured in number of sectors, to processes instead of time slices. The
-+ * device is not granted to the in-service process for a given time slice,
-+ * but until it has exhausted its assigned budget. This change from the time
-+ * to the service domain allows BFQ to distribute the device throughput
-+ * among processes as desired, without any distortion due to ZBR, workload
-+ * fluctuations or other factors. BFQ uses an ad hoc internal scheduler,
-+ * called B-WF2Q+, to schedule processes according to their budgets. More
-+ * precisely, BFQ schedules queues associated to processes. Thanks to the
-+ * accurate policy of B-WF2Q+, BFQ can afford to assign high budgets to
-+ * I/O-bound processes issuing sequential requests (to boost the
-+ * throughput), and yet guarantee a low latency to interactive and soft
-+ * real-time applications.
-+ *
-+ * BFQ is described in [1], where also a reference to the initial, more
-+ * theoretical paper on BFQ can be found. The interested reader can find
-+ * in the latter paper full details on the main algorithm, as well as
-+ * formulas of the guarantees and formal proofs of all the properties.
-+ * With respect to the version of BFQ presented in these papers, this
-+ * implementation adds a few more heuristics, such as the one that
-+ * guarantees a low latency to soft real-time applications, and a
-+ * hierarchical extension based on H-WF2Q+.
-+ *
-+ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
-+ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
-+ * complexity derives from the one introduced with EEVDF in [3].
-+ *
-+ * [1] P. Valente and M. Andreolini, ``Improving Application Responsiveness
-+ * with the BFQ Disk I/O Scheduler'',
-+ * Proceedings of the 5th Annual International Systems and Storage
-+ * Conference (SYSTOR '12), June 2012.
-+ *
-+ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf
-+ *
-+ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
-+ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
-+ * Oct 1997.
-+ *
-+ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
-+ *
-+ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
-+ * First: A Flexible and Accurate Mechanism for Proportional Share
-+ * Resource Allocation,'' technical report.
-+ *
-+ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
-+ */
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/blkdev.h>
-+#include <linux/cgroup.h>
-+#include <linux/elevator.h>
-+#include <linux/jiffies.h>
-+#include <linux/rbtree.h>
-+#include <linux/ioprio.h>
-+#include "bfq.h"
-+#include "blk.h"
-+
-+/* Max number of dispatches in one round of service. */
-+static const int bfq_quantum = 4;
-+
-+/* Expiration time of sync (0) and async (1) requests, in jiffies. */
-+static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
-+
-+/* Maximum backwards seek, in KiB. */
-+static const int bfq_back_max = 16 * 1024;
-+
-+/* Penalty of a backwards seek, in number of sectors. */
-+static const int bfq_back_penalty = 2;
-+
-+/* Idling period duration, in jiffies. */
-+static int bfq_slice_idle = HZ / 125;
-+
-+/* Default maximum budget values, in sectors and number of requests. */
-+static const int bfq_default_max_budget = 16 * 1024;
-+static const int bfq_max_budget_async_rq = 4;
-+
-+/*
-+ * Async to sync throughput distribution is controlled as follows:
-+ * when an async request is served, the entity is charged the number
-+ * of sectors of the request, multiplied by the factor below
-+ */
-+static const int bfq_async_charge_factor = 10;
-+
-+/* Default timeout values, in jiffies, approximating CFQ defaults. */
-+static const int bfq_timeout_sync = HZ / 8;
-+static int bfq_timeout_async = HZ / 25;
-+
-+struct kmem_cache *bfq_pool;
-+
-+/* Below this threshold (in ms), we consider thinktime immediate. */
-+#define BFQ_MIN_TT 2
-+
-+/* hw_tag detection: parallel requests threshold and min samples needed. */
-+#define BFQ_HW_QUEUE_THRESHOLD 4
-+#define BFQ_HW_QUEUE_SAMPLES 32
-+
-+#define BFQQ_SEEK_THR (sector_t)(8 * 1024)
-+#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)
-+
-+/* Min samples used for peak rate estimation (for autotuning). */
-+#define BFQ_PEAK_RATE_SAMPLES 32
-+
-+/* Shift used for peak rate fixed precision calculations. */
-+#define BFQ_RATE_SHIFT 16
-+
-+/*
-+ * By default, BFQ computes the duration of the weight raising for
-+ * interactive applications automatically, using the following formula:
-+ * duration = (R / r) * T, where r is the peak rate of the device, and
-+ * R and T are two reference parameters.
-+ * In particular, R is the peak rate of the reference device (see below),
-+ * and T is a reference time: given the systems that are likely to be
-+ * installed on the reference device according to its speed class, T is
-+ * about the maximum time needed, under BFQ and while reading two files in
-+ * parallel, to load typical large applications on these systems.
-+ * In practice, the slower/faster the device at hand is, the more/less it
-+ * takes to load applications with respect to the reference device.
-+ * Accordingly, the longer/shorter BFQ grants weight raising to interactive
-+ * applications.
-+ *
-+ * BFQ uses four different reference pairs (R, T), depending on:
-+ * . whether the device is rotational or non-rotational;
-+ * . whether the device is slow, such as old or portable HDDs, as well as
-+ * SD cards, or fast, such as newer HDDs and SSDs.
-+ *
-+ * The device's speed class is dynamically (re)detected in
-+ * bfq_update_peak_rate() every time the estimated peak rate is updated.
-+ *
-+ * In the following definitions, R_slow[0]/R_fast[0] and T_slow[0]/T_fast[0]
-+ * are the reference values for a slow/fast rotational device, whereas
-+ * R_slow[1]/R_fast[1] and T_slow[1]/T_fast[1] are the reference values for
-+ * a slow/fast non-rotational device. Finally, device_speed_thresh are the
-+ * thresholds used to switch between speed classes.
-+ * Both the reference peak rates and the thresholds are measured in
-+ * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
-+ */
-+static int R_slow[2] = {1536, 10752};
-+static int R_fast[2] = {17415, 34791};
-+/*
-+ * To improve readability, a conversion function is used to initialize the
-+ * following arrays, which entails that they can be initialized only in a
-+ * function.
-+ */
-+static int T_slow[2];
-+static int T_fast[2];
-+static int device_speed_thresh[2];
-+
-+#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
-+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
-+
-+#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
-+#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
-+
-+static inline void bfq_schedule_dispatch(struct bfq_data *bfqd);
-+
-+#include "bfq-ioc.c"
-+#include "bfq-sched.c"
-+#include "bfq-cgroup.c"
-+
-+#define bfq_class_idle(bfqq) ((bfqq)->entity.ioprio_class ==\
-+ IOPRIO_CLASS_IDLE)
-+#define bfq_class_rt(bfqq) ((bfqq)->entity.ioprio_class ==\
-+ IOPRIO_CLASS_RT)
-+
-+#define bfq_sample_valid(samples) ((samples) > 80)
-+
-+/*
-+ * We regard a request as SYNC if it is either a read or has the SYNC bit
-+ * set (in which case it could also be a direct WRITE).
-+ */
-+static inline int bfq_bio_sync(struct bio *bio)
-+{
-+ if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
-+ return 1;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Schedule a run of the queue if there are requests pending and no one
-+ * in the driver will restart queueing.
-+ */
-+static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
-+{
-+ if (bfqd->queued != 0) {
-+ bfq_log(bfqd, "schedule dispatch");
-+ kblockd_schedule_work(&bfqd->unplug_work);
-+ }
-+}
-+
-+/*
-+ * Lifted from AS - choose which of rq1 and rq2 is best served now.
-+ * We choose the request that is closest to the head right now. Distance
-+ * behind the head is penalized and only allowed to a certain extent.
-+ */
-+static struct request *bfq_choose_req(struct bfq_data *bfqd,
-+ struct request *rq1,
-+ struct request *rq2,
-+ sector_t last)
-+{
-+ sector_t s1, s2, d1 = 0, d2 = 0;
-+ unsigned long back_max;
-+#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
-+#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
-+ unsigned wrap = 0; /* bit mask: requests behind the disk head? */
-+
-+ if (rq1 == NULL || rq1 == rq2)
-+ return rq2;
-+ if (rq2 == NULL)
-+ return rq1;
-+
-+ if (rq_is_sync(rq1) && !rq_is_sync(rq2))
-+ return rq1;
-+ else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
-+ return rq2;
-+ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
-+ return rq1;
-+ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
-+ return rq2;
-+
-+ s1 = blk_rq_pos(rq1);
-+ s2 = blk_rq_pos(rq2);
-+
-+ /*
-+ * By definition, 1KiB is 2 sectors.
-+ */
-+ back_max = bfqd->bfq_back_max * 2;
-+
-+ /*
-+ * Strict one way elevator _except_ in the case where we allow
-+ * short backward seeks which are biased as twice the cost of a
-+ * similar forward seek.
-+ */
-+ if (s1 >= last)
-+ d1 = s1 - last;
-+ else if (s1 + back_max >= last)
-+ d1 = (last - s1) * bfqd->bfq_back_penalty;
-+ else
-+ wrap |= BFQ_RQ1_WRAP;
-+
-+ if (s2 >= last)
-+ d2 = s2 - last;
-+ else if (s2 + back_max >= last)
-+ d2 = (last - s2) * bfqd->bfq_back_penalty;
-+ else
-+ wrap |= BFQ_RQ2_WRAP;
-+
-+ /* Found required data */
-+
-+ /*
-+ * By doing switch() on the bit mask "wrap" we avoid having to
-+ * check two variables for all permutations: --> faster!
-+ */
-+ switch (wrap) {
-+ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
-+ if (d1 < d2)
-+ return rq1;
-+ else if (d2 < d1)
-+ return rq2;
-+ else {
-+ if (s1 >= s2)
-+ return rq1;
-+ else
-+ return rq2;
-+ }
-+
-+ case BFQ_RQ2_WRAP:
-+ return rq1;
-+ case BFQ_RQ1_WRAP:
-+ return rq2;
-+ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
-+ default:
-+ /*
-+ * Since both rqs are wrapped,
-+ * start with the one that's further behind head
-+ * (--> only *one* back seek required),
-+ * since back seek takes more time than forward.
-+ */
-+ if (s1 <= s2)
-+ return rq1;
-+ else
-+ return rq2;
-+ }
-+}
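-+
-+/*
-+ * Worked example for the distance logic above, with the default
-+ * bfq_back_penalty == 2 and back_max large enough not to matter:
-+ *
-+ *   last = 1000, s1 = 1100 -> d1 = 100           (forward seek)
-+ *   last = 1000, s2 = 950  -> d2 = 50 * 2 = 100  (backward, penalized)
-+ *
-+ * The distances tie, so the wrap == 0 branch falls through to the
-+ * sector comparison and rq1, the higher sector, is chosen.
-+ */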
-+
-+static struct bfq_queue *
-+bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
-+ sector_t sector, struct rb_node **ret_parent,
-+ struct rb_node ***rb_link)
-+{
-+ struct rb_node **p, *parent;
-+ struct bfq_queue *bfqq = NULL;
-+
-+ parent = NULL;
-+ p = &root->rb_node;
-+ while (*p) {
-+ struct rb_node **n;
-+
-+ parent = *p;
-+ bfqq = rb_entry(parent, struct bfq_queue, pos_node);
-+
-+ /*
-+ * Sort strictly based on sector. Smallest to the left,
-+ * largest to the right.
-+ */
-+ if (sector > blk_rq_pos(bfqq->next_rq))
-+ n = &(*p)->rb_right;
-+ else if (sector < blk_rq_pos(bfqq->next_rq))
-+ n = &(*p)->rb_left;
-+ else
-+ break;
-+ p = n;
-+ bfqq = NULL;
-+ }
-+
-+ *ret_parent = parent;
-+ if (rb_link)
-+ *rb_link = p;
-+
-+ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
-+ (long long unsigned)sector,
-+ bfqq != NULL ? bfqq->pid : 0);
-+
-+ return bfqq;
-+}
-+
-+static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq)
-+{
-+ struct rb_node **p, *parent;
-+ struct bfq_queue *__bfqq;
-+
-+ if (bfqq->pos_root != NULL) {
-+ rb_erase(&bfqq->pos_node, bfqq->pos_root);
-+ bfqq->pos_root = NULL;
-+ }
-+
-+ if (bfq_class_idle(bfqq))
-+ return;
-+ if (!bfqq->next_rq)
-+ return;
-+
-+ bfqq->pos_root = &bfqd->rq_pos_tree;
-+ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
-+ blk_rq_pos(bfqq->next_rq), &parent, &p);
-+ if (__bfqq == NULL) {
-+ rb_link_node(&bfqq->pos_node, parent, p);
-+ rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
-+ } else
-+ bfqq->pos_root = NULL;
-+}
-+
-+/*
-+ * Tell whether there are active queues or groups with differentiated weights.
-+ */
-+static inline bool bfq_differentiated_weights(struct bfq_data *bfqd)
-+{
-+ BUG_ON(!bfqd->hw_tag);
-+ /*
-+ * For weights to differ, at least one of the trees must contain
-+ * at least two nodes.
-+ */
-+ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
-+ (bfqd->queue_weights_tree.rb_node->rb_left ||
-+ bfqd->queue_weights_tree.rb_node->rb_right)
-+#ifdef CONFIG_CGROUP_BFQIO
-+ ) ||
-+ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
-+ (bfqd->group_weights_tree.rb_node->rb_left ||
-+ bfqd->group_weights_tree.rb_node->rb_right)
-+#endif
-+ );
-+}
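-+
-+/*
-+ * Example of the check above: if every queue has the same weight, the
-+ * queue weights tree holds at most one counter node, whose rb_left
-+ * and rb_right are both NULL; with the group tree in the same state,
-+ * the expression evaluates to false and the scenario is treated as
-+ * symmetric (no differentiated weights).
-+ */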
-+
-+/*
-+ * If the weight-counter tree passed as input contains no counter for
-+ * the weight of the input entity, then add that counter; otherwise just
-+ * increment the existing counter.
-+ *
-+ * Note that weight-counter trees contain few nodes in mostly symmetric
-+ * scenarios. For example, if all queues have the same weight, then the
-+ * weight-counter tree for the queues may contain at most one node.
-+ * This holds even if low_latency is on, because weight-raised queues
-+ * are not inserted in the tree.
-+ * In most scenarios, the rate at which nodes are created/destroyed
-+ * should be low too.
-+ */
-+static void bfq_weights_tree_add(struct bfq_data *bfqd,
-+ struct bfq_entity *entity,
-+ struct rb_root *root)
-+{
-+ struct rb_node **new = &(root->rb_node), *parent = NULL;
-+
-+ /*
-+ * Do not insert if:
-+ * - the device does not support queueing;
-+ * - the entity is already associated with a counter, which happens if:
-+ * 1) the entity is associated with a queue, 2) a request arrival
-+ * has caused the queue to become both non-weight-raised, and hence
-+ * change its weight, and backlogged; in this respect, each
-+ * of the two events causes an invocation of this function,
-+ * 3) this is the invocation of this function caused by the second
-+ * event. This second invocation is actually useless, and we handle
-+ * this fact by exiting immediately. More efficient or clearer
-+ * solutions might possibly be adopted.
-+ */
-+ if (!bfqd->hw_tag || entity->weight_counter)
-+ return;
-+
-+ while (*new) {
-+ struct bfq_weight_counter *__counter = container_of(*new,
-+ struct bfq_weight_counter,
-+ weights_node);
-+ parent = *new;
-+
-+ if (entity->weight == __counter->weight) {
-+ entity->weight_counter = __counter;
-+ goto inc_counter;
-+ }
-+ if (entity->weight < __counter->weight)
-+ new = &((*new)->rb_left);
-+ else
-+ new = &((*new)->rb_right);
-+ }
-+
-+ entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
-+ GFP_ATOMIC);
-+ /* out of atomic memory: leave the entity without a counter */
-+ if (!entity->weight_counter)
-+  return;
-+ entity->weight_counter->weight = entity->weight;
-+ rb_link_node(&entity->weight_counter->weights_node, parent, new);
-+ rb_insert_color(&entity->weight_counter->weights_node, root);
-+
-+inc_counter:
-+ entity->weight_counter->num_active++;
-+}
-+
-+/*
-+ * Decrement the weight counter associated with the entity, and, if the
-+ * counter reaches 0, remove the counter from the tree.
-+ * See the comments to the function bfq_weights_tree_add() for considerations
-+ * about overhead.
-+ */
-+static void bfq_weights_tree_remove(struct bfq_data *bfqd,
-+ struct bfq_entity *entity,
-+ struct rb_root *root)
-+{
-+ /*
-+ * Check whether the entity is actually associated with a counter.
-+ * In fact, the device may not be considered NCQ-capable for a while,
-+ * during which no insertions in the weight trees are performed; the
-+ * device may then be deemed NCQ-capable again, and hence
-+ * this function may start to be invoked. This may cause the function
-+ * to be invoked for entities that are not associated with any counter.
-+ */
-+ if (!entity->weight_counter)
-+ return;
-+
-+ BUG_ON(RB_EMPTY_ROOT(root));
-+ BUG_ON(entity->weight_counter->weight != entity->weight);
-+
-+ BUG_ON(!entity->weight_counter->num_active);
-+ entity->weight_counter->num_active--;
-+ if (entity->weight_counter->num_active > 0)
-+ goto reset_entity_pointer;
-+
-+ rb_erase(&entity->weight_counter->weights_node, root);
-+ kfree(entity->weight_counter);
-+
-+reset_entity_pointer:
-+ entity->weight_counter = NULL;
-+}
-+
-+static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq,
-+ struct request *last)
-+{
-+ struct rb_node *rbnext = rb_next(&last->rb_node);
-+ struct rb_node *rbprev = rb_prev(&last->rb_node);
-+ struct request *next = NULL, *prev = NULL;
-+
-+ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
-+
-+ if (rbprev != NULL)
-+ prev = rb_entry_rq(rbprev);
-+
-+ if (rbnext != NULL)
-+ next = rb_entry_rq(rbnext);
-+ else {
-+ rbnext = rb_first(&bfqq->sort_list);
-+ if (rbnext && rbnext != &last->rb_node)
-+ next = rb_entry_rq(rbnext);
-+ }
-+
-+ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
-+}
-+
-+/* see the definition of bfq_async_charge_factor for details */
-+static inline unsigned long bfq_serv_to_charge(struct request *rq,
-+ struct bfq_queue *bfqq)
-+{
-+ return blk_rq_sectors(rq) *
-+ (1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->wr_coeff == 1) *
-+ bfq_async_charge_factor));
-+}
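-+
-+/*
-+ * Illustrative note, not part of the original patch: assuming the
-+ * usual value bfq_async_charge_factor == 10, an 8-sector request from
-+ * a non-weight-raised async queue is charged 8 * (1 + 1 * 1 * 10) =
-+ * 88 sectors, whereas the same request from a sync queue, or from a
-+ * weight-raised async queue (wr_coeff > 1), is charged just
-+ * 8 * (1 + 0) = 8 sectors.
-+ */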
-+
-+/**
-+ * bfq_updated_next_req - update the queue after a new next_rq selection.
-+ * @bfqd: the device data the queue belongs to.
-+ * @bfqq: the queue to update.
-+ *
-+ * If the first request of a queue changes we make sure that the queue
-+ * has enough budget to serve at least its first request (if the
-+ * request has grown). We do this because if the queue does not have
-+ * enough budget for its first request, it has to go through two
-+ * dispatch rounds to actually get that request dispatched.
-+ */
-+static void bfq_updated_next_req(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq)
-+{
-+ struct bfq_entity *entity = &bfqq->entity;
-+ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
-+ struct request *next_rq = bfqq->next_rq;
-+ unsigned long new_budget;
-+
-+ if (next_rq == NULL)
-+ return;
-+
-+ if (bfqq == bfqd->in_service_queue)
-+ /*
-+ * In order not to break guarantees, budgets cannot be
-+ * changed after an entity has been selected.
-+ */
-+ return;
-+
-+ BUG_ON(entity->tree != &st->active);
-+ BUG_ON(entity == entity->sched_data->in_service_entity);
-+
-+ new_budget = max_t(unsigned long, bfqq->max_budget,
-+ bfq_serv_to_charge(next_rq, bfqq));
-+ if (entity->budget != new_budget) {
-+ entity->budget = new_budget;
-+ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
-+ new_budget);
-+ bfq_activate_bfqq(bfqd, bfqq);
-+ }
-+}
-+
-+static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
-+{
-+ u64 dur;
-+
-+ if (bfqd->bfq_wr_max_time > 0)
-+ return bfqd->bfq_wr_max_time;
-+
-+ dur = bfqd->RT_prod;
-+ do_div(dur, bfqd->peak_rate);
-+
-+ return dur;
-+}
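-+
-+/*
-+ * Worked example, for illustration only: bfqd->RT_prod is the product
-+ * R * T of a reference rate and a reference weight-raising duration.
-+ * If the estimated peak_rate happens to equal the reference rate R,
-+ * then dur = (R * T) / R = T: a device running exactly at the
-+ * reference rate gets the reference duration, while a device twice as
-+ * fast gets only T / 2.
-+ */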
-+
-+/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
-+static inline void bfq_reset_burst_list(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq)
-+{
-+ struct bfq_queue *item;
-+ struct hlist_node *n;
-+
-+ hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
-+ hlist_del_init(&item->burst_list_node);
-+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
-+ bfqd->burst_size = 1;
-+}
-+
-+/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
-+static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
-+{
-+ /* Increment burst size to take into account also bfqq */
-+ bfqd->burst_size++;
-+
-+ if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
-+ struct bfq_queue *pos, *bfqq_item;
-+ struct hlist_node *n;
-+
-+ /*
-+ * Enough queues have been activated shortly after each
-+ * other to consider this burst as large.
-+ */
-+ bfqd->large_burst = true;
-+
-+ /*
-+ * We can now mark all queues in the burst list as
-+ * belonging to a large burst.
-+ */
-+ hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
-+ burst_list_node)
-+ bfq_mark_bfqq_in_large_burst(bfqq_item);
-+ bfq_mark_bfqq_in_large_burst(bfqq);
-+
-+ /*
-+ * From now on, and until the current burst finishes, any
-+ * new queue being activated shortly after the last queue
-+ * was inserted in the burst can be immediately marked as
-+ * belonging to a large burst. So the burst list is not
-+ * needed any more. Remove it.
-+ */
-+ hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
-+ burst_list_node)
-+ hlist_del_init(&pos->burst_list_node);
-+ } else /* burst not yet large: add bfqq to the burst list */
-+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
-+}
-+
-+/*
-+ * If many queues happen to become active shortly after each other, then,
-+ * to help the processes associated to these queues get their job done as
-+ * soon as possible, it is usually better to not grant either weight-raising
-+ * or device idling to these queues. In this comment we describe, firstly,
-+ * the reasons why this fact holds, and, secondly, the next function, which
-+ * implements the main steps needed to properly mark these queues so that
-+ * they can then be treated in a different way.
-+ *
-+ * As for the terminology, we say that a queue becomes active, i.e.,
-+ * switches from idle to backlogged, either when it is created (as a
-+ * consequence of the arrival of an I/O request), or, if already existing,
-+ * when a new request for the queue arrives while the queue is idle.
-+ * Bursts of activations, i.e., activations of different queues occurring
-+ * shortly after each other, are typically caused by services or applications
-+ * that spawn or reactivate many parallel threads/processes. Examples are
-+ * systemd during boot or git grep.
-+ *
-+ * These services or applications benefit mostly from a high throughput:
-+ * the quicker the requests of the activated queues are cumulatively served,
-+ * the sooner the target job of these queues gets completed. As a consequence,
-+ * weight-raising any of these queues, which also implies idling the device
-+ * for it, is almost always counterproductive: in most cases it just lowers
-+ * throughput.
-+ *
-+ * On the other hand, a burst of activations may also be caused by the start
-+ * of an application that does not consist of a lot of parallel I/O-bound
-+ * threads. In fact, with a complex application, the burst may be just a
-+ * consequence of the fact that several processes need to be executed to
-+ * start up the application. To start an application as quickly as possible,
-+ * the best thing to do is to privilege the I/O related to the application
-+ * with respect to all other I/O. Therefore, the best strategy to start an
-+ * application that causes a burst of activations as quickly as possible is
-+ * to weight-raise all the queues activated during the burst. This is the
-+ * exact opposite of the best strategy for the other type of bursts.
-+ *
-+ * In the end, to take the best action for each of the two cases, the two
-+ * types of bursts need to be distinguished. Fortunately, this seems
-+ * relatively easy to do, by looking at the sizes of the bursts. In
-+ * particular, we found a threshold such that bursts with a larger size
-+ * than that threshold are apparently caused only by services or commands
-+ * such as systemd or git grep. For brevity, hereafter we simply call these
-+ * bursts 'large'. BFQ *does not* weight-raise queues whose activations occur
-+ * in a large burst. In addition, for each of these queues BFQ performs or
-+ * does not perform idling depending on which choice boosts the throughput
-+ * most. The exact choice depends on the device and request pattern at
-+ * hand.
-+ *
-+ * Turning back to the next function, it implements all the steps needed
-+ * to detect the occurrence of a large burst and to properly mark all the
-+ * queues belonging to it (so that they can then be treated in a different
-+ * way). This goal is achieved by maintaining a special "burst list" that
-+ * holds, temporarily, the queues that belong to the burst in progress. The
-+ * list is then used to mark these queues as belonging to a large burst if
-+ * the burst does become large. The main steps are the following.
-+ *
-+ * . when the very first queue is activated, the queue is inserted into the
-+ * list (as it could be the first queue in a possible burst)
-+ *
-+ * . if the current burst has not yet become large, and a queue Q that does
-+ * not yet belong to the burst is activated shortly after the last time
-+ * at which a new queue entered the burst list, then the function appends
-+ * Q to the burst list
-+ *
-+ * . if, as a consequence of the previous step, the burst size reaches
-+ * the large-burst threshold, then
-+ *
-+ * . all the queues in the burst list are marked as belonging to a
-+ * large burst
-+ *
-+ * . the burst list is deleted; in fact, the burst list already served
-+ *     its purpose (temporarily keeping track of the queues in a burst,
-+ * so as to be able to mark them as belonging to a large burst in the
-+ * previous sub-step), and now is not needed any more
-+ *
-+ * . the device enters a large-burst mode
-+ *
-+ * . if a queue Q that does not belong to the burst is activated while
-+ * the device is in large-burst mode and shortly after the last time
-+ * at which a queue either entered the burst list or was marked as
-+ * belonging to the current large burst, then Q is immediately marked
-+ * as belonging to a large burst.
-+ *
-+ * . if a queue Q that does not belong to the burst is activated a while
-+ *   later, i.e., not shortly after, the last time at which a queue
-+ * either entered the burst list or was marked as belonging to the
-+ * current large burst, then the current burst is deemed as finished and:
-+ *
-+ * . the large-burst mode is reset if set
-+ *
-+ * . the burst list is emptied
-+ *
-+ * . Q is inserted in the burst list, as Q may be the first queue
-+ * in a possible new burst (then the burst list contains just Q
-+ * after this step).
-+ */
-+static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-+ bool idle_for_long_time)
-+{
-+ /*
-+ * If bfqq happened to be activated in a burst, but has been idle
-+ * for at least as long as an interactive queue, then we assume
-+ * that, in the overall I/O initiated in the burst, the I/O
-+ * associated to bfqq is finished. So bfqq does not need to be
-+ * treated as a queue belonging to a burst anymore. Accordingly,
-+ * we reset bfqq's in_large_burst flag if set, and remove bfqq
-+	 * from the burst list if it's there. We do not, however, decrement
-+ * burst_size, because the fact that bfqq does not need to belong
-+ * to the burst list any more does not invalidate the fact that
-+ * bfqq may have been activated during the current burst.
-+ */
-+ if (idle_for_long_time) {
-+ hlist_del_init(&bfqq->burst_list_node);
-+ bfq_clear_bfqq_in_large_burst(bfqq);
-+ }
-+
-+ /*
-+ * If bfqq is already in the burst list or is part of a large
-+ * burst, then there is nothing else to do.
-+ */
-+ if (!hlist_unhashed(&bfqq->burst_list_node) ||
-+ bfq_bfqq_in_large_burst(bfqq))
-+ return;
-+
-+ /*
-+ * If bfqq's activation happens late enough, then the current
-+ * burst is finished, and related data structures must be reset.
-+ *
-+ * In this respect, consider the special case where bfqq is the very
-+ * first queue being activated. In this case, last_ins_in_burst is
-+ * not yet significant when we get here. But it is easy to verify
-+ * that, whether or not the following condition is true, bfqq will
-+ * end up being inserted into the burst list. In particular the
-+ * list will happen to contain only bfqq. And this is exactly what
-+ * has to happen, as bfqq may be the first queue in a possible
-+ * burst.
-+ */
-+ if (time_is_before_jiffies(bfqd->last_ins_in_burst +
-+ bfqd->bfq_burst_interval)) {
-+ bfqd->large_burst = false;
-+ bfq_reset_burst_list(bfqd, bfqq);
-+ return;
-+ }
-+
-+ /*
-+ * If we get here, then bfqq is being activated shortly after the
-+ * last queue. So, if the current burst is also large, we can mark
-+ * bfqq as belonging to this large burst immediately.
-+ */
-+ if (bfqd->large_burst) {
-+ bfq_mark_bfqq_in_large_burst(bfqq);
-+ return;
-+ }
-+
-+ /*
-+ * If we get here, then a large-burst state has not yet been
-+ * reached, but bfqq is being activated shortly after the last
-+ * queue. Then we add bfqq to the burst.
-+ */
-+ bfq_add_to_burst(bfqd, bfqq);
-+}
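-+
-+/*
-+ * Illustrative timeline, not part of the original patch, assuming a
-+ * large-burst threshold of 11: queues Q1..Q11 are activated within
-+ * bfqd->bfq_burst_interval of one another. Q1 (re)initializes the
-+ * burst list, Q2..Q10 are appended to it, and the activation of Q11
-+ * brings burst_size up to the threshold: all eleven queues are marked
-+ * in_large_burst and the list is emptied. A further queue Q12
-+ * activated shortly afterwards is marked in_large_burst directly,
-+ * without going through the list.
-+ */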
-+
-+static void bfq_add_request(struct request *rq)
-+{
-+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
-+ struct bfq_entity *entity = &bfqq->entity;
-+ struct bfq_data *bfqd = bfqq->bfqd;
-+ struct request *next_rq, *prev;
-+ unsigned long old_wr_coeff = bfqq->wr_coeff;
-+ bool interactive = false;
-+
-+ bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
-+ bfqq->queued[rq_is_sync(rq)]++;
-+ bfqd->queued++;
-+
-+ elv_rb_add(&bfqq->sort_list, rq);
-+
-+ /*
-+ * Check if this request is a better next-serve candidate.
-+ */
-+ prev = bfqq->next_rq;
-+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
-+ BUG_ON(next_rq == NULL);
-+ bfqq->next_rq = next_rq;
-+
-+ /*
-+ * Adjust priority tree position, if next_rq changes.
-+ */
-+ if (prev != bfqq->next_rq)
-+ bfq_rq_pos_tree_add(bfqd, bfqq);
-+
-+ if (!bfq_bfqq_busy(bfqq)) {
-+ bool soft_rt,
-+ idle_for_long_time = time_is_before_jiffies(
-+ bfqq->budget_timeout +
-+ bfqd->bfq_wr_min_idle_time);
-+
-+ if (bfq_bfqq_sync(bfqq)) {
-+ bool already_in_burst =
-+ !hlist_unhashed(&bfqq->burst_list_node) ||
-+ bfq_bfqq_in_large_burst(bfqq);
-+ bfq_handle_burst(bfqd, bfqq, idle_for_long_time);
-+ /*
-+ * If bfqq was not already in the current burst,
-+ * then, at this point, bfqq either has been
-+ * added to the current burst or has caused the
-+ * current burst to terminate. In particular, in
-+ * the second case, bfqq has become the first
-+ * queue in a possible new burst.
-+ * In both cases last_ins_in_burst needs to be
-+ * moved forward.
-+ */
-+ if (!already_in_burst)
-+ bfqd->last_ins_in_burst = jiffies;
-+ }
-+
-+ soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
-+ !bfq_bfqq_in_large_burst(bfqq) &&
-+ time_is_before_jiffies(bfqq->soft_rt_next_start);
-+ interactive = !bfq_bfqq_in_large_burst(bfqq) &&
-+ idle_for_long_time;
-+ entity->budget = max_t(unsigned long, bfqq->max_budget,
-+ bfq_serv_to_charge(next_rq, bfqq));
-+
-+ if (!bfq_bfqq_IO_bound(bfqq)) {
-+ if (time_before(jiffies,
-+ RQ_BIC(rq)->ttime.last_end_request +
-+ bfqd->bfq_slice_idle)) {
-+ bfqq->requests_within_timer++;
-+ if (bfqq->requests_within_timer >=
-+ bfqd->bfq_requests_within_timer)
-+ bfq_mark_bfqq_IO_bound(bfqq);
-+ } else
-+ bfqq->requests_within_timer = 0;
-+ }
-+
-+ if (!bfqd->low_latency)
-+ goto add_bfqq_busy;
-+
-+ /*
-+ * If the queue is not being boosted and has been idle
-+ * for enough time, start a weight-raising period
-+ */
-+ if (old_wr_coeff == 1 && (interactive || soft_rt)) {
-+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
-+ if (interactive)
-+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
-+ else
-+ bfqq->wr_cur_max_time =
-+ bfqd->bfq_wr_rt_max_time;
-+ bfq_log_bfqq(bfqd, bfqq,
-+ "wrais starting at %lu, rais_max_time %u",
-+ jiffies,
-+ jiffies_to_msecs(bfqq->wr_cur_max_time));
-+ } else if (old_wr_coeff > 1) {
-+ if (interactive)
-+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
-+ else if (bfq_bfqq_in_large_burst(bfqq) ||
-+ (bfqq->wr_cur_max_time ==
-+ bfqd->bfq_wr_rt_max_time &&
-+ !soft_rt)) {
-+ bfqq->wr_coeff = 1;
-+ bfq_log_bfqq(bfqd, bfqq,
-+ "wrais ending at %lu, rais_max_time %u",
-+ jiffies,
-+ jiffies_to_msecs(bfqq->
-+ wr_cur_max_time));
-+ } else if (time_before(
-+ bfqq->last_wr_start_finish +
-+ bfqq->wr_cur_max_time,
-+ jiffies +
-+ bfqd->bfq_wr_rt_max_time) &&
-+ soft_rt) {
-+ /*
-+ * The remaining weight-raising time is lower
-+ * than bfqd->bfq_wr_rt_max_time, which
-+ * means that the application is enjoying
-+ * weight raising either because deemed soft-
-+ * rt in the near past, or because deemed
-+			 * interactive long ago. In both cases,
-+ * resetting now the current remaining weight-
-+ * raising time for the application to the
-+ * weight-raising duration for soft rt
-+ * applications would not cause any latency
-+ * increase for the application (as the new
-+ * duration would be higher than the remaining
-+ * time).
-+ *
-+ * In addition, the application is now meeting
-+ * the requirements for being deemed soft rt.
-+ * In the end we can correctly and safely
-+ * (re)charge the weight-raising duration for
-+ * the application with the weight-raising
-+ * duration for soft rt applications.
-+ *
-+ * In particular, doing this recharge now, i.e.,
-+ * before the weight-raising period for the
-+ * application finishes, reduces the probability
-+ * of the following negative scenario:
-+ * 1) the weight of a soft rt application is
-+ * raised at startup (as for any newly
-+ * created application),
-+ * 2) since the application is not interactive,
-+ * at a certain time weight-raising is
-+ * stopped for the application,
-+ * 3) at that time the application happens to
-+ * still have pending requests, and hence
-+ * is destined to not have a chance to be
-+ * deemed soft rt before these requests are
-+ * completed (see the comments to the
-+ * function bfq_bfqq_softrt_next_start()
-+ * for details on soft rt detection),
-+ * 4) these pending requests experience a high
-+ * latency because the application is not
-+ * weight-raised while they are pending.
-+ */
-+ bfqq->last_wr_start_finish = jiffies;
-+ bfqq->wr_cur_max_time =
-+ bfqd->bfq_wr_rt_max_time;
-+ }
-+ }
-+ if (old_wr_coeff != bfqq->wr_coeff)
-+ entity->ioprio_changed = 1;
-+add_bfqq_busy:
-+ bfqq->last_idle_bklogged = jiffies;
-+ bfqq->service_from_backlogged = 0;
-+ bfq_clear_bfqq_softrt_update(bfqq);
-+ bfq_add_bfqq_busy(bfqd, bfqq);
-+ } else {
-+ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
-+ time_is_before_jiffies(
-+ bfqq->last_wr_start_finish +
-+ bfqd->bfq_wr_min_inter_arr_async)) {
-+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
-+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
-+
-+ bfqd->wr_busy_queues++;
-+ entity->ioprio_changed = 1;
-+ bfq_log_bfqq(bfqd, bfqq,
-+ "non-idle wrais starting at %lu, rais_max_time %u",
-+ jiffies,
-+ jiffies_to_msecs(bfqq->wr_cur_max_time));
-+ }
-+ if (prev != bfqq->next_rq)
-+ bfq_updated_next_req(bfqd, bfqq);
-+ }
-+
-+ if (bfqd->low_latency &&
-+ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
-+ bfqq->last_wr_start_finish = jiffies;
-+}
-+
-+static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
-+ struct bio *bio)
-+{
-+ struct task_struct *tsk = current;
-+ struct bfq_io_cq *bic;
-+ struct bfq_queue *bfqq;
-+
-+ bic = bfq_bic_lookup(bfqd, tsk->io_context);
-+ if (bic == NULL)
-+ return NULL;
-+
-+ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
-+ if (bfqq != NULL)
-+ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
-+
-+ return NULL;
-+}
-+
-+static void bfq_activate_request(struct request_queue *q, struct request *rq)
-+{
-+ struct bfq_data *bfqd = q->elevator->elevator_data;
-+
-+ bfqd->rq_in_driver++;
-+ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
-+ bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
-+ (long long unsigned)bfqd->last_position);
-+}
-+
-+static inline void bfq_deactivate_request(struct request_queue *q,
-+ struct request *rq)
-+{
-+ struct bfq_data *bfqd = q->elevator->elevator_data;
-+
-+ BUG_ON(bfqd->rq_in_driver == 0);
-+ bfqd->rq_in_driver--;
-+}
-+
-+static void bfq_remove_request(struct request *rq)
-+{
-+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
-+ struct bfq_data *bfqd = bfqq->bfqd;
-+ const int sync = rq_is_sync(rq);
-+
-+ if (bfqq->next_rq == rq) {
-+ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
-+ bfq_updated_next_req(bfqd, bfqq);
-+ }
-+
-+ list_del_init(&rq->queuelist);
-+ BUG_ON(bfqq->queued[sync] == 0);
-+ bfqq->queued[sync]--;
-+ bfqd->queued--;
-+ elv_rb_del(&bfqq->sort_list, rq);
-+
-+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
-+ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue)
-+ bfq_del_bfqq_busy(bfqd, bfqq, 1);
-+ /*
-+ * Remove queue from request-position tree as it is empty.
-+ */
-+ if (bfqq->pos_root != NULL) {
-+ rb_erase(&bfqq->pos_node, bfqq->pos_root);
-+ bfqq->pos_root = NULL;
-+ }
-+ }
-+
-+ if (rq->cmd_flags & REQ_META) {
-+ BUG_ON(bfqq->meta_pending == 0);
-+ bfqq->meta_pending--;
-+ }
-+}
-+
-+static int bfq_merge(struct request_queue *q, struct request **req,
-+ struct bio *bio)
-+{
-+ struct bfq_data *bfqd = q->elevator->elevator_data;
-+ struct request *__rq;
-+
-+ __rq = bfq_find_rq_fmerge(bfqd, bio);
-+ if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) {
-+ *req = __rq;
-+ return ELEVATOR_FRONT_MERGE;
-+ }
-+
-+ return ELEVATOR_NO_MERGE;
-+}
-+
-+static void bfq_merged_request(struct request_queue *q, struct request *req,
-+ int type)
-+{
-+ if (type == ELEVATOR_FRONT_MERGE &&
-+ rb_prev(&req->rb_node) &&
-+ blk_rq_pos(req) <
-+ blk_rq_pos(container_of(rb_prev(&req->rb_node),
-+ struct request, rb_node))) {
-+ struct bfq_queue *bfqq = RQ_BFQQ(req);
-+ struct bfq_data *bfqd = bfqq->bfqd;
-+ struct request *prev, *next_rq;
-+
-+ /* Reposition request in its sort_list */
-+ elv_rb_del(&bfqq->sort_list, req);
-+ elv_rb_add(&bfqq->sort_list, req);
-+ /* Choose next request to be served for bfqq */
-+ prev = bfqq->next_rq;
-+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
-+ bfqd->last_position);
-+ BUG_ON(next_rq == NULL);
-+ bfqq->next_rq = next_rq;
-+ /*
-+ * If next_rq changes, update both the queue's budget to
-+ * fit the new request and the queue's position in its
-+ * rq_pos_tree.
-+ */
-+ if (prev != bfqq->next_rq) {
-+ bfq_updated_next_req(bfqd, bfqq);
-+ bfq_rq_pos_tree_add(bfqd, bfqq);
-+ }
-+ }
-+}
-+
-+static void bfq_merged_requests(struct request_queue *q, struct request *rq,
-+ struct request *next)
-+{
-+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
-+
-+ /*
-+ * Reposition in fifo if next is older than rq.
-+ */
-+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-+ time_before(next->fifo_time, rq->fifo_time)) {
-+ list_move(&rq->queuelist, &next->queuelist);
-+ rq->fifo_time = next->fifo_time;
-+ }
-+
-+ if (bfqq->next_rq == next)
-+ bfqq->next_rq = rq;
-+
-+ bfq_remove_request(next);
-+}
-+
-+/* Must be called with bfqq != NULL */
-+static inline void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
-+{
-+ BUG_ON(bfqq == NULL);
-+ if (bfq_bfqq_busy(bfqq))
-+ bfqq->bfqd->wr_busy_queues--;
-+ bfqq->wr_coeff = 1;
-+ bfqq->wr_cur_max_time = 0;
-+ /* Trigger a weight change on the next activation of the queue */
-+ bfqq->entity.ioprio_changed = 1;
-+}
-+
-+static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
-+ struct bfq_group *bfqg)
-+{
-+ int i, j;
-+
-+ for (i = 0; i < 2; i++)
-+ for (j = 0; j < IOPRIO_BE_NR; j++)
-+ if (bfqg->async_bfqq[i][j] != NULL)
-+ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
-+ if (bfqg->async_idle_bfqq != NULL)
-+ bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
-+}
-+
-+static void bfq_end_wr(struct bfq_data *bfqd)
-+{
-+ struct bfq_queue *bfqq;
-+
-+ spin_lock_irq(bfqd->queue->queue_lock);
-+
-+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
-+ bfq_bfqq_end_wr(bfqq);
-+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
-+ bfq_bfqq_end_wr(bfqq);
-+ bfq_end_wr_async(bfqd);
-+
-+ spin_unlock_irq(bfqd->queue->queue_lock);
-+}
-+
-+static int bfq_allow_merge(struct request_queue *q, struct request *rq,
-+ struct bio *bio)
-+{
-+ struct bfq_data *bfqd = q->elevator->elevator_data;
-+ struct bfq_io_cq *bic;
-+ struct bfq_queue *bfqq;
-+
-+ /*
-+ * Disallow merge of a sync bio into an async request.
-+ */
-+ if (bfq_bio_sync(bio) && !rq_is_sync(rq))
-+ return 0;
-+
-+ /*
-+ * Lookup the bfqq that this bio will be queued with. Allow
-+ * merge only if rq is queued there.
-+ * Queue lock is held here.
-+ */
-+ bic = bfq_bic_lookup(bfqd, current->io_context);
-+ if (bic == NULL)
-+ return 0;
-+
-+ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
-+ return bfqq == RQ_BFQQ(rq);
-+}
-+
-+static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq)
-+{
-+ if (bfqq != NULL) {
-+ bfq_mark_bfqq_must_alloc(bfqq);
-+ bfq_mark_bfqq_budget_new(bfqq);
-+ bfq_clear_bfqq_fifo_expire(bfqq);
-+
-+ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
-+
-+ bfq_log_bfqq(bfqd, bfqq,
-+ "set_in_service_queue, cur-budget = %lu",
-+ bfqq->entity.budget);
-+ }
-+
-+ bfqd->in_service_queue = bfqq;
-+}
-+
-+/*
-+ * Get and set a new queue for service.
-+ */
-+static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq)
-+{
-+ if (!bfqq)
-+ bfqq = bfq_get_next_queue(bfqd);
-+ else
-+ bfq_get_next_queue_forced(bfqd, bfqq);
-+
-+ __bfq_set_in_service_queue(bfqd, bfqq);
-+ return bfqq;
-+}
-+
-+static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
-+ struct request *rq)
-+{
-+ if (blk_rq_pos(rq) >= bfqd->last_position)
-+ return blk_rq_pos(rq) - bfqd->last_position;
-+ else
-+ return bfqd->last_position - blk_rq_pos(rq);
-+}
-+
-+/*
-+ * Return true if rq is close enough to bfqd->last_position, i.e.,
-+ * within BFQQ_SEEK_THR sectors of it.
-+ */
-+static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq)
-+{
-+ return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR;
-+}
-+
-+static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
-+{
-+ struct rb_root *root = &bfqd->rq_pos_tree;
-+ struct rb_node *parent, *node;
-+ struct bfq_queue *__bfqq;
-+ sector_t sector = bfqd->last_position;
-+
-+ if (RB_EMPTY_ROOT(root))
-+ return NULL;
-+
-+ /*
-+ * First, if we find a request starting at the end of the last
-+ * request, choose it.
-+ */
-+ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
-+ if (__bfqq != NULL)
-+ return __bfqq;
-+
-+ /*
-+ * If the exact sector wasn't found, the parent of the NULL leaf
-+ * will contain the closest sector (rq_pos_tree sorted by
-+ * next_request position).
-+ */
-+ __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
-+ if (bfq_rq_close(bfqd, __bfqq->next_rq))
-+ return __bfqq;
-+
-+ if (blk_rq_pos(__bfqq->next_rq) < sector)
-+ node = rb_next(&__bfqq->pos_node);
-+ else
-+ node = rb_prev(&__bfqq->pos_node);
-+ if (node == NULL)
-+ return NULL;
-+
-+ __bfqq = rb_entry(node, struct bfq_queue, pos_node);
-+ if (bfq_rq_close(bfqd, __bfqq->next_rq))
-+ return __bfqq;
-+
-+ return NULL;
-+}
-+
-+/*
-+ * bfqd - obvious
-+ * cur_bfqq - passed in so that we don't decide that the current queue
-+ * is closely cooperating with itself.
-+ *
-+ * We are assuming that cur_bfqq has dispatched at least one request,
-+ * and that bfqd->last_position reflects a position on the disk associated
-+ * with the I/O issued by cur_bfqq.
-+ */
-+static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
-+ struct bfq_queue *cur_bfqq)
-+{
-+ struct bfq_queue *bfqq;
-+
-+ if (bfq_class_idle(cur_bfqq))
-+ return NULL;
-+ if (!bfq_bfqq_sync(cur_bfqq))
-+ return NULL;
-+ if (BFQQ_SEEKY(cur_bfqq))
-+ return NULL;
-+
-+ /* If device has only one backlogged bfq_queue, don't search. */
-+ if (bfqd->busy_queues == 1)
-+ return NULL;
-+
-+ /*
-+ * We should notice if some of the queues are cooperating, e.g.
-+ * working closely on the same area of the disk. In that case,
-+	 * we can group them together and avoid wasting time idling.
-+ */
-+ bfqq = bfqq_close(bfqd);
-+ if (bfqq == NULL || bfqq == cur_bfqq)
-+ return NULL;
-+
-+ /*
-+ * Do not merge queues from different bfq_groups.
-+ */
-+ if (bfqq->entity.parent != cur_bfqq->entity.parent)
-+ return NULL;
-+
-+ /*
-+ * It only makes sense to merge sync queues.
-+ */
-+ if (!bfq_bfqq_sync(bfqq))
-+ return NULL;
-+ if (BFQQ_SEEKY(bfqq))
-+ return NULL;
-+
-+ /*
-+ * Do not merge queues of different priority classes.
-+ */
-+ if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq))
-+ return NULL;
-+
-+ return bfqq;
-+}
-+
-+/*
-+ * If enough samples have been computed, return the current max budget
-+ * stored in bfqd, which is dynamically updated according to the
-+ * estimated disk peak rate; otherwise return the default max budget
-+ */
-+static inline unsigned long bfq_max_budget(struct bfq_data *bfqd)
-+{
-+ if (bfqd->budgets_assigned < 194)
-+ return bfq_default_max_budget;
-+ else
-+ return bfqd->bfq_max_budget;
-+}
-+
-+/*
-+ * Return min budget, which is a fraction of the current or default
-+ * max budget (trying with 1/32)
-+ */
-+static inline unsigned long bfq_min_budget(struct bfq_data *bfqd)
-+{
-+ if (bfqd->budgets_assigned < 194)
-+ return bfq_default_max_budget / 32;
-+ else
-+ return bfqd->bfq_max_budget / 32;
-+}
-+
-+static void bfq_arm_slice_timer(struct bfq_data *bfqd)
-+{
-+ struct bfq_queue *bfqq = bfqd->in_service_queue;
-+ struct bfq_io_cq *bic;
-+ unsigned long sl;
-+
-+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
-+
-+ /* Processes have exited, don't wait. */
-+ bic = bfqd->in_service_bic;
-+ if (bic == NULL || atomic_read(&bic->icq.ioc->active_ref) == 0)
-+ return;
-+
-+ bfq_mark_bfqq_wait_request(bfqq);
-+
-+ /*
-+ * We don't want to idle for seeks, but we do want to allow
-+ * fair distribution of slice time for a process doing back-to-back
-+	 * seeks. So allow a little bit of time for it to submit a new rq.
-+ *
-+ * To prevent processes with (partly) seeky workloads from
-+ * being too ill-treated, grant them a small fraction of the
-+ * assigned budget before reducing the waiting time to
-+	 * BFQ_MIN_TT. In practice, this proved to help reduce latency.
-+ */
-+ sl = bfqd->bfq_slice_idle;
-+ /*
-+ * Unless the queue is being weight-raised, grant only minimum idle
-+ * time if the queue either has been seeky for long enough or has
-+ * already proved to be constantly seeky.
-+ */
-+ if (bfq_sample_valid(bfqq->seek_samples) &&
-+ ((BFQQ_SEEKY(bfqq) && bfqq->entity.service >
-+ bfq_max_budget(bfqq->bfqd) / 8) ||
-+ bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1)
-+ sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
-+ else if (bfqq->wr_coeff > 1)
-+ sl = sl * 3;
-+ bfqd->last_idling_start = ktime_get();
-+ mod_timer(&bfqd->idle_slice_timer, jiffies + sl);
-+ bfq_log(bfqd, "arm idle: %u/%u ms",
-+ jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle));
-+}
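-+
-+/*
-+ * Numeric sketch, for illustration only, assuming the default
-+ * bfq_slice_idle of 8 ms: a weight-raised queue idles for
-+ * 3 * 8 = 24 ms, a non-weight-raised queue that has proved seeky (as
-+ * per the condition above) idles for at most the minimum think time
-+ * BFQ_MIN_TT, and every other queue idles for the plain 8 ms.
-+ */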
-+
-+/*
-+ * Set the maximum time for the in-service queue to consume its
-+ * budget. This prevents seeky processes from lowering the disk
-+ * throughput (always guaranteed with a time slice scheme as in CFQ).
-+ */
-+static void bfq_set_budget_timeout(struct bfq_data *bfqd)
-+{
-+ struct bfq_queue *bfqq = bfqd->in_service_queue;
-+ unsigned int timeout_coeff;
-+ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
-+ timeout_coeff = 1;
-+ else
-+ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
-+
-+ bfqd->last_budget_start = ktime_get();
-+
-+ bfq_clear_bfqq_budget_new(bfqq);
-+ bfqq->budget_timeout = jiffies +
-+ bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
-+
-+ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
-+ jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
-+ timeout_coeff));
-+}
-+
-+/*
-+ * Move request from internal lists to the request queue dispatch list.
-+ */
-+static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
-+{
-+ struct bfq_data *bfqd = q->elevator->elevator_data;
-+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
-+
-+ /*
-+ * For consistency, the next instruction should have been executed
-+ * after removing the request from the queue and dispatching it.
-+ * We execute instead this instruction before bfq_remove_request()
-+ * (and hence introduce a temporary inconsistency), for efficiency.
-+	 * In fact, in a forced_dispatch, this prevents two counters related
-+	 * to bfqq->dispatched from being uselessly decremented if bfqq
-+	 * is not in service, and then incremented again after
-+	 * incrementing bfqq->dispatched.
-+ */
-+ bfqq->dispatched++;
-+ bfq_remove_request(rq);
-+ elv_dispatch_sort(q, rq);
-+
-+ if (bfq_bfqq_sync(bfqq))
-+ bfqd->sync_flight++;
-+}
-+
-+/*
-+ * Return expired entry, or NULL to just start from scratch in rbtree.
-+ */
-+static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
-+{
-+ struct request *rq = NULL;
-+
-+ if (bfq_bfqq_fifo_expire(bfqq))
-+ return NULL;
-+
-+ bfq_mark_bfqq_fifo_expire(bfqq);
-+
-+ if (list_empty(&bfqq->fifo))
-+ return NULL;
-+
-+ rq = rq_entry_fifo(bfqq->fifo.next);
-+
-+ if (time_before(jiffies, rq->fifo_time))
-+ return NULL;
-+
-+ return rq;
-+}
-+
-+/* Must be called with the queue_lock held. */
-+static int bfqq_process_refs(struct bfq_queue *bfqq)
-+{
-+ int process_refs, io_refs;
-+
-+ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
-+ process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
-+ BUG_ON(process_refs < 0);
-+ return process_refs;
-+}
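-+
-+/*
-+ * Illustrative example, not part of the original patch: if a bfqq has
-+ * an atomic ref count of 5, allocated[READ] == 2, allocated[WRITE] == 1
-+ * and entity.on_st == 1, then process_refs = 5 - (2 + 1) - 1 = 1:
-+ * exactly one process still holds a plain reference to the queue, so
-+ * it is safe to involve the queue in a merge chain.
-+ */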
-+
-+static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
-+{
-+ int process_refs, new_process_refs;
-+ struct bfq_queue *__bfqq;
-+
-+ /*
-+ * If there are no process references on the new_bfqq, then it is
-+ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
-+ * may have dropped their last reference (not just their last process
-+ * reference).
-+ */
-+ if (!bfqq_process_refs(new_bfqq))
-+ return;
-+
-+ /* Avoid a circular list and skip interim queue merges. */
-+ while ((__bfqq = new_bfqq->new_bfqq)) {
-+ if (__bfqq == bfqq)
-+ return;
-+ new_bfqq = __bfqq;
-+ }
-+
-+ process_refs = bfqq_process_refs(bfqq);
-+ new_process_refs = bfqq_process_refs(new_bfqq);
-+ /*
-+ * If the process for the bfqq has gone away, there is no
-+ * sense in merging the queues.
-+ */
-+ if (process_refs == 0 || new_process_refs == 0)
-+ return;
-+
-+ /*
-+ * Merge in the direction of the lesser amount of work.
-+ */
-+ if (new_process_refs >= process_refs) {
-+ bfqq->new_bfqq = new_bfqq;
-+ atomic_add(process_refs, &new_bfqq->ref);
-+ } else {
-+ new_bfqq->new_bfqq = bfqq;
-+ atomic_add(new_process_refs, &bfqq->ref);
-+ }
-+ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
-+ new_bfqq->pid);
-+}
-+
-+static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
-+{
-+ struct bfq_entity *entity = &bfqq->entity;
-+ return entity->budget - entity->service;
-+}
-+
-+static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
-+{
-+ BUG_ON(bfqq != bfqd->in_service_queue);
-+
-+ __bfq_bfqd_reset_in_service(bfqd);
-+
-+ /*
-+ * If this bfqq is shared between multiple processes, check
-+ * to make sure that those processes are still issuing I/Os
-+ * within the mean seek distance. If not, it may be time to
-+ * break the queues apart again.
-+ */
-+ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
-+ bfq_mark_bfqq_split_coop(bfqq);
-+
-+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
-+ /*
-+ * Overloading budget_timeout field to store the time
-+ * at which the queue remains with no backlog; used by
-+ * the weight-raising mechanism.
-+ */
-+ bfqq->budget_timeout = jiffies;
-+ bfq_del_bfqq_busy(bfqd, bfqq, 1);
-+ } else {
-+ bfq_activate_bfqq(bfqd, bfqq);
-+ /*
-+ * Resort priority tree of potential close cooperators.
-+ */
-+ bfq_rq_pos_tree_add(bfqd, bfqq);
-+ }
-+}
-+
-+/**
-+ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
-+ * @bfqd: device data.
-+ * @bfqq: queue to update.
-+ * @reason: reason for expiration.
-+ *
-+ * Handle the feedback on @bfqq budget. See the body for detailed
-+ * comments.
-+ */
-+static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq,
-+ enum bfqq_expiration reason)
-+{
-+ struct request *next_rq;
-+ unsigned long budget, min_budget;
-+
-+ budget = bfqq->max_budget;
-+ min_budget = bfq_min_budget(bfqd);
-+
-+ BUG_ON(bfqq != bfqd->in_service_queue);
-+
-+ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu",
-+ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
-+ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu",
-+ budget, bfq_min_budget(bfqd));
-+ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
-+ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
-+
-+ if (bfq_bfqq_sync(bfqq)) {
-+ switch (reason) {
-+ /*
-+ * Caveat: in all the following cases we trade latency
-+ * for throughput.
-+ */
-+ case BFQ_BFQQ_TOO_IDLE:
-+ /*
-+ * This is the only case where we may reduce
-+ * the budget: if there is no request of the
-+ * process still waiting for completion, then
-+ * we assume (tentatively) that the timer has
-+ * expired because the batch of requests of
-+ * the process could have been served with a
-+			 * smaller budget. Hence, betting that the
-+			 * process will behave in the same way when it
-+ * becomes backlogged again, we reduce its
-+ * next budget. As long as we guess right,
-+ * this budget cut reduces the latency
-+ * experienced by the process.
-+ *
-+ * However, if there are still outstanding
-+ * requests, then the process may have not yet
-+ * issued its next request just because it is
-+ * still waiting for the completion of some of
-+ * the still outstanding ones. So in this
-+ * subcase we do not reduce its budget, on the
-+ * contrary we increase it to possibly boost
-+ * the throughput, as discussed in the
-+ * comments to the BUDGET_TIMEOUT case.
-+ */
-+ if (bfqq->dispatched > 0) /* still outstanding reqs */
-+ budget = min(budget * 2, bfqd->bfq_max_budget);
-+ else {
-+ if (budget > 5 * min_budget)
-+ budget -= 4 * min_budget;
-+ else
-+ budget = min_budget;
-+ }
-+ break;
-+ case BFQ_BFQQ_BUDGET_TIMEOUT:
-+ /*
-+ * We double the budget here because: 1) it
-+ * gives the chance to boost the throughput if
-+ * this is not a seeky process (which may have
-+ * bumped into this timeout because of, e.g.,
-+ * ZBR), 2) together with charge_full_budget
-+ * it helps give seeky processes higher
-+ * timestamps, and hence be served less
-+ * frequently.
-+ */
-+ budget = min(budget * 2, bfqd->bfq_max_budget);
-+ break;
-+ case BFQ_BFQQ_BUDGET_EXHAUSTED:
-+ /*
-+ * The process still has backlog, and did not
-+ * let either the budget timeout or the disk
-+ * idling timeout expire. Hence it is not
-+ * seeky, has a short thinktime and may be
-+ * happy with a higher budget too. So
-+ * definitely increase the budget of this good
-+ * candidate to boost the disk throughput.
-+ */
-+ budget = min(budget * 4, bfqd->bfq_max_budget);
-+ break;
-+ case BFQ_BFQQ_NO_MORE_REQUESTS:
-+ /*
-+ * Leave the budget unchanged.
-+ */
-+ default:
-+ return;
-+ }
-+ } else /* async queue */
-+		/* async queues always get the maximum possible budget
-+ * (their ability to dispatch is limited by
-+ * @bfqd->bfq_max_budget_async_rq).
-+ */
-+ budget = bfqd->bfq_max_budget;
-+
-+ bfqq->max_budget = budget;
-+
-+ if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 &&
-+ bfqq->max_budget > bfqd->bfq_max_budget)
-+ bfqq->max_budget = bfqd->bfq_max_budget;
-+
-+ /*
-+ * Make sure that we have enough budget for the next request.
-+ * Since the finish time of the bfqq must be kept in sync with
-+ * the budget, be sure to call __bfq_bfqq_expire() after the
-+ * update.
-+ */
-+ next_rq = bfqq->next_rq;
-+ if (next_rq != NULL)
-+ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
-+ bfq_serv_to_charge(next_rq, bfqq));
-+ else
-+ bfqq->entity.budget = bfqq->max_budget;
-+
-+ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu",
-+ next_rq != NULL ? blk_rq_sectors(next_rq) : 0,
-+ bfqq->entity.budget);
-+}
-+
-+static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
-+{
-+ unsigned long max_budget;
-+
-+ /*
-+ * The max_budget calculated when autotuning is equal to the
-+	 * number of sectors transferred in timeout_sync at the
-+ * estimated peak rate.
-+ */
-+ max_budget = (unsigned long)(peak_rate * 1000 *
-+ timeout >> BFQ_RATE_SHIFT);
-+
-+ return max_budget;
-+}
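-+
-+/*
-+ * Worked example, for illustration only: peak_rate is stored in
-+ * sectors/usec, left-shifted by BFQ_RATE_SHIFT for fixed-point
-+ * precision, and timeout is in ms. With an estimated rate of
-+ * 0.1 sectors/usec (about 51 MB/s with 512-byte sectors) and a sync
-+ * timeout of 125 ms, max_budget = 0.1 * 1000 * 125 = 12500 sectors,
-+ * i.e., roughly 6 MB of service per budget.
-+ */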
-+
-+/*
-+ * In addition to updating the peak rate, checks whether the process
-+ * is "slow", and returns 1 if so. This slow flag is used, in addition
-+ * to the budget timeout, to reduce the amount of service provided to
-+ * seeky processes, and hence reduce their chances to lower the
-+ * throughput. See the code for more details.
-+ */
-+static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-+ int compensate, enum bfqq_expiration reason)
-+{
-+ u64 bw, usecs, expected, timeout;
-+ ktime_t delta;
-+ int update = 0;
-+
-+ if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
-+ return 0;
-+
-+ if (compensate)
-+ delta = bfqd->last_idling_start;
-+ else
-+ delta = ktime_get();
-+ delta = ktime_sub(delta, bfqd->last_budget_start);
-+ usecs = ktime_to_us(delta);
-+
-+ /* Don't trust short/unrealistic values. */
-+ if (usecs < 100 || usecs >= LONG_MAX)
-+ return 0;
-+
-+ /*
-+ * Calculate the bandwidth for the last slice. We use a 64 bit
-+ * value to store the peak rate, in sectors per usec in fixed
-+ * point math. We do so to have enough precision in the estimate
-+ * and to avoid overflows.
-+ */
-+ bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
-+ do_div(bw, (unsigned long)usecs);
-+
-+ timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
-+
-+ /*
-+ * Use only long (> 20ms) intervals to filter out spikes for
-+ * the peak rate estimation.
-+ */
-+ if (usecs > 20000) {
-+ if (bw > bfqd->peak_rate ||
-+ (!BFQQ_SEEKY(bfqq) &&
-+ reason == BFQ_BFQQ_BUDGET_TIMEOUT)) {
-+ bfq_log(bfqd, "measured bw =%llu", bw);
-+ /*
-+ * To smooth oscillations use a low-pass filter with
-+ * alpha=7/8, i.e.,
-+ * new_rate = (7/8) * old_rate + (1/8) * bw
-+ */
-+ do_div(bw, 8);
-+ if (bw == 0)
-+ return 0;
-+ bfqd->peak_rate *= 7;
-+ do_div(bfqd->peak_rate, 8);
-+ bfqd->peak_rate += bw;
-+ update = 1;
-+ bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate);
-+ }
-+
-+ update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
-+
-+ if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
-+ bfqd->peak_rate_samples++;
-+
-+ if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
-+ update) {
-+ int dev_type = blk_queue_nonrot(bfqd->queue);
-+ if (bfqd->bfq_user_max_budget == 0) {
-+ bfqd->bfq_max_budget =
-+ bfq_calc_max_budget(bfqd->peak_rate,
-+ timeout);
-+ bfq_log(bfqd, "new max_budget=%lu",
-+ bfqd->bfq_max_budget);
-+ }
-+ if (bfqd->device_speed == BFQ_BFQD_FAST &&
-+ bfqd->peak_rate < device_speed_thresh[dev_type]) {
-+ bfqd->device_speed = BFQ_BFQD_SLOW;
-+ bfqd->RT_prod = R_slow[dev_type] *
-+ T_slow[dev_type];
-+ } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
-+ bfqd->peak_rate > device_speed_thresh[dev_type]) {
-+ bfqd->device_speed = BFQ_BFQD_FAST;
-+ bfqd->RT_prod = R_fast[dev_type] *
-+ T_fast[dev_type];
-+ }
-+ }
-+ }
-+
-+ /*
-+ * If the process has been served for a too short time
-+	 * If the process has been served for too short a time
-+	 * interval to let its possible sequential accesses prevail over
-+	 * the initial seek time needed to move the disk head to the
-+	 * first sector it requested, then give the process a chance
-+	 * and, for the moment, return false.
-+ if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8)
-+ return 0;
-+
-+ /*
-+ * A process is considered ``slow'' (i.e., seeky, so that we
-+ * cannot treat it fairly in the service domain, as it would
-+	 * slow down the other processes too much) if, when a slice
-+ * ends for whatever reason, it has received service at a
-+ * rate that would not be high enough to complete the budget
-+ * before the budget timeout expiration.
-+ */
-+ expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
-+
-+ /*
-+ * Caveat: processes doing IO in the slower disk zones will
-+ * tend to be slow(er) even if not seeky. And the estimated
-+ * peak rate will actually be an average over the disk
-+ * surface. Hence, to not be too harsh with unlucky processes,
-+ * we keep a budget/3 margin of safety before declaring a
-+ * process slow.
-+ */
-+ return expected > (4 * bfqq->entity.budget) / 3;
-+}
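-+
-+/*
-+ * Worked example for the low-pass filter above, for illustration
-+ * only: with alpha = 7/8, if the old peak_rate corresponds to
-+ * 100 MB/s and the last slice measured 150 MB/s, the new estimate is
-+ * 7/8 * 100 + 1/8 * 150 = 106.25 MB/s. A single fast slice moves the
-+ * estimate only slightly, which damps transient spikes.
-+ */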
-+
-+/*
-+ * To be deemed as soft real-time, an application must meet two
-+ * requirements. First, the application must not require an average
-+ * bandwidth higher than the approximate bandwidth required to playback or
-+ * record a compressed high-definition video.
-+ * The next function is invoked on the completion of the last request of a
-+ * batch, to compute the next-start time instant, soft_rt_next_start, such
-+ * that, if the next request of the application does not arrive before
-+ * soft_rt_next_start, then the above requirement on the bandwidth is met.
-+ *
-+ * The second requirement is that the request pattern of the application is
-+ * isochronous, i.e., that, after issuing a request or a batch of requests,
-+ * the application stops issuing new requests until all its pending requests
-+ * have been completed. After that, the application may issue a new batch,
-+ * and so on.
-+ * For this reason the next function is invoked to compute
-+ * soft_rt_next_start only for applications that meet this requirement,
-+ * whereas soft_rt_next_start is set to infinity for applications that do
-+ * not.
-+ *
-+ * Unfortunately, even a greedy application may happen to behave in an
-+ * isochronous way if the CPU load is high. In fact, the application may
-+ * stop issuing requests while the CPUs are busy serving other processes,
-+ * then restart, then stop again for a while, and so on. In addition, if
-+ * the disk achieves a low enough throughput with the request pattern
-+ * issued by the application (e.g., because the request pattern is random
-+ * and/or the device is slow), then the application may meet the above
-+ * bandwidth requirement too. To prevent such a greedy application from
-+ * being deemed soft real-time, a further rule is used in the computation of
-+ * soft_rt_next_start: soft_rt_next_start must be higher than the current
-+ * time plus the maximum time for which the arrival of a request is waited
-+ * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
-+ * This filters out greedy applications, as the latter instead issue their
-+ * next request as soon as possible after the last one has been completed
-+ * (in contrast, when a batch of requests is completed, a soft real-time
-+ * application spends some time processing data).
-+ *
-+ * Unfortunately, the last filter may easily generate false positives if
-+ * only bfqd->bfq_slice_idle is used as a reference time interval and one
-+ * or both the following cases occur:
-+ * 1) HZ is so low that the duration of a jiffy is comparable to or higher
-+ * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
-+ * HZ=100.
-+ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
-+ * for a while, then suddenly 'jump' by several units to recover the lost
-+ * increments. This seems to happen, e.g., inside virtual machines.
-+ * To address this issue, we do not use as a reference time interval just
-+ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
-+ * particular we add the minimum number of jiffies for which the filter
-+ * seems to be quite precise also in embedded systems and KVM/QEMU virtual
-+ * machines.
-+ */
-+static inline unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq)
-+{
-+ return max(bfqq->last_idle_bklogged +
-+ HZ * bfqq->service_from_backlogged /
-+ bfqd->bfq_wr_max_softrt_rate,
-+ jiffies + bfqq->bfqd->bfq_slice_idle + 4);
-+}
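-+
-+/*
-+ * Worked example, for illustration only, assuming HZ == 250 and the
-+ * default soft-rt rate limit of 7000 sectors/s: with
-+ * service_from_backlogged == 2100 sectors, the bandwidth term above is
-+ * last_idle_bklogged + 250 * 2100 / 7000 = last_idle_bklogged + 75
-+ * jiffies (300 ms); issuing the next batch no earlier than that keeps
-+ * the average rate within the limit. The max() with
-+ * jiffies + bfq_slice_idle + 4 then enforces the greedy-application
-+ * filter discussed above.
-+ */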
-+
-+/*
-+ * Return the largest-possible time instant such that, for as long as possible,
-+ * the current time will be lower than this time instant according to the macro
-+ * time_is_before_jiffies().
-+ */
-+static inline unsigned long bfq_infinity_from_now(unsigned long now)
-+{
-+ return now + ULONG_MAX / 2;
-+}
-+
-+/**
-+ * bfq_bfqq_expire - expire a queue.
-+ * @bfqd: device owning the queue.
-+ * @bfqq: the queue to expire.
-+ * @compensate: if true, compensate for the time spent idling.
-+ * @reason: the reason causing the expiration.
-+ *
-+ * If the process associated to the queue is slow (i.e., seeky), or in
-+ * case of budget timeout, or, finally, if it is async, we
-+ * artificially charge it an entire budget (independently of the
-+ * actual service it received). As a consequence, the queue will get
-+ * higher timestamps than the correct ones upon reactivation, and
-+ * hence it will be rescheduled as if it had received more service
-+ * than what it actually received. In the end, this class of processes
-+ * will receive less service in proportion to how slowly they consume
-+ * their budgets (and hence how seriously they tend to lower the
-+ * throughput).
-+ *
-+ * In contrast, when a queue expires because it has been idling for
-+ * too long or because it exhausted its budget, we do not touch the
-+ * amount of service it has received. Hence when the queue will be
-+ * reactivated and its timestamps updated, the latter will be in sync
-+ * with the actual service received by the queue until expiration.
-+ *
-+ * Charging a full budget to the first type of queues and the exact
-+ * service to the others has the effect of using the WF2Q+ policy to
-+ * schedule the former on a timeslice basis, without violating the
-+ * service domain guarantees of the latter.
-+ */
-+static void bfq_bfqq_expire(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq,
-+ int compensate,
-+ enum bfqq_expiration reason)
-+{
-+ int slow;
-+ BUG_ON(bfqq != bfqd->in_service_queue);
-+
-+ /* Update disk peak rate for autotuning and check whether the
-+ * process is slow (see bfq_update_peak_rate).
-+ */
-+ slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason);
-+
-+ /*
-+	 * As explained above, 'punish' slow (i.e., seeky), timed-out
-+ * and async queues, to favor sequential sync workloads.
-+ *
-+ * Processes doing I/O in the slower disk zones will tend to be
-+ * slow(er) even if not seeky. Hence, since the estimated peak
-+ * rate is actually an average over the disk surface, these
-+ * processes may timeout just for bad luck. To avoid punishing
-+ * them we do not charge a full budget to a process that
-+ * succeeded in consuming at least 2/3 of its budget.
-+ */
-+ if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
-+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3))
-+ bfq_bfqq_charge_full_budget(bfqq);
-+
-+ bfqq->service_from_backlogged += bfqq->entity.service;
-+
-+ if (BFQQ_SEEKY(bfqq) && reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
-+ !bfq_bfqq_constantly_seeky(bfqq)) {
-+ bfq_mark_bfqq_constantly_seeky(bfqq);
-+ if (!blk_queue_nonrot(bfqd->queue))
-+ bfqd->const_seeky_busy_in_flight_queues++;
-+ }
-+
-+ if (reason == BFQ_BFQQ_TOO_IDLE &&
-+	    bfqq->entity.service <= 2 * bfqq->entity.budget / 10)
-+ bfq_clear_bfqq_IO_bound(bfqq);
-+
-+ if (bfqd->low_latency && bfqq->wr_coeff == 1)
-+ bfqq->last_wr_start_finish = jiffies;
-+
-+ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
-+ RB_EMPTY_ROOT(&bfqq->sort_list)) {
-+ /*
-+ * If we get here, and there are no outstanding requests,
-+ * then the request pattern is isochronous (see the comments
-+ * to the function bfq_bfqq_softrt_next_start()). Hence we
-+ * can compute soft_rt_next_start. If, instead, the queue
-+ * still has outstanding requests, then we have to wait
-+ * for the completion of all the outstanding requests to
-+ * discover whether the request pattern is actually
-+ * isochronous.
-+ */
-+ if (bfqq->dispatched == 0)
-+ bfqq->soft_rt_next_start =
-+ bfq_bfqq_softrt_next_start(bfqd, bfqq);
-+ else {
-+ /*
-+ * The application is still waiting for the
-+ * completion of one or more requests:
-+ * prevent it from possibly being incorrectly
-+ * deemed as soft real-time by setting its
-+ * soft_rt_next_start to infinity. In fact,
-+ * without this assignment, the application
-+ * would be incorrectly deemed as soft
-+ * real-time if:
-+ * 1) it issued a new request before the
-+ * completion of all its in-flight
-+ * requests, and
-+ * 2) at that time, its soft_rt_next_start
-+ * happened to be in the past.
-+ */
-+ bfqq->soft_rt_next_start =
-+ bfq_infinity_from_now(jiffies);
-+ /*
-+ * Schedule an update of soft_rt_next_start to when
-+ * the task may be discovered to be isochronous.
-+ */
-+ bfq_mark_bfqq_softrt_update(bfqq);
-+ }
-+ }
-+
-+ bfq_log_bfqq(bfqd, bfqq,
-+ "expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
-+ slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
-+
-+ /*
-+ * Increase, decrease or leave budget unchanged according to
-+ * reason.
-+ */
-+ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
-+ __bfq_bfqq_expire(bfqd, bfqq);
-+}
-+
-+/*
-+ * Budget timeout is not implemented through a dedicated timer, but
-+ * just checked on request arrivals and completions, as well as on
-+ * idle timer expirations.
-+ */
-+static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
-+{
-+ if (bfq_bfqq_budget_new(bfqq) ||
-+ time_before(jiffies, bfqq->budget_timeout))
-+ return 0;
-+ return 1;
-+}
-+
-+/*
-+ * If we expire a queue that is waiting for the arrival of a new
-+ * request, we may prevent the fictitious timestamp back-shifting that
-+ * allows the guarantees of the queue to be preserved (see [1] for
-+ * this tricky aspect). Hence we return true only if this condition
-+ * does not hold, or if the queue is slow enough that expiring it is
-+ * justified in order to preserve a high throughput.
-+ */
-+static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
-+{
-+ bfq_log_bfqq(bfqq->bfqd, bfqq,
-+ "may_budget_timeout: wait_request %d left %d timeout %d",
-+ bfq_bfqq_wait_request(bfqq),
-+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
-+ bfq_bfqq_budget_timeout(bfqq));
-+
-+ return (!bfq_bfqq_wait_request(bfqq) ||
-+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
-+ &&
-+ bfq_bfqq_budget_timeout(bfqq);
-+}
-+
-+/*
-+ * Device idling is allowed only for the queues for which this function
-+ * returns true. For this reason, the return value of this function plays a
-+ * critical role for both throughput boosting and service guarantees. The
-+ * return value is computed through a logical expression. In this rather
-+ * long comment, we try to briefly describe all the details and motivations
-+ * behind the components of this logical expression.
-+ *
-+ * First, the expression is false if bfqq is not sync, or if: bfqq happened
-+ * to become active during a large burst of queue activations, and the
-+ * pattern of requests bfqq contains boosts the throughput if bfqq is
-+ * expired. In fact, queues that became active during a large burst benefit
-+ * only from throughput, as discussed in the comments to bfq_handle_burst.
-+ * In this respect, expiring bfqq certainly boosts the throughput on NCQ-
-+ * capable flash-based devices, whereas, on rotational devices, it boosts
-+ * the throughput only if bfqq contains random requests.
-+ *
-+ * On the opposite end, if (a) bfqq is sync, (b) the above burst-related
-+ * condition does not hold, and (c) bfqq is being weight-raised, then the
-+ * expression always evaluates to true, as device idling is instrumental
-+ * for preserving low-latency guarantees (see [1]). If, instead, conditions
-+ * (a) and (b) do hold, but (c) does not, then the expression evaluates to
-+ * true only if: (1) bfqq is I/O-bound and has a non-null idle window, and
-+ * (2) at least one of the following two conditions holds.
-+ * The first condition is that the device is not performing NCQ, because
-+ * idling the device most certainly boosts the throughput if this condition
-+ * holds and bfqq is I/O-bound and has been granted a non-null idle window.
-+ * The second compound condition is made of the logical AND of two components.
-+ *
-+ * The first component is true only if there is no weight-raised busy
-+ * queue. This guarantees that the device is not idled for a sync non-
-+ * weight-raised queue when there are busy weight-raised queues. The former
-+ * is then expired immediately if empty. Combined with the timestamping
-+ * rules of BFQ (see [1] for details), this causes sync non-weight-raised
-+ * queues to get a lower number of requests served, and hence to ask for a
-+ * lower number of requests from the request pool, before the busy weight-
-+ * raised queues get served again.
-+ *
-+ * This is beneficial for the processes associated with weight-raised
-+ * queues, when the request pool is saturated (e.g., in the presence of
-+ * write hogs). In fact, if the processes associated with the other queues
-+ * ask for requests at a lower rate, then weight-raised processes have a
-+ * higher probability to get a request from the pool immediately (or at
-+ * least soon) when they need one. Hence they have a higher probability to
-+ * actually get a fraction of the disk throughput proportional to their
-+ * high weight. This is especially true with NCQ-capable drives, which
-+ * enqueue several requests in advance and further reorder internally-
-+ * queued requests.
-+ *
-+ * In the end, mistreating non-weight-raised queues when there are busy
-+ * weight-raised queues seems to mitigate starvation problems in the
-+ * presence of heavy write workloads and NCQ, and hence to guarantee a
-+ * higher application and system responsiveness in these hostile scenarios.
-+ *
-+ * If the first component of the compound condition is instead true, i.e.,
-+ * there is no weight-raised busy queue, then the second component of the
-+ * compound condition takes into account service-guarantee and throughput
-+ * issues related to NCQ (recall that the compound condition is evaluated
-+ * only if the device is detected as supporting NCQ).
-+ *
-+ * As for service guarantees, allowing the drive to enqueue more than one
-+ * request at a time, and hence delegating de facto final scheduling
-+ * decisions to the drive's internal scheduler, causes loss of control on
-+ * the actual request service order. In this respect, when the drive is
-+ * allowed to enqueue more than one request at a time, the service
-+ * distribution enforced by the drive's internal scheduler is likely to
-+ * coincide with the desired device-throughput distribution only in the
-+ * following, perfectly symmetric, scenario:
-+ * 1) all active queues have the same weight,
-+ * 2) all active groups at the same level in the groups tree have the same
-+ * weight,
-+ * 3) all active groups at the same level in the groups tree have the same
-+ * number of children.
-+ *
-+ * Even in such a scenario, sequential I/O may still receive a preferential
-+ * treatment, but this is not likely to be a big issue with flash-based
-+ * devices, because of their non-dramatic loss of throughput with random
-+ * I/O. Things do differ with HDDs, for which additional care is taken, as
-+ * explained after completing the discussion for flash-based devices.
-+ *
-+ * Unfortunately, keeping the necessary state for evaluating exactly the
-+ * above symmetry conditions would be quite complex and time-consuming.
-+ * Therefore BFQ evaluates instead the following stronger sub-conditions,
-+ * for which it is much easier to maintain the needed state:
-+ * 1) all active queues have the same weight,
-+ * 2) all active groups have the same weight,
-+ * 3) all active groups have at most one active child each.
-+ * In particular, the last two conditions are always true if hierarchical
-+ * support and the cgroups interface are not enabled, hence no state needs
-+ * to be maintained in this case.
-+ *
-+ * According to the above considerations, the second component of the
-+ * compound condition evaluates to true if any of the above symmetry
-+ * sub-conditions does not hold, or the device is not flash-based.
-+ * Therefore, if the first component is also true, then idling is allowed
-+ * for a sync queue. These are the only sub-conditions considered if the
-+ * device is flash-based, as, for such a device, it is sensible to force
-+ * idling only for service-guarantee issues. In fact, as for throughput,
-+ * idling NCQ-capable flash-based devices would not boost the throughput
-+ * even with sequential I/O; rather, it would lower the throughput in
-+ * proportion to how fast the device is. In the end, (only) if all three
-+ * sub-conditions hold and the device is flash-based, the compound
-+ * condition evaluates to false and therefore no idling is performed.
-+ *
-+ * As already said, things change with a rotational device, where idling
-+ * boosts the throughput with sequential I/O (even with NCQ). Hence, for
-+ * such a device the second component of the compound condition evaluates
-+ * to true also if the following additional sub-condition does not hold:
-+ * the queue is constantly seeky. Unfortunately, this different behavior
-+ * with respect to flash-based devices causes an additional asymmetry: if
-+ * some sync queues enjoy idling and some other sync queues do not, then
-+ * the latter get a low share of the device throughput, simply because the
-+ * former get many requests served after being set as in service, whereas
-+ * the latter do not. As a consequence, to guarantee the desired throughput
-+ * distribution, on HDDs the compound expression evaluates to true (and
-+ * hence device idling is performed) also if the following last symmetry
-+ * condition does not hold: no other queue is benefiting from idling. Also
-+ * this last condition is actually replaced with a simpler-to-maintain and
-+ * stronger condition: there is no busy queue which is not constantly seeky
-+ * (and hence may also benefit from idling).
-+ *
-+ * To sum up, when all the required symmetry and throughput-boosting
-+ * sub-conditions hold, the second component of the compound condition
-+ * evaluates to false, and hence no idling is performed. This helps to
-+ * keep the drives' internal queues full on NCQ-capable devices, and hence
-+ * to boost the throughput, without causing 'almost' any loss of service
-+ * guarantees. The 'almost' follows from the fact that, if the internal
-+ * queue of one such device is filled while all the sub-conditions hold,
-+ * but at some point in time some sub-condition ceases to hold, then it
-+ * may become impossible to let requests be served in the new desired
-+ * order until all the requests already queued in the device have been
-+ * served.
-+ */
-+static inline bool bfq_bfqq_must_not_expire(struct bfq_queue *bfqq)
-+{
-+ struct bfq_data *bfqd = bfqq->bfqd;
-+#ifdef CONFIG_CGROUP_BFQIO
-+#define symmetric_scenario (!bfqd->active_numerous_groups && \
-+ !bfq_differentiated_weights(bfqd))
-+#else
-+#define symmetric_scenario (!bfq_differentiated_weights(bfqd))
-+#endif
-+#define cond_for_seeky_on_ncq_hdd (bfq_bfqq_constantly_seeky(bfqq) && \
-+ bfqd->busy_in_flight_queues == \
-+ bfqd->const_seeky_busy_in_flight_queues)
-+
-+#define cond_for_expiring_in_burst (bfq_bfqq_in_large_burst(bfqq) && \
-+ bfqd->hw_tag && \
-+ (blk_queue_nonrot(bfqd->queue) || \
-+ bfq_bfqq_constantly_seeky(bfqq)))
-+
-+/*
-+ * Condition for expiring a non-weight-raised queue (and hence not idling
-+ * the device).
-+ */
-+#define cond_for_expiring_non_wr (bfqd->hw_tag && \
-+ (bfqd->wr_busy_queues > 0 || \
-+ (symmetric_scenario && \
-+ (blk_queue_nonrot(bfqd->queue) || \
-+ cond_for_seeky_on_ncq_hdd))))
-+
-+ return bfq_bfqq_sync(bfqq) &&
-+ !cond_for_expiring_in_burst &&
-+ (bfqq->wr_coeff > 1 ||
-+ (bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_idle_window(bfqq) &&
-+ !cond_for_expiring_non_wr)
-+ );
-+}
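
[Editorial sketch, not part of the original patch.] Stripped of kernel types
and macros, the decision implemented above reduces to the following pure
predicate; all parameter names are hypothetical stand-ins for the fields and
the cond_for_* macros used above.

#include <stdbool.h>

static bool must_not_expire(bool sync, bool expiring_in_burst_helps,
			    bool weight_raised, bool io_bound,
			    bool idle_window, bool expiring_non_wr_helps)
{
	if (!sync || expiring_in_burst_helps)
		return false;		/* idling never pays off here */
	if (weight_raised)
		return true;		/* idling preserves low latency */
	return io_bound && idle_window && !expiring_non_wr_helps;
}
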
-+
-+/*
-+ * If the in-service queue is empty but sync, and the function
-+ * bfq_bfqq_must_not_expire returns true, then:
-+ * 1) the queue must remain in service and cannot be expired, and
-+ * 2) the disk must be idled to wait for the possible arrival of a new
-+ * request for the queue.
-+ * See the comments to the function bfq_bfqq_must_not_expire for the reasons
-+ * why performing device idling is the best choice to boost the throughput
-+ * and preserve service guarantees when bfq_bfqq_must_not_expire itself
-+ * returns true.
-+ */
-+static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
-+{
-+ struct bfq_data *bfqd = bfqq->bfqd;
-+
-+ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
-+ bfq_bfqq_must_not_expire(bfqq);
-+}
-+
-+/*
-+ * Select a queue for service. If we have a current queue in service,
-+ * check whether to continue servicing it, or retrieve and set a new one.
-+ */
-+static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
-+{
-+ struct bfq_queue *bfqq, *new_bfqq = NULL;
-+ struct request *next_rq;
-+ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
-+
-+ bfqq = bfqd->in_service_queue;
-+ if (bfqq == NULL)
-+ goto new_queue;
-+
-+ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
-+
-+ /*
-+ * If another queue has a request waiting within our mean seek
-+ * distance, let it run. The expire code will check for close
-+ * cooperators and put the close queue at the front of the
-+ * service tree. If possible, merge the expiring queue with the
-+ * new bfqq.
-+ */
-+ new_bfqq = bfq_close_cooperator(bfqd, bfqq);
-+ if (new_bfqq != NULL && bfqq->new_bfqq == NULL)
-+ bfq_setup_merge(bfqq, new_bfqq);
-+
-+ if (bfq_may_expire_for_budg_timeout(bfqq) &&
-+ !timer_pending(&bfqd->idle_slice_timer) &&
-+ !bfq_bfqq_must_idle(bfqq))
-+ goto expire;
-+
-+ next_rq = bfqq->next_rq;
-+ /*
-+ * If bfqq has requests queued and it has enough budget left to
-+ * serve them, keep the queue, otherwise expire it.
-+ */
-+ if (next_rq != NULL) {
-+ if (bfq_serv_to_charge(next_rq, bfqq) >
-+ bfq_bfqq_budget_left(bfqq)) {
-+ reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
-+ goto expire;
-+ } else {
-+			/*
-+			 * The idle timer may be pending because disk
-+			 * idling is not necessarily disabled when a
-+			 * new request arrives.
-+			 */
-+ if (timer_pending(&bfqd->idle_slice_timer)) {
-+ /*
-+				 * If we get here: 1) at least one new
-+				 * request has arrived but we have not
-+				 * disabled the timer because the request
-+				 * was too small,
-+ * 2) then the block layer has unplugged
-+ * the device, causing the dispatch to be
-+ * invoked.
-+ *
-+ * Since the device is unplugged, now the
-+ * requests are probably large enough to
-+ * provide a reasonable throughput.
-+ * So we disable idling.
-+ */
-+ bfq_clear_bfqq_wait_request(bfqq);
-+ del_timer(&bfqd->idle_slice_timer);
-+ }
-+ if (new_bfqq == NULL)
-+ goto keep_queue;
-+ else
-+ goto expire;
-+ }
-+ }
-+
-+ /*
-+ * No requests pending. If the in-service queue still has requests
-+ * in flight (possibly waiting for a completion) or is idling for a
-+ * new request, then keep it.
-+ */
-+ if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
-+ (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq)))) {
-+ bfqq = NULL;
-+ goto keep_queue;
-+ } else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
-+ /*
-+ * Expiring the queue because there is a close cooperator,
-+ * cancel timer.
-+ */
-+ bfq_clear_bfqq_wait_request(bfqq);
-+ del_timer(&bfqd->idle_slice_timer);
-+ }
-+
-+ reason = BFQ_BFQQ_NO_MORE_REQUESTS;
-+expire:
-+ bfq_bfqq_expire(bfqd, bfqq, 0, reason);
-+new_queue:
-+ bfqq = bfq_set_in_service_queue(bfqd, new_bfqq);
-+ bfq_log(bfqd, "select_queue: new queue %d returned",
-+ bfqq != NULL ? bfqq->pid : 0);
-+keep_queue:
-+ return bfqq;
-+}
-+
-+static void bfq_update_wr_data(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq)
-+{
-+ if (bfqq->wr_coeff > 1) { /* queue is being boosted */
-+ struct bfq_entity *entity = &bfqq->entity;
-+
-+ bfq_log_bfqq(bfqd, bfqq,
-+ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
-+ jiffies_to_msecs(jiffies -
-+ bfqq->last_wr_start_finish),
-+ jiffies_to_msecs(bfqq->wr_cur_max_time),
-+ bfqq->wr_coeff,
-+ bfqq->entity.weight, bfqq->entity.orig_weight);
-+
-+ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
-+ entity->orig_weight * bfqq->wr_coeff);
-+ if (entity->ioprio_changed)
-+ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
-+		/*
-+		 * If the queue was activated in a burst, or too much
-+		 * time has elapsed since the beginning of this
-+		 * weight-raising period, then end weight-raising.
-+		 */
-+ if (bfq_bfqq_in_large_burst(bfqq) ||
-+ time_is_before_jiffies(bfqq->last_wr_start_finish +
-+ bfqq->wr_cur_max_time)) {
-+ bfqq->last_wr_start_finish = jiffies;
-+ bfq_log_bfqq(bfqd, bfqq,
-+ "wrais ending at %lu, rais_max_time %u",
-+ bfqq->last_wr_start_finish,
-+ jiffies_to_msecs(bfqq->wr_cur_max_time));
-+ bfq_bfqq_end_wr(bfqq);
-+ __bfq_entity_update_weight_prio(
-+ bfq_entity_service_tree(entity),
-+ entity);
-+ }
-+ }
-+}
-+
-+/*
-+ * Dispatch one request from bfqq, moving it to the request queue
-+ * dispatch list.
-+ */
-+static int bfq_dispatch_request(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq)
-+{
-+ int dispatched = 0;
-+ struct request *rq;
-+ unsigned long service_to_charge;
-+
-+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
-+
-+ /* Follow expired path, else get first next available. */
-+ rq = bfq_check_fifo(bfqq);
-+ if (rq == NULL)
-+ rq = bfqq->next_rq;
-+ service_to_charge = bfq_serv_to_charge(rq, bfqq);
-+
-+ if (service_to_charge > bfq_bfqq_budget_left(bfqq)) {
-+		/*
-+		 * This may happen if the next rq is chosen in fifo order
-+		 * instead of sector order. The budget is dimensioned to
-+		 * always be sufficient to serve the next request only if
-+		 * it is chosen in sector order. The reason is that it would
-+		 * be quite inefficient, and of little use, to always make
-+		 * sure that the budget is large enough to serve even the
-+		 * possible next rq in fifo order. In fact, requests are
-+		 * seldom served in fifo order.
-+		 *
-+		 * Expire the queue for budget exhaustion, and make sure
-+		 * that the next assigned budget is enough to serve the
-+		 * next request, even if it comes from the fifo expired
-+		 * path.
-+		 */
-+ bfqq->next_rq = rq;
-+ /*
-+		 * Since this dispatch failed, make sure that
-+		 * a new one will be performed.
-+ */
-+ if (!bfqd->rq_in_driver)
-+ bfq_schedule_dispatch(bfqd);
-+ goto expire;
-+ }
-+
-+ /* Finally, insert request into driver dispatch list. */
-+ bfq_bfqq_served(bfqq, service_to_charge);
-+ bfq_dispatch_insert(bfqd->queue, rq);
-+
-+ bfq_update_wr_data(bfqd, bfqq);
-+
-+ bfq_log_bfqq(bfqd, bfqq,
-+ "dispatched %u sec req (%llu), budg left %lu",
-+ blk_rq_sectors(rq),
-+ (long long unsigned)blk_rq_pos(rq),
-+ bfq_bfqq_budget_left(bfqq));
-+
-+ dispatched++;
-+
-+ if (bfqd->in_service_bic == NULL) {
-+ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
-+ bfqd->in_service_bic = RQ_BIC(rq);
-+ }
-+
-+ if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) &&
-+ dispatched >= bfqd->bfq_max_budget_async_rq) ||
-+ bfq_class_idle(bfqq)))
-+ goto expire;
-+
-+ return dispatched;
-+
-+expire:
-+ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED);
-+ return dispatched;
-+}
-+
-+static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
-+{
-+ int dispatched = 0;
-+
-+ while (bfqq->next_rq != NULL) {
-+ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
-+ dispatched++;
-+ }
-+
-+ BUG_ON(!list_empty(&bfqq->fifo));
-+ return dispatched;
-+}
-+
-+/*
-+ * Drain our current requests.
-+ * Used for barriers and when switching io schedulers on-the-fly.
-+ */
-+static int bfq_forced_dispatch(struct bfq_data *bfqd)
-+{
-+ struct bfq_queue *bfqq, *n;
-+ struct bfq_service_tree *st;
-+ int dispatched = 0;
-+
-+ bfqq = bfqd->in_service_queue;
-+ if (bfqq != NULL)
-+ __bfq_bfqq_expire(bfqd, bfqq);
-+
-+ /*
-+ * Loop through classes, and be careful to leave the scheduler
-+ * in a consistent state, as feedback mechanisms and vtime
-+ * updates cannot be disabled during the process.
-+ */
-+ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
-+ st = bfq_entity_service_tree(&bfqq->entity);
-+
-+ dispatched += __bfq_forced_dispatch_bfqq(bfqq);
-+ bfqq->max_budget = bfq_max_budget(bfqd);
-+
-+ bfq_forget_idle(st);
-+ }
-+
-+ BUG_ON(bfqd->busy_queues != 0);
-+
-+ return dispatched;
-+}
-+
-+static int bfq_dispatch_requests(struct request_queue *q, int force)
-+{
-+ struct bfq_data *bfqd = q->elevator->elevator_data;
-+ struct bfq_queue *bfqq;
-+ int max_dispatch;
-+
-+ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
-+ if (bfqd->busy_queues == 0)
-+ return 0;
-+
-+ if (unlikely(force))
-+ return bfq_forced_dispatch(bfqd);
-+
-+ bfqq = bfq_select_queue(bfqd);
-+ if (bfqq == NULL)
-+ return 0;
-+
-+ max_dispatch = bfqd->bfq_quantum;
-+ if (bfq_class_idle(bfqq))
-+ max_dispatch = 1;
-+
-+ if (!bfq_bfqq_sync(bfqq))
-+ max_dispatch = bfqd->bfq_max_budget_async_rq;
-+
-+ if (bfqq->dispatched >= max_dispatch) {
-+ if (bfqd->busy_queues > 1)
-+ return 0;
-+ if (bfqq->dispatched >= 4 * max_dispatch)
-+ return 0;
-+ }
-+
-+ if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq))
-+ return 0;
-+
-+ bfq_clear_bfqq_wait_request(bfqq);
-+ BUG_ON(timer_pending(&bfqd->idle_slice_timer));
-+
-+ if (!bfq_dispatch_request(bfqd, bfqq))
-+ return 0;
-+
-+ bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d (max_disp %d)",
-+ bfqq->pid, max_dispatch);
-+
-+ return 1;
-+}
-+
-+/*
-+ * Task holds one reference to the queue, dropped when task exits. Each rq
-+ * in-flight on this queue also holds a reference, dropped when rq is freed.
-+ *
-+ * Queue lock must be held here.
-+ */
-+static void bfq_put_queue(struct bfq_queue *bfqq)
-+{
-+ struct bfq_data *bfqd = bfqq->bfqd;
-+
-+ BUG_ON(atomic_read(&bfqq->ref) <= 0);
-+
-+ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq,
-+ atomic_read(&bfqq->ref));
-+ if (!atomic_dec_and_test(&bfqq->ref))
-+ return;
-+
-+ BUG_ON(rb_first(&bfqq->sort_list) != NULL);
-+ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
-+ BUG_ON(bfqq->entity.tree != NULL);
-+ BUG_ON(bfq_bfqq_busy(bfqq));
-+ BUG_ON(bfqd->in_service_queue == bfqq);
-+
-+ if (bfq_bfqq_sync(bfqq))
-+		/*
-+		 * The fact that this queue is being destroyed does not
-+		 * invalidate the fact that it may have been activated
-+		 * during the current burst. As a consequence, although
-+		 * the queue no longer exists, and hence must be removed
-+		 * from the burst list if it is there, the burst size
-+		 * must not be decremented.
-+		 */
-+ hlist_del_init(&bfqq->burst_list_node);
-+
-+ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq);
-+
-+ kmem_cache_free(bfq_pool, bfqq);
-+}
-+
-+static void bfq_put_cooperator(struct bfq_queue *bfqq)
-+{
-+ struct bfq_queue *__bfqq, *next;
-+
-+ /*
-+ * If this queue was scheduled to merge with another queue, be
-+ * sure to drop the reference taken on that queue (and others in
-+ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
-+ */
-+ __bfqq = bfqq->new_bfqq;
-+ while (__bfqq) {
-+ if (__bfqq == bfqq)
-+ break;
-+ next = __bfqq->new_bfqq;
-+ bfq_put_queue(__bfqq);
-+ __bfqq = next;
-+ }
-+}
-+
-+static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
-+{
-+ if (bfqq == bfqd->in_service_queue) {
-+ __bfq_bfqq_expire(bfqd, bfqq);
-+ bfq_schedule_dispatch(bfqd);
-+ }
-+
-+ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
-+ atomic_read(&bfqq->ref));
-+
-+ bfq_put_cooperator(bfqq);
-+
-+ bfq_put_queue(bfqq);
-+}
-+
-+static inline void bfq_init_icq(struct io_cq *icq)
-+{
-+ struct bfq_io_cq *bic = icq_to_bic(icq);
-+
-+ bic->ttime.last_end_request = jiffies;
-+}
-+
-+static void bfq_exit_icq(struct io_cq *icq)
-+{
-+ struct bfq_io_cq *bic = icq_to_bic(icq);
-+ struct bfq_data *bfqd = bic_to_bfqd(bic);
-+
-+ if (bic->bfqq[BLK_RW_ASYNC]) {
-+ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_ASYNC]);
-+ bic->bfqq[BLK_RW_ASYNC] = NULL;
-+ }
-+
-+ if (bic->bfqq[BLK_RW_SYNC]) {
-+ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
-+ bic->bfqq[BLK_RW_SYNC] = NULL;
-+ }
-+}
-+
-+/*
-+ * Update the entity prio values; note that the new values will not
-+ * be used until the next (re)activation.
-+ */
-+static void bfq_init_prio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
-+{
-+ struct task_struct *tsk = current;
-+ int ioprio_class;
-+
-+ if (!bfq_bfqq_prio_changed(bfqq))
-+ return;
-+
-+ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
-+ switch (ioprio_class) {
-+	default:
-+		dev_err(bfqq->bfqd->queue->backing_dev_info.dev,
-+			"bfq: bad prio class %d\n", ioprio_class);
-+		/* fall through: treat an unknown class as IOPRIO_CLASS_NONE */
-+	case IOPRIO_CLASS_NONE:
-+ /*
-+ * No prio set, inherit CPU scheduling settings.
-+ */
-+ bfqq->entity.new_ioprio = task_nice_ioprio(tsk);
-+ bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk);
-+ break;
-+ case IOPRIO_CLASS_RT:
-+ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
-+ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT;
-+ break;
-+ case IOPRIO_CLASS_BE:
-+ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
-+ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE;
-+ break;
-+ case IOPRIO_CLASS_IDLE:
-+ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE;
-+ bfqq->entity.new_ioprio = 7;
-+ bfq_clear_bfqq_idle_window(bfqq);
-+ break;
-+ }
-+
-+ if (bfqq->entity.new_ioprio < 0 ||
-+ bfqq->entity.new_ioprio >= IOPRIO_BE_NR) {
-+ printk(KERN_CRIT "bfq_init_prio_data: new_ioprio %d\n",
-+ bfqq->entity.new_ioprio);
-+ BUG();
-+ }
-+
-+ bfqq->entity.ioprio_changed = 1;
-+
-+ bfq_clear_bfqq_prio_changed(bfqq);
-+}
-+
-+static void bfq_changed_ioprio(struct bfq_io_cq *bic)
-+{
-+ struct bfq_data *bfqd;
-+ struct bfq_queue *bfqq, *new_bfqq;
-+ struct bfq_group *bfqg;
-+ unsigned long uninitialized_var(flags);
-+ int ioprio = bic->icq.ioc->ioprio;
-+
-+ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
-+ &flags);
-+ /*
-+	 * This condition may trigger on a newly created bic; be sure to
-+	 * drop the lock before returning.
-+ */
-+ if (unlikely(bfqd == NULL) || likely(bic->ioprio == ioprio))
-+ goto out;
-+
-+ bfqq = bic->bfqq[BLK_RW_ASYNC];
-+ if (bfqq != NULL) {
-+ bfqg = container_of(bfqq->entity.sched_data, struct bfq_group,
-+ sched_data);
-+ new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, bic,
-+ GFP_ATOMIC);
-+ if (new_bfqq != NULL) {
-+ bic->bfqq[BLK_RW_ASYNC] = new_bfqq;
-+ bfq_log_bfqq(bfqd, bfqq,
-+ "changed_ioprio: bfqq %p %d",
-+ bfqq, atomic_read(&bfqq->ref));
-+ bfq_put_queue(bfqq);
-+ }
-+ }
-+
-+ bfqq = bic->bfqq[BLK_RW_SYNC];
-+ if (bfqq != NULL)
-+ bfq_mark_bfqq_prio_changed(bfqq);
-+
-+ bic->ioprio = ioprio;
-+
-+out:
-+ bfq_put_bfqd_unlock(bfqd, &flags);
-+}
-+
-+static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-+ pid_t pid, int is_sync)
-+{
-+ RB_CLEAR_NODE(&bfqq->entity.rb_node);
-+ INIT_LIST_HEAD(&bfqq->fifo);
-+ INIT_HLIST_NODE(&bfqq->burst_list_node);
-+
-+ atomic_set(&bfqq->ref, 0);
-+ bfqq->bfqd = bfqd;
-+
-+ bfq_mark_bfqq_prio_changed(bfqq);
-+
-+ if (is_sync) {
-+ if (!bfq_class_idle(bfqq))
-+ bfq_mark_bfqq_idle_window(bfqq);
-+ bfq_mark_bfqq_sync(bfqq);
-+ }
-+ bfq_mark_bfqq_IO_bound(bfqq);
-+
-+ /* Tentative initial value to trade off between thr and lat */
-+ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
-+ bfqq->pid = pid;
-+
-+ bfqq->wr_coeff = 1;
-+ bfqq->last_wr_start_finish = 0;
-+	/*
-+	 * Set to the value for which bfqq will not be deemed soft
-+	 * real-time when it becomes backlogged.
-+	 */
-+ bfqq->soft_rt_next_start = bfq_infinity_from_now(jiffies);
-+}
-+
-+static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd,
-+ struct bfq_group *bfqg,
-+ int is_sync,
-+ struct bfq_io_cq *bic,
-+ gfp_t gfp_mask)
-+{
-+ struct bfq_queue *bfqq, *new_bfqq = NULL;
-+
-+retry:
-+ /* bic always exists here */
-+ bfqq = bic_to_bfqq(bic, is_sync);
-+
-+ /*
-+ * Always try a new alloc if we fall back to the OOM bfqq
-+	 * Always try a new alloc if we fell back to the OOM bfqq
-+ */
-+ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
-+ bfqq = NULL;
-+ if (new_bfqq != NULL) {
-+ bfqq = new_bfqq;
-+ new_bfqq = NULL;
-+ } else if (gfp_mask & __GFP_WAIT) {
-+ spin_unlock_irq(bfqd->queue->queue_lock);
-+ new_bfqq = kmem_cache_alloc_node(bfq_pool,
-+ gfp_mask | __GFP_ZERO,
-+ bfqd->queue->node);
-+ spin_lock_irq(bfqd->queue->queue_lock);
-+ if (new_bfqq != NULL)
-+ goto retry;
-+ } else {
-+ bfqq = kmem_cache_alloc_node(bfq_pool,
-+ gfp_mask | __GFP_ZERO,
-+ bfqd->queue->node);
-+ }
-+
-+ if (bfqq != NULL) {
-+ bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync);
-+ bfq_init_prio_data(bfqq, bic);
-+ bfq_init_entity(&bfqq->entity, bfqg);
-+ bfq_log_bfqq(bfqd, bfqq, "allocated");
-+ } else {
-+ bfqq = &bfqd->oom_bfqq;
-+ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
-+ }
-+ }
-+
-+ if (new_bfqq != NULL)
-+ kmem_cache_free(bfq_pool, new_bfqq);
-+
-+ return bfqq;
-+}
-+
-+static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
-+ struct bfq_group *bfqg,
-+ int ioprio_class, int ioprio)
-+{
-+ switch (ioprio_class) {
-+ case IOPRIO_CLASS_RT:
-+ return &bfqg->async_bfqq[0][ioprio];
-+ case IOPRIO_CLASS_NONE:
-+ ioprio = IOPRIO_NORM;
-+ /* fall through */
-+ case IOPRIO_CLASS_BE:
-+ return &bfqg->async_bfqq[1][ioprio];
-+ case IOPRIO_CLASS_IDLE:
-+ return &bfqg->async_idle_bfqq;
-+ default:
-+ BUG();
-+ }
-+}
-+
-+static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
-+ struct bfq_group *bfqg, int is_sync,
-+ struct bfq_io_cq *bic, gfp_t gfp_mask)
-+{
-+ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
-+ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
-+ struct bfq_queue **async_bfqq = NULL;
-+ struct bfq_queue *bfqq = NULL;
-+
-+ if (!is_sync) {
-+ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
-+ ioprio);
-+ bfqq = *async_bfqq;
-+ }
-+
-+ if (bfqq == NULL)
-+ bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
-+
-+ /*
-+ * Pin the queue now that it's allocated, scheduler exit will
-+ * prune it.
-+ */
-+ if (!is_sync && *async_bfqq == NULL) {
-+ atomic_inc(&bfqq->ref);
-+ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
-+ bfqq, atomic_read(&bfqq->ref));
-+ *async_bfqq = bfqq;
-+ }
-+
-+ atomic_inc(&bfqq->ref);
-+ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq,
-+ atomic_read(&bfqq->ref));
-+ return bfqq;
-+}
-+
-+static void bfq_update_io_thinktime(struct bfq_data *bfqd,
-+ struct bfq_io_cq *bic)
-+{
-+ unsigned long elapsed = jiffies - bic->ttime.last_end_request;
-+ unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle);
-+
-+ bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
-+ bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8;
-+ bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) /
-+ bic->ttime.ttime_samples;
-+}
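
[Editorial note, not part of the original patch.] The two updates above keep
a fixed-point exponentially weighted moving average with decay factor 7/8 and
scale 256; ttime_samples converges to 256, and the +128 term rounds the mean
to the nearest integer. A minimal user-space sketch of the same arithmetic,
with hypothetical names:

struct ewma {
	unsigned long samples;	/* converges to 256 */
	unsigned long total;	/* decayed sum, scaled by 256 */
};

static unsigned long ewma_update(struct ewma *e, unsigned long sample)
{
	e->samples = (7 * e->samples + 256) / 8;
	e->total   = (7 * e->total + 256 * sample) / 8;
	return (e->total + 128) / e->samples;	/* rounded mean */
}
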
-+
-+static void bfq_update_io_seektime(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq,
-+ struct request *rq)
-+{
-+ sector_t sdist;
-+ u64 total;
-+
-+ if (bfqq->last_request_pos < blk_rq_pos(rq))
-+ sdist = blk_rq_pos(rq) - bfqq->last_request_pos;
-+ else
-+ sdist = bfqq->last_request_pos - blk_rq_pos(rq);
-+
-+ /*
-+ * Don't allow the seek distance to get too large from the
-+ * odd fragment, pagein, etc.
-+ */
-+ if (bfqq->seek_samples == 0) /* first request, not really a seek */
-+ sdist = 0;
-+ else if (bfqq->seek_samples <= 60) /* second & third seek */
-+ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024);
-+ else
-+ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64);
-+
-+ bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8;
-+ bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8;
-+ total = bfqq->seek_total + (bfqq->seek_samples/2);
-+ do_div(total, bfqq->seek_samples);
-+ bfqq->seek_mean = (sector_t)total;
-+
-+ bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist,
-+ (u64)bfqq->seek_mean);
-+}
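
[Editorial note, not part of the original patch.] The seek-distance average
uses the same 7/8-decay fixed-point scheme as the think-time code, with the
incoming distance clamped against the current mean so that a single odd
fragment or pagein cannot skew it. A hypothetical sketch of the clamping
step alone:

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Clamp a new seek distance against the running mean (names hypothetical). */
static unsigned long clamp_seek_distance(unsigned long sdist,
					 unsigned long samples,
					 unsigned long mean)
{
	if (samples == 0)	/* first request, not really a seek */
		return 0;
	if (samples <= 60)	/* second & third seek: loose bound */
		return min_ul(sdist, 4 * mean + 2 * 1024 * 1024);
	return min_ul(sdist, 4 * mean + 2 * 1024 * 64);
}
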
-+
-+/*
-+ * Disable idle window if the process thinks too long or seeks so much that
-+ * it doesn't matter.
-+ */
-+static void bfq_update_idle_window(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq,
-+ struct bfq_io_cq *bic)
-+{
-+ int enable_idle;
-+
-+ /* Don't idle for async or idle io prio class. */
-+ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
-+ return;
-+
-+ enable_idle = bfq_bfqq_idle_window(bfqq);
-+
-+ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
-+ bfqd->bfq_slice_idle == 0 ||
-+ (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
-+ bfqq->wr_coeff == 1))
-+ enable_idle = 0;
-+ else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
-+ if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
-+ bfqq->wr_coeff == 1)
-+ enable_idle = 0;
-+ else
-+ enable_idle = 1;
-+ }
-+ bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
-+ enable_idle);
-+
-+ if (enable_idle)
-+ bfq_mark_bfqq_idle_window(bfqq);
-+ else
-+ bfq_clear_bfqq_idle_window(bfqq);
-+}
-+
-+/*
-+ * Called when a new fs request (rq) is added to bfqq. Check if there's
-+ * something we should do about it.
-+ */
-+static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-+ struct request *rq)
-+{
-+ struct bfq_io_cq *bic = RQ_BIC(rq);
-+
-+ if (rq->cmd_flags & REQ_META)
-+ bfqq->meta_pending++;
-+
-+ bfq_update_io_thinktime(bfqd, bic);
-+ bfq_update_io_seektime(bfqd, bfqq, rq);
-+ if (!BFQQ_SEEKY(bfqq) && bfq_bfqq_constantly_seeky(bfqq)) {
-+ bfq_clear_bfqq_constantly_seeky(bfqq);
-+ if (!blk_queue_nonrot(bfqd->queue)) {
-+ BUG_ON(!bfqd->const_seeky_busy_in_flight_queues);
-+ bfqd->const_seeky_busy_in_flight_queues--;
-+ }
-+ }
-+ if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
-+ !BFQQ_SEEKY(bfqq))
-+ bfq_update_idle_window(bfqd, bfqq, bic);
-+
-+ bfq_log_bfqq(bfqd, bfqq,
-+ "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
-+ bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
-+ (long long unsigned)bfqq->seek_mean);
-+
-+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
-+
-+ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
-+ int small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
-+ blk_rq_sectors(rq) < 32;
-+ int budget_timeout = bfq_bfqq_budget_timeout(bfqq);
-+
-+ /*
-+ * There is just this request queued: if the request
-+ * is small and the queue is not to be expired, then
-+ * just exit.
-+ *
-+ * In this way, if the disk is being idled to wait for
-+ * a new request from the in-service queue, we avoid
-+ * unplugging the device and committing the disk to serve
-+		 * just a small request. Instead, we wait for
-+ * the block layer to decide when to unplug the device:
-+ * hopefully, new requests will be merged to this one
-+ * quickly, then the device will be unplugged and
-+ * larger requests will be dispatched.
-+ */
-+ if (small_req && !budget_timeout)
-+ return;
-+
-+ /*
-+ * A large enough request arrived, or the queue is to
-+ * be expired: in both cases disk idling is to be
-+ * stopped, so clear wait_request flag and reset
-+ * timer.
-+ */
-+ bfq_clear_bfqq_wait_request(bfqq);
-+ del_timer(&bfqd->idle_slice_timer);
-+
-+ /*
-+ * The queue is not empty, because a new request just
-+ * arrived. Hence we can safely expire the queue, in
-+ * case of budget timeout, without risking that the
-+ * timestamps of the queue are not updated correctly.
-+ * See [1] for more details.
-+ */
-+ if (budget_timeout)
-+ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
-+
-+ /*
-+ * Let the request rip immediately, or let a new queue be
-+ * selected if bfqq has just been expired.
-+ */
-+ __blk_run_queue(bfqd->queue);
-+ }
-+}
-+
-+static void bfq_insert_request(struct request_queue *q, struct request *rq)
-+{
-+ struct bfq_data *bfqd = q->elevator->elevator_data;
-+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
-+
-+ assert_spin_locked(bfqd->queue->queue_lock);
-+ bfq_init_prio_data(bfqq, RQ_BIC(rq));
-+
-+ bfq_add_request(rq);
-+
-+ rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
-+ list_add_tail(&rq->queuelist, &bfqq->fifo);
-+
-+ bfq_rq_enqueued(bfqd, bfqq, rq);
-+}
-+
-+static void bfq_update_hw_tag(struct bfq_data *bfqd)
-+{
-+ bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver,
-+ bfqd->rq_in_driver);
-+
-+ if (bfqd->hw_tag == 1)
-+ return;
-+
-+	/*
-+	 * This sample is valid if the number of outstanding requests
-+	 * is large enough to exhibit queueing behavior. Note that the
-+	 * sum is not exact, as it does not take deactivated requests
-+	 * into account.
-+	 */
-+ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
-+ return;
-+
-+ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
-+ return;
-+
-+ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
-+ bfqd->max_rq_in_driver = 0;
-+ bfqd->hw_tag_samples = 0;
-+}
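
[Editorial sketch, not part of the original patch.] The heuristic above
records the peak number of requests seen in the driver and, once enough
valid samples have accumulated, declares the device NCQ-capable if that
peak exceeded a threshold. A plain-C restatement; the constants stand in
for BFQ_HW_QUEUE_THRESHOLD and BFQ_HW_QUEUE_SAMPLES, whose values are
assumed here:

enum { QUEUE_THRESHOLD = 4, QUEUE_SAMPLES = 32 };	/* assumed values */

/* Returns -1 while undecided, else 1/0 for queueing/non-queueing. */
static int detect_queueing(int *max_in_driver, int in_driver, int queued,
			   int *nsamples)
{
	if (in_driver > *max_in_driver)
		*max_in_driver = in_driver;
	if (in_driver + queued < QUEUE_THRESHOLD)
		return -1;			/* sample not valid */
	if ((*nsamples)++ < QUEUE_SAMPLES)
		return -1;			/* not enough samples yet */
	return *max_in_driver > QUEUE_THRESHOLD;
}
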
-+
-+static void bfq_completed_request(struct request_queue *q, struct request *rq)
-+{
-+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
-+ struct bfq_data *bfqd = bfqq->bfqd;
-+ bool sync = bfq_bfqq_sync(bfqq);
-+
-+ bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left (%d)",
-+ blk_rq_sectors(rq), sync);
-+
-+ bfq_update_hw_tag(bfqd);
-+
-+ BUG_ON(!bfqd->rq_in_driver);
-+ BUG_ON(!bfqq->dispatched);
-+ bfqd->rq_in_driver--;
-+ bfqq->dispatched--;
-+
-+ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
-+ bfq_weights_tree_remove(bfqd, &bfqq->entity,
-+ &bfqd->queue_weights_tree);
-+ if (!blk_queue_nonrot(bfqd->queue)) {
-+ BUG_ON(!bfqd->busy_in_flight_queues);
-+ bfqd->busy_in_flight_queues--;
-+ if (bfq_bfqq_constantly_seeky(bfqq)) {
-+ BUG_ON(!bfqd->
-+ const_seeky_busy_in_flight_queues);
-+ bfqd->const_seeky_busy_in_flight_queues--;
-+ }
-+ }
-+ }
-+
-+ if (sync) {
-+ bfqd->sync_flight--;
-+ RQ_BIC(rq)->ttime.last_end_request = jiffies;
-+ }
-+
-+ /*
-+ * If we are waiting to discover whether the request pattern of the
-+ * task associated with the queue is actually isochronous, and
-+	 * both prerequisites for this condition are satisfied, then
-+ * compute soft_rt_next_start (see the comments to the function
-+ * bfq_bfqq_softrt_next_start()).
-+ */
-+ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
-+ RB_EMPTY_ROOT(&bfqq->sort_list))
-+ bfqq->soft_rt_next_start =
-+ bfq_bfqq_softrt_next_start(bfqd, bfqq);
-+
-+ /*
-+ * If this is the in-service queue, check if it needs to be expired,
-+ * or if we want to idle in case it has no pending requests.
-+ */
-+ if (bfqd->in_service_queue == bfqq) {
-+ if (bfq_bfqq_budget_new(bfqq))
-+ bfq_set_budget_timeout(bfqd);
-+
-+ if (bfq_bfqq_must_idle(bfqq)) {
-+ bfq_arm_slice_timer(bfqd);
-+ goto out;
-+ } else if (bfq_may_expire_for_budg_timeout(bfqq))
-+ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
-+ else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
-+ (bfqq->dispatched == 0 ||
-+ !bfq_bfqq_must_not_expire(bfqq)))
-+ bfq_bfqq_expire(bfqd, bfqq, 0,
-+ BFQ_BFQQ_NO_MORE_REQUESTS);
-+ }
-+
-+ if (!bfqd->rq_in_driver)
-+ bfq_schedule_dispatch(bfqd);
-+
-+out:
-+ return;
-+}
-+
-+static inline int __bfq_may_queue(struct bfq_queue *bfqq)
-+{
-+ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
-+ bfq_clear_bfqq_must_alloc(bfqq);
-+ return ELV_MQUEUE_MUST;
-+ }
-+
-+ return ELV_MQUEUE_MAY;
-+}
-+
-+static int bfq_may_queue(struct request_queue *q, int rw)
-+{
-+ struct bfq_data *bfqd = q->elevator->elevator_data;
-+ struct task_struct *tsk = current;
-+ struct bfq_io_cq *bic;
-+ struct bfq_queue *bfqq;
-+
-+ /*
-+ * Don't force setup of a queue from here, as a call to may_queue
-+ * does not necessarily imply that a request actually will be
-+ * queued. So just lookup a possibly existing queue, or return
-+ * 'may queue' if that fails.
-+ */
-+ bic = bfq_bic_lookup(bfqd, tsk->io_context);
-+ if (bic == NULL)
-+ return ELV_MQUEUE_MAY;
-+
-+ bfqq = bic_to_bfqq(bic, rw_is_sync(rw));
-+ if (bfqq != NULL) {
-+ bfq_init_prio_data(bfqq, bic);
-+
-+ return __bfq_may_queue(bfqq);
-+ }
-+
-+ return ELV_MQUEUE_MAY;
-+}
-+
-+/*
-+ * Queue lock held here.
-+ */
-+static void bfq_put_request(struct request *rq)
-+{
-+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
-+
-+ if (bfqq != NULL) {
-+ const int rw = rq_data_dir(rq);
-+
-+ BUG_ON(!bfqq->allocated[rw]);
-+ bfqq->allocated[rw]--;
-+
-+ rq->elv.priv[0] = NULL;
-+ rq->elv.priv[1] = NULL;
-+
-+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
-+ bfqq, atomic_read(&bfqq->ref));
-+ bfq_put_queue(bfqq);
-+ }
-+}
-+
-+static struct bfq_queue *
-+bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
-+ struct bfq_queue *bfqq)
-+{
-+ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
-+ (long unsigned)bfqq->new_bfqq->pid);
-+ bic_set_bfqq(bic, bfqq->new_bfqq, 1);
-+ bfq_mark_bfqq_coop(bfqq->new_bfqq);
-+ bfq_put_queue(bfqq);
-+ return bic_to_bfqq(bic, 1);
-+}
-+
-+/*
-+ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
-+ * was the last process referring to said bfqq.
-+ */
-+static struct bfq_queue *
-+bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
-+{
-+ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
-+ if (bfqq_process_refs(bfqq) == 1) {
-+ bfqq->pid = current->pid;
-+ bfq_clear_bfqq_coop(bfqq);
-+ bfq_clear_bfqq_split_coop(bfqq);
-+ return bfqq;
-+ }
-+
-+ bic_set_bfqq(bic, NULL, 1);
-+
-+ bfq_put_cooperator(bfqq);
-+
-+ bfq_put_queue(bfqq);
-+ return NULL;
-+}
-+
-+/*
-+ * Allocate bfq data structures associated with this request.
-+ */
-+static int bfq_set_request(struct request_queue *q, struct request *rq,
-+ struct bio *bio, gfp_t gfp_mask)
-+{
-+ struct bfq_data *bfqd = q->elevator->elevator_data;
-+ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
-+ const int rw = rq_data_dir(rq);
-+ const int is_sync = rq_is_sync(rq);
-+ struct bfq_queue *bfqq;
-+ struct bfq_group *bfqg;
-+ unsigned long flags;
-+
-+ might_sleep_if(gfp_mask & __GFP_WAIT);
-+
-+ bfq_changed_ioprio(bic);
-+
-+ spin_lock_irqsave(q->queue_lock, flags);
-+
-+ if (bic == NULL)
-+ goto queue_fail;
-+
-+ bfqg = bfq_bic_update_cgroup(bic);
-+
-+new_queue:
-+ bfqq = bic_to_bfqq(bic, is_sync);
-+ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
-+ bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
-+ bic_set_bfqq(bic, bfqq, is_sync);
-+ } else {
-+ /*
-+ * If the queue was seeky for too long, break it apart.
-+ */
-+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
-+ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
-+ bfqq = bfq_split_bfqq(bic, bfqq);
-+ if (!bfqq)
-+ goto new_queue;
-+ }
-+
-+ /*
-+ * Check to see if this queue is scheduled to merge with
-+ * another closely cooperating queue. The merging of queues
-+ * happens here as it must be done in process context.
-+ * The reference on new_bfqq was taken in merge_bfqqs.
-+ */
-+ if (bfqq->new_bfqq != NULL)
-+ bfqq = bfq_merge_bfqqs(bfqd, bic, bfqq);
-+ }
-+
-+ bfqq->allocated[rw]++;
-+ atomic_inc(&bfqq->ref);
-+ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq,
-+ atomic_read(&bfqq->ref));
-+
-+ rq->elv.priv[0] = bic;
-+ rq->elv.priv[1] = bfqq;
-+
-+ spin_unlock_irqrestore(q->queue_lock, flags);
-+
-+ return 0;
-+
-+queue_fail:
-+ bfq_schedule_dispatch(bfqd);
-+ spin_unlock_irqrestore(q->queue_lock, flags);
-+
-+ return 1;
-+}
-+
-+static void bfq_kick_queue(struct work_struct *work)
-+{
-+ struct bfq_data *bfqd =
-+ container_of(work, struct bfq_data, unplug_work);
-+ struct request_queue *q = bfqd->queue;
-+
-+ spin_lock_irq(q->queue_lock);
-+ __blk_run_queue(q);
-+ spin_unlock_irq(q->queue_lock);
-+}
-+
-+/*
-+ * Handler of the expiration of the timer running if the in-service queue
-+ * is idling inside its time slice.
-+ */
-+static void bfq_idle_slice_timer(unsigned long data)
-+{
-+ struct bfq_data *bfqd = (struct bfq_data *)data;
-+ struct bfq_queue *bfqq;
-+ unsigned long flags;
-+ enum bfqq_expiration reason;
-+
-+ spin_lock_irqsave(bfqd->queue->queue_lock, flags);
-+
-+ bfqq = bfqd->in_service_queue;
-+ /*
-+ * Theoretical race here: the in-service queue can be NULL or
-+ * different from the queue that was idling if the timer handler
-+ * spins on the queue_lock and a new request arrives for the
-+ * current queue and there is a full dispatch cycle that changes
-+	 * the in-service queue. This is unlikely to happen, but in the
-+	 * worst case we just expire a queue too early.
-+ */
-+ if (bfqq != NULL) {
-+ bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
-+ if (bfq_bfqq_budget_timeout(bfqq))
-+			/*
-+			 * Here too the queue can safely be expired
-+			 * for budget timeout without wasting
-+			 * guarantees.
-+			 */
-+ reason = BFQ_BFQQ_BUDGET_TIMEOUT;
-+ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
-+			/*
-+			 * The queue may not be empty upon timer expiration,
-+			 * because the timer is not necessarily disabled when
-+			 * the first request of the in-service queue arrives
-+			 * during disk idling.
-+			 */
-+ reason = BFQ_BFQQ_TOO_IDLE;
-+ else
-+ goto schedule_dispatch;
-+
-+ bfq_bfqq_expire(bfqd, bfqq, 1, reason);
-+ }
-+
-+schedule_dispatch:
-+ bfq_schedule_dispatch(bfqd);
-+
-+ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
-+}
-+
-+static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
-+{
-+ del_timer_sync(&bfqd->idle_slice_timer);
-+ cancel_work_sync(&bfqd->unplug_work);
-+}
-+
-+static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd,
-+ struct bfq_queue **bfqq_ptr)
-+{
-+ struct bfq_group *root_group = bfqd->root_group;
-+ struct bfq_queue *bfqq = *bfqq_ptr;
-+
-+ bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
-+ if (bfqq != NULL) {
-+ bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group);
-+ bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
-+ bfqq, atomic_read(&bfqq->ref));
-+ bfq_put_queue(bfqq);
-+ *bfqq_ptr = NULL;
-+ }
-+}
-+
-+/*
-+ * Release all the bfqg references to its async queues. If we are
-+ * deallocating the group, these queues may still contain requests, so
-+ * we reparent them to the root cgroup (i.e., the only one that will
-+ * exist for sure until all the requests on a device are gone).
-+ */
-+static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
-+{
-+ int i, j;
-+
-+ for (i = 0; i < 2; i++)
-+ for (j = 0; j < IOPRIO_BE_NR; j++)
-+ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
-+
-+ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
-+}
-+
-+static void bfq_exit_queue(struct elevator_queue *e)
-+{
-+ struct bfq_data *bfqd = e->elevator_data;
-+ struct request_queue *q = bfqd->queue;
-+ struct bfq_queue *bfqq, *n;
-+
-+ bfq_shutdown_timer_wq(bfqd);
-+
-+ spin_lock_irq(q->queue_lock);
-+
-+ BUG_ON(bfqd->in_service_queue != NULL);
-+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
-+ bfq_deactivate_bfqq(bfqd, bfqq, 0);
-+
-+ bfq_disconnect_groups(bfqd);
-+ spin_unlock_irq(q->queue_lock);
-+
-+ bfq_shutdown_timer_wq(bfqd);
-+
-+ synchronize_rcu();
-+
-+ BUG_ON(timer_pending(&bfqd->idle_slice_timer));
-+
-+ bfq_free_root_group(bfqd);
-+ kfree(bfqd);
-+}
-+
-+static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
-+{
-+ struct bfq_group *bfqg;
-+ struct bfq_data *bfqd;
-+ struct elevator_queue *eq;
-+
-+ eq = elevator_alloc(q, e);
-+ if (eq == NULL)
-+ return -ENOMEM;
-+
-+ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
-+ if (bfqd == NULL) {
-+ kobject_put(&eq->kobj);
-+ return -ENOMEM;
-+ }
-+ eq->elevator_data = bfqd;
-+
-+ /*
-+ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
-+ * Grab a permanent reference to it, so that the normal code flow
-+ * will not attempt to free it.
-+ */
-+ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0);
-+ atomic_inc(&bfqd->oom_bfqq.ref);
-+ bfqd->oom_bfqq.entity.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
-+ bfqd->oom_bfqq.entity.new_ioprio_class = IOPRIO_CLASS_BE;
-+ /*
-+ * Trigger weight initialization, according to ioprio, at the
-+ * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
-+ * class won't be changed any more.
-+ */
-+ bfqd->oom_bfqq.entity.ioprio_changed = 1;
-+
-+ bfqd->queue = q;
-+
-+ spin_lock_irq(q->queue_lock);
-+ q->elevator = eq;
-+ spin_unlock_irq(q->queue_lock);
-+
-+ bfqg = bfq_alloc_root_group(bfqd, q->node);
-+ if (bfqg == NULL) {
-+ kfree(bfqd);
-+ kobject_put(&eq->kobj);
-+ return -ENOMEM;
-+ }
-+
-+ bfqd->root_group = bfqg;
-+ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
-+#ifdef CONFIG_CGROUP_BFQIO
-+ bfqd->active_numerous_groups = 0;
-+#endif
-+
-+ init_timer(&bfqd->idle_slice_timer);
-+ bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
-+ bfqd->idle_slice_timer.data = (unsigned long)bfqd;
-+
-+ bfqd->rq_pos_tree = RB_ROOT;
-+ bfqd->queue_weights_tree = RB_ROOT;
-+ bfqd->group_weights_tree = RB_ROOT;
-+
-+ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
-+
-+ INIT_LIST_HEAD(&bfqd->active_list);
-+ INIT_LIST_HEAD(&bfqd->idle_list);
-+ INIT_HLIST_HEAD(&bfqd->burst_list);
-+
-+ bfqd->hw_tag = -1;
-+
-+ bfqd->bfq_max_budget = bfq_default_max_budget;
-+
-+ bfqd->bfq_quantum = bfq_quantum;
-+ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
-+ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
-+ bfqd->bfq_back_max = bfq_back_max;
-+ bfqd->bfq_back_penalty = bfq_back_penalty;
-+ bfqd->bfq_slice_idle = bfq_slice_idle;
-+ bfqd->bfq_class_idle_last_service = 0;
-+ bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq;
-+ bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
-+ bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
-+
-+ bfqd->bfq_coop_thresh = 2;
-+ bfqd->bfq_failed_cooperations = 7000;
-+ bfqd->bfq_requests_within_timer = 120;
-+
-+ bfqd->bfq_large_burst_thresh = 11;
-+ bfqd->bfq_burst_interval = msecs_to_jiffies(500);
-+
-+ bfqd->low_latency = true;
-+
-+ bfqd->bfq_wr_coeff = 20;
-+ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
-+ bfqd->bfq_wr_max_time = 0;
-+ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
-+ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
-+ bfqd->bfq_wr_max_softrt_rate = 7000; /*
-+ * Approximate rate required
-+ * to playback or record a
-+ * high-definition compressed
-+ * video.
-+ */
-+ bfqd->wr_busy_queues = 0;
-+ bfqd->busy_in_flight_queues = 0;
-+ bfqd->const_seeky_busy_in_flight_queues = 0;
-+
-+ /*
-+ * Begin by assuming, optimistically, that the device peak rate is
-+ * equal to the highest reference rate.
-+ */
-+ bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
-+ T_fast[blk_queue_nonrot(bfqd->queue)];
-+ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)];
-+ bfqd->device_speed = BFQ_BFQD_FAST;
-+
-+ return 0;
-+}
-+
-+static void bfq_slab_kill(void)
-+{
-+ if (bfq_pool != NULL)
-+ kmem_cache_destroy(bfq_pool);
-+}
-+
-+static int __init bfq_slab_setup(void)
-+{
-+ bfq_pool = KMEM_CACHE(bfq_queue, 0);
-+ if (bfq_pool == NULL)
-+ return -ENOMEM;
-+ return 0;
-+}
-+
-+static ssize_t bfq_var_show(unsigned int var, char *page)
-+{
-+ return sprintf(page, "%d\n", var);
-+}
-+
-+/*
-+ * Parse a decimal value written to a sysfs attribute. Note that on a
-+ * parse error *var is left untouched and the full count is returned
-+ * anyway, so invalid input is silently ignored.
-+ */
-+static ssize_t bfq_var_store(unsigned long *var, const char *page,
-+			     size_t count)
-+{
-+	unsigned long new_val;
-+	int ret = kstrtoul(page, 10, &new_val);
-+
-+	if (ret == 0)
-+		*var = new_val;
-+
-+	return count;
-+}
-+
-+static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
-+{
-+ struct bfq_data *bfqd = e->elevator_data;
-+ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
-+ jiffies_to_msecs(bfqd->bfq_wr_max_time) :
-+ jiffies_to_msecs(bfq_wr_duration(bfqd)));
-+}
-+
-+static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
-+{
-+ struct bfq_queue *bfqq;
-+ struct bfq_data *bfqd = e->elevator_data;
-+ ssize_t num_char = 0;
-+
-+ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
-+ bfqd->queued);
-+
-+ spin_lock_irq(bfqd->queue->queue_lock);
-+
-+ num_char += sprintf(page + num_char, "Active:\n");
-+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
-+ num_char += sprintf(page + num_char,
-+ "pid%d: weight %hu, nr_queued %d %d, dur %d/%u\n",
-+ bfqq->pid,
-+ bfqq->entity.weight,
-+ bfqq->queued[0],
-+ bfqq->queued[1],
-+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
-+ jiffies_to_msecs(bfqq->wr_cur_max_time));
-+ }
-+
-+ num_char += sprintf(page + num_char, "Idle:\n");
-+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
-+ num_char += sprintf(page + num_char,
-+ "pid%d: weight %hu, dur %d/%u\n",
-+ bfqq->pid,
-+ bfqq->entity.weight,
-+ jiffies_to_msecs(jiffies -
-+ bfqq->last_wr_start_finish),
-+ jiffies_to_msecs(bfqq->wr_cur_max_time));
-+ }
-+
-+ spin_unlock_irq(bfqd->queue->queue_lock);
-+
-+ return num_char;
-+}
-+
-+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
-+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
-+{ \
-+ struct bfq_data *bfqd = e->elevator_data; \
-+ unsigned int __data = __VAR; \
-+ if (__CONV) \
-+ __data = jiffies_to_msecs(__data); \
-+ return bfq_var_show(__data, (page)); \
-+}
-+SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0);
-+SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1);
-+SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1);
-+SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
-+SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
-+SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1);
-+SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
-+SHOW_FUNCTION(bfq_max_budget_async_rq_show,
-+ bfqd->bfq_max_budget_async_rq, 0);
-+SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1);
-+SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1);
-+SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
-+SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
-+SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
-+SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1);
-+SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async,
-+ 1);
-+SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0);
-+#undef SHOW_FUNCTION
-+
-+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
-+static ssize_t \
-+__FUNC(struct elevator_queue *e, const char *page, size_t count) \
-+{ \
-+ struct bfq_data *bfqd = e->elevator_data; \
-+ unsigned long uninitialized_var(__data); \
-+ int ret = bfq_var_store(&__data, (page), count); \
-+ if (__data < (MIN)) \
-+ __data = (MIN); \
-+ else if (__data > (MAX)) \
-+ __data = (MAX); \
-+ if (__CONV) \
-+ *(__PTR) = msecs_to_jiffies(__data); \
-+ else \
-+ *(__PTR) = __data; \
-+ return ret; \
-+}
-+STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0);
-+STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
-+ INT_MAX, 1);
-+STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
-+ INT_MAX, 1);
-+STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
-+STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
-+ INT_MAX, 0);
-+STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1);
-+STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq,
-+ 1, INT_MAX, 0);
-+STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0,
-+ INT_MAX, 1);
-+STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
-+STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
-+STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
-+ 1);
-+STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0,
-+ INT_MAX, 1);
-+STORE_FUNCTION(bfq_wr_min_inter_arr_async_store,
-+ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1);
-+STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0,
-+ INT_MAX, 0);
-+#undef STORE_FUNCTION
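
[Editorial note, not part of the original patch.] Each STORE_FUNCTION()
invocation above expands to a complete sysfs store handler. For reference,
the bfq_quantum_store instance expands roughly to the following (modulo the
uninitialized_var() annotation):

static ssize_t bfq_quantum_store(struct elevator_queue *e,
				 const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long __data;
	int ret = bfq_var_store(&__data, page, count);

	if (__data < 1)			/* MIN */
		__data = 1;
	else if (__data > INT_MAX)	/* MAX */
		__data = INT_MAX;
	bfqd->bfq_quantum = __data;	/* __CONV == 0: no ms-to-jiffies */
	return ret;
}
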
-+
-+/* do nothing for the moment */
-+static ssize_t bfq_weights_store(struct elevator_queue *e,
-+ const char *page, size_t count)
-+{
-+ return count;
-+}
-+
-+static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
-+{
-+ u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
-+
-+ if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES)
-+ return bfq_calc_max_budget(bfqd->peak_rate, timeout);
-+ else
-+ return bfq_default_max_budget;
-+}
-+
-+static ssize_t bfq_max_budget_store(struct elevator_queue *e,
-+ const char *page, size_t count)
-+{
-+ struct bfq_data *bfqd = e->elevator_data;
-+ unsigned long uninitialized_var(__data);
-+ int ret = bfq_var_store(&__data, (page), count);
-+
-+ if (__data == 0)
-+ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
-+ else {
-+ if (__data > INT_MAX)
-+ __data = INT_MAX;
-+ bfqd->bfq_max_budget = __data;
-+ }
-+
-+ bfqd->bfq_user_max_budget = __data;
-+
-+ return ret;
-+}
-+
-+static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
-+ const char *page, size_t count)
-+{
-+ struct bfq_data *bfqd = e->elevator_data;
-+ unsigned long uninitialized_var(__data);
-+ int ret = bfq_var_store(&__data, (page), count);
-+
-+ if (__data < 1)
-+ __data = 1;
-+ else if (__data > INT_MAX)
-+ __data = INT_MAX;
-+
-+ bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data);
-+ if (bfqd->bfq_user_max_budget == 0)
-+ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
-+
-+ return ret;
-+}
-+
-+static ssize_t bfq_low_latency_store(struct elevator_queue *e,
-+ const char *page, size_t count)
-+{
-+ struct bfq_data *bfqd = e->elevator_data;
-+ unsigned long uninitialized_var(__data);
-+ int ret = bfq_var_store(&__data, (page), count);
-+
-+ if (__data > 1)
-+ __data = 1;
-+ if (__data == 0 && bfqd->low_latency != 0)
-+ bfq_end_wr(bfqd);
-+ bfqd->low_latency = __data;
-+
-+ return ret;
-+}
-+
-+#define BFQ_ATTR(name) \
-+ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
-+
-+static struct elv_fs_entry bfq_attrs[] = {
-+ BFQ_ATTR(quantum),
-+ BFQ_ATTR(fifo_expire_sync),
-+ BFQ_ATTR(fifo_expire_async),
-+ BFQ_ATTR(back_seek_max),
-+ BFQ_ATTR(back_seek_penalty),
-+ BFQ_ATTR(slice_idle),
-+ BFQ_ATTR(max_budget),
-+ BFQ_ATTR(max_budget_async_rq),
-+ BFQ_ATTR(timeout_sync),
-+ BFQ_ATTR(timeout_async),
-+ BFQ_ATTR(low_latency),
-+ BFQ_ATTR(wr_coeff),
-+ BFQ_ATTR(wr_max_time),
-+ BFQ_ATTR(wr_rt_max_time),
-+ BFQ_ATTR(wr_min_idle_time),
-+ BFQ_ATTR(wr_min_inter_arr_async),
-+ BFQ_ATTR(wr_max_softrt_rate),
-+ BFQ_ATTR(weights),
-+ __ATTR_NULL
-+};
-+
-+static struct elevator_type iosched_bfq = {
-+ .ops = {
-+ .elevator_merge_fn = bfq_merge,
-+ .elevator_merged_fn = bfq_merged_request,
-+ .elevator_merge_req_fn = bfq_merged_requests,
-+ .elevator_allow_merge_fn = bfq_allow_merge,
-+ .elevator_dispatch_fn = bfq_dispatch_requests,
-+ .elevator_add_req_fn = bfq_insert_request,
-+ .elevator_activate_req_fn = bfq_activate_request,
-+ .elevator_deactivate_req_fn = bfq_deactivate_request,
-+ .elevator_completed_req_fn = bfq_completed_request,
-+ .elevator_former_req_fn = elv_rb_former_request,
-+ .elevator_latter_req_fn = elv_rb_latter_request,
-+ .elevator_init_icq_fn = bfq_init_icq,
-+ .elevator_exit_icq_fn = bfq_exit_icq,
-+ .elevator_set_req_fn = bfq_set_request,
-+ .elevator_put_req_fn = bfq_put_request,
-+ .elevator_may_queue_fn = bfq_may_queue,
-+ .elevator_init_fn = bfq_init_queue,
-+ .elevator_exit_fn = bfq_exit_queue,
-+ },
-+ .icq_size = sizeof(struct bfq_io_cq),
-+ .icq_align = __alignof__(struct bfq_io_cq),
-+ .elevator_attrs = bfq_attrs,
-+ .elevator_name = "bfq",
-+ .elevator_owner = THIS_MODULE,
-+};
-+
-+static int __init bfq_init(void)
-+{
-+ /*
-+ * Can be 0 on HZ < 1000 setups.
-+ */
-+ if (bfq_slice_idle == 0)
-+ bfq_slice_idle = 1;
-+
-+ if (bfq_timeout_async == 0)
-+ bfq_timeout_async = 1;
-+
-+ if (bfq_slab_setup())
-+ return -ENOMEM;
-+
-+ /*
-+ * Times to load large popular applications for the typical systems
-+ * installed on the reference devices (see the comments before the
-+ * definitions of the two arrays).
-+ */
-+ T_slow[0] = msecs_to_jiffies(2600);
-+ T_slow[1] = msecs_to_jiffies(1000);
-+ T_fast[0] = msecs_to_jiffies(5500);
-+ T_fast[1] = msecs_to_jiffies(2000);
-+
-+ /*
-+ * Thresholds that determine the switch between speed classes (see
-+ * the comments before the definition of the array).
-+ */
-+ device_speed_thresh[0] = (R_fast[0] + R_slow[0]) / 2;
-+ device_speed_thresh[1] = (R_fast[1] + R_slow[1]) / 2;
-+
-+ elv_register(&iosched_bfq);
-+ pr_info("BFQ I/O-scheduler version: v7r7");
-+
-+ return 0;
-+}
-+
-+static void __exit bfq_exit(void)
-+{
-+ elv_unregister(&iosched_bfq);
-+ bfq_slab_kill();
-+}
-+
-+module_init(bfq_init);
-+module_exit(bfq_exit);
-+
-+MODULE_AUTHOR("Fabio Checconi, Paolo Valente");
-+MODULE_LICENSE("GPL");
-diff --git a/block/bfq-sched.c b/block/bfq-sched.c
-new file mode 100644
-index 0000000..2931563
---- /dev/null
-+++ b/block/bfq-sched.c
-@@ -0,0 +1,1214 @@
-+/*
-+ * BFQ: Hierarchical B-WF2Q+ scheduler.
-+ *
-+ * Based on ideas and code from CFQ:
-+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
-+ *
-+ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
-+ * Paolo Valente <paolo.valente@unimore.it>
-+ *
-+ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
-+ */
-+
-+#ifdef CONFIG_CGROUP_BFQIO
-+#define for_each_entity(entity) \
-+ for (; entity != NULL; entity = entity->parent)
-+
-+#define for_each_entity_safe(entity, parent) \
-+ for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
-+
-+static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
-+ int extract,
-+ struct bfq_data *bfqd);
-+
-+static inline void bfq_update_budget(struct bfq_entity *next_in_service)
-+{
-+ struct bfq_entity *bfqg_entity;
-+ struct bfq_group *bfqg;
-+ struct bfq_sched_data *group_sd;
-+
-+ BUG_ON(next_in_service == NULL);
-+
-+ group_sd = next_in_service->sched_data;
-+
-+ bfqg = container_of(group_sd, struct bfq_group, sched_data);
-+ /*
-+ * bfq_group's my_entity field is not NULL only if the group
-+ * is not the root group. We must not touch the root entity
-+ * as it must never become an in-service entity.
-+ */
-+ bfqg_entity = bfqg->my_entity;
-+ if (bfqg_entity != NULL)
-+ bfqg_entity->budget = next_in_service->budget;
-+}
-+
-+static int bfq_update_next_in_service(struct bfq_sched_data *sd)
-+{
-+ struct bfq_entity *next_in_service;
-+
-+ if (sd->in_service_entity != NULL)
-+ /* will update/requeue at the end of service */
-+ return 0;
-+
-+	/*
-+	 * NOTE: this can be improved in many ways, such as returning
-+	 * 1 (and thus propagating the update upwards) only when the
-+	 * budget changes, or caching the bfqq that will be scheduled
-+	 * next from this subtree. For now we worry more about
-+	 * correctness than about performance...
-+	 */
-+ next_in_service = bfq_lookup_next_entity(sd, 0, NULL);
-+ sd->next_in_service = next_in_service;
-+
-+ if (next_in_service != NULL)
-+ bfq_update_budget(next_in_service);
-+
-+ return 1;
-+}
-+
-+static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
-+ struct bfq_entity *entity)
-+{
-+ BUG_ON(sd->next_in_service != entity);
-+}
-+#else
-+#define for_each_entity(entity) \
-+ for (; entity != NULL; entity = NULL)
-+
-+#define for_each_entity_safe(entity, parent) \
-+ for (parent = NULL; entity != NULL; entity = parent)
-+
-+static inline int bfq_update_next_in_service(struct bfq_sched_data *sd)
-+{
-+ return 0;
-+}
-+
-+static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
-+ struct bfq_entity *entity)
-+{
-+}
-+
-+static inline void bfq_update_budget(struct bfq_entity *next_in_service)
-+{
-+}
-+#endif
-+
-+/*
-+ * Shift for timestamp calculations. This actually limits the maximum
-+ * service allowed in one timestamp delta (small shift values increase it),
-+ * the maximum total weight that can be used for the queues in the system
-+ * (big shift values increase it), and the period of virtual time
-+ * wraparounds.
-+ */
-+#define WFQ_SERVICE_SHIFT 22
-+
-+/**
-+ * bfq_gt - compare two timestamps.
-+ * @a: first ts.
-+ * @b: second ts.
-+ *
-+ * Return @a > @b, dealing with wrapping correctly.
-+ */
-+static inline int bfq_gt(u64 a, u64 b)
-+{
-+ return (s64)(a - b) > 0;
-+}
-+
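-+/*
-+ * Illustrative check (not part of the original patch): the signed
-+ * difference above keeps the comparison correct across u64 wraparound.
-+ * With a = 2 and b = ULLONG_MAX (i.e., b was taken just before the
-+ * counter wrapped), a - b equals 3 modulo 2^64, so (s64)(a - b) is
-+ * positive and bfq_gt(a, b) correctly reports that a is the later
-+ * timestamp, whereas a plain 'a > b' comparison would not.
-+ */
-+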
-+static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
-+{
-+ struct bfq_queue *bfqq = NULL;
-+
-+ BUG_ON(entity == NULL);
-+
-+ if (entity->my_sched_data == NULL)
-+ bfqq = container_of(entity, struct bfq_queue, entity);
-+
-+ return bfqq;
-+}
-+
-+/**
-+ * bfq_delta - map service into the virtual time domain.
-+ * @service: amount of service.
-+ * @weight: scale factor (weight of an entity or weight sum).
-+ */
-+static inline u64 bfq_delta(unsigned long service,
-+ unsigned long weight)
-+{
-+ u64 d = (u64)service << WFQ_SERVICE_SHIFT;
-+
-+ do_div(d, weight);
-+ return d;
-+}
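-+
-+/*
-+ * Worked example (illustrative only): with WFQ_SERVICE_SHIFT = 22,
-+ * charging 100 units of service to an entity of weight 4 yields
-+ * bfq_delta(100, 4) = (100 << 22) / 4 = 104857600, twice the value
-+ * obtained at weight 8: the heavier the entity, the more slowly its
-+ * virtual-time deltas grow for the same amount of service.
-+ */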
-+
-+/**
-+ * bfq_calc_finish - assign the finish time to an entity.
-+ * @entity: the entity to act upon.
-+ * @service: the service to be charged to the entity.
-+ */
-+static inline void bfq_calc_finish(struct bfq_entity *entity,
-+ unsigned long service)
-+{
-+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-+
-+ BUG_ON(entity->weight == 0);
-+
-+ entity->finish = entity->start +
-+ bfq_delta(service, entity->weight);
-+
-+ if (bfqq != NULL) {
-+ bfq_log_bfqq(bfqq->bfqd, bfqq,
-+ "calc_finish: serv %lu, w %d",
-+ service, entity->weight);
-+ bfq_log_bfqq(bfqq->bfqd, bfqq,
-+ "calc_finish: start %llu, finish %llu, delta %llu",
-+ entity->start, entity->finish,
-+ bfq_delta(service, entity->weight));
-+ }
-+}
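-+
-+/*
-+ * Worked example (illustrative only): an entity with start = 0 and
-+ * weight = 4 that is charged a full budget of 8192 units gets
-+ * finish = 0 + (8192 << 22) / 4; doubling its weight would halve that
-+ * distance, making the entity schedulable again sooner under B-WF2Q+.
-+ */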
-+
-+/**
-+ * bfq_entity_of - get an entity from a node.
-+ * @node: the node field of the entity.
-+ *
-+ * Convert a node pointer to the corresponding entity. This is used only
-+ * to simplify the logic of some functions and not as the generic
-+ * conversion mechanism because, e.g., in the tree walking functions,
-+ * the check for a %NULL value would be redundant.
-+ */
-+static inline struct bfq_entity *bfq_entity_of(struct rb_node *node)
-+{
-+ struct bfq_entity *entity = NULL;
-+
-+ if (node != NULL)
-+ entity = rb_entry(node, struct bfq_entity, rb_node);
-+
-+ return entity;
-+}
-+
-+/**
-+ * bfq_extract - remove an entity from a tree.
-+ * @root: the tree root.
-+ * @entity: the entity to remove.
-+ */
-+static inline void bfq_extract(struct rb_root *root,
-+ struct bfq_entity *entity)
-+{
-+ BUG_ON(entity->tree != root);
-+
-+ entity->tree = NULL;
-+ rb_erase(&entity->rb_node, root);
-+}
-+
-+/**
-+ * bfq_idle_extract - extract an entity from the idle tree.
-+ * @st: the service tree of the owning @entity.
-+ * @entity: the entity being removed.
-+ */
-+static void bfq_idle_extract(struct bfq_service_tree *st,
-+ struct bfq_entity *entity)
-+{
-+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-+ struct rb_node *next;
-+
-+ BUG_ON(entity->tree != &st->idle);
-+
-+ if (entity == st->first_idle) {
-+ next = rb_next(&entity->rb_node);
-+ st->first_idle = bfq_entity_of(next);
-+ }
-+
-+ if (entity == st->last_idle) {
-+ next = rb_prev(&entity->rb_node);
-+ st->last_idle = bfq_entity_of(next);
-+ }
-+
-+ bfq_extract(&st->idle, entity);
-+
-+ if (bfqq != NULL)
-+ list_del(&bfqq->bfqq_list);
-+}
-+
-+/**
-+ * bfq_insert - generic tree insertion.
-+ * @root: tree root.
-+ * @entity: entity to insert.
-+ *
-+ * This is used for the idle and the active tree, since they are both
-+ * ordered by finish time.
-+ */
-+static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
-+{
-+ struct bfq_entity *entry;
-+ struct rb_node **node = &root->rb_node;
-+ struct rb_node *parent = NULL;
-+
-+ BUG_ON(entity->tree != NULL);
-+
-+ while (*node != NULL) {
-+ parent = *node;
-+ entry = rb_entry(parent, struct bfq_entity, rb_node);
-+
-+ if (bfq_gt(entry->finish, entity->finish))
-+ node = &parent->rb_left;
-+ else
-+ node = &parent->rb_right;
-+ }
-+
-+ rb_link_node(&entity->rb_node, parent, node);
-+ rb_insert_color(&entity->rb_node, root);
-+
-+ entity->tree = root;
-+}
-+
-+/**
-+ * bfq_update_min - update the min_start field of an entity.
-+ * @entity: the entity to update.
-+ * @node: one of its children.
-+ *
-+ * This function is called when @entity may store an invalid value for
-+ * min_start due to updates to the active tree. The function assumes
-+ * that the subtree rooted at @node (which may be its left or its right
-+ * child) has a valid min_start value.
-+ */
-+static inline void bfq_update_min(struct bfq_entity *entity,
-+ struct rb_node *node)
-+{
-+ struct bfq_entity *child;
-+
-+ if (node != NULL) {
-+ child = rb_entry(node, struct bfq_entity, rb_node);
-+ if (bfq_gt(entity->min_start, child->min_start))
-+ entity->min_start = child->min_start;
-+ }
-+}
-+
-+/**
-+ * bfq_update_active_node - recalculate min_start.
-+ * @node: the node to update.
-+ *
-+ * @node may have changed position or one of its children may have moved,
-+ * this function updates its min_start value. The left and right subtrees
-+ * are assumed to hold a correct min_start value.
-+ */
-+static inline void bfq_update_active_node(struct rb_node *node)
-+{
-+ struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
-+
-+ entity->min_start = entity->start;
-+ bfq_update_min(entity, node->rb_right);
-+ bfq_update_min(entity, node->rb_left);
-+}
-+
-+/**
-+ * bfq_update_active_tree - update min_start for the whole active tree.
-+ * @node: the starting node.
-+ *
-+ * @node must be the deepest modified node after an update. This function
-+ * updates its min_start using the values held by its children, assuming
-+ * that they did not change, and then updates all the nodes that may have
-+ * changed in the path to the root. The only nodes that may have changed
-+ * are the ones in the path or their siblings.
-+ */
-+static void bfq_update_active_tree(struct rb_node *node)
-+{
-+ struct rb_node *parent;
-+
-+up:
-+ bfq_update_active_node(node);
-+
-+ parent = rb_parent(node);
-+ if (parent == NULL)
-+ return;
-+
-+ if (node == parent->rb_left && parent->rb_right != NULL)
-+ bfq_update_active_node(parent->rb_right);
-+ else if (parent->rb_left != NULL)
-+ bfq_update_active_node(parent->rb_left);
-+
-+ node = parent;
-+ goto up;
-+}
-+
-+static void bfq_weights_tree_add(struct bfq_data *bfqd,
-+ struct bfq_entity *entity,
-+ struct rb_root *root);
-+
-+static void bfq_weights_tree_remove(struct bfq_data *bfqd,
-+ struct bfq_entity *entity,
-+ struct rb_root *root);
-+
-+/**
-+ * bfq_active_insert - insert an entity in the active tree of its
-+ * group/device.
-+ * @st: the service tree of the entity.
-+ * @entity: the entity being inserted.
-+ *
-+ * The active tree is ordered by finish time, but an extra key is kept
-+ * in each node, containing the minimum value for the start times of
-+ * its children (and the node itself), so it's possible to search for
-+ * the eligible node with the lowest finish time in logarithmic time.
-+ */
-+static void bfq_active_insert(struct bfq_service_tree *st,
-+ struct bfq_entity *entity)
-+{
-+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-+ struct rb_node *node = &entity->rb_node;
-+#ifdef CONFIG_CGROUP_BFQIO
-+ struct bfq_sched_data *sd = NULL;
-+ struct bfq_group *bfqg = NULL;
-+ struct bfq_data *bfqd = NULL;
-+#endif
-+
-+ bfq_insert(&st->active, entity);
-+
-+ if (node->rb_left != NULL)
-+ node = node->rb_left;
-+ else if (node->rb_right != NULL)
-+ node = node->rb_right;
-+
-+ bfq_update_active_tree(node);
-+
-+#ifdef CONFIG_CGROUP_BFQIO
-+ sd = entity->sched_data;
-+ bfqg = container_of(sd, struct bfq_group, sched_data);
-+ BUG_ON(!bfqg);
-+ bfqd = (struct bfq_data *)bfqg->bfqd;
-+#endif
-+ if (bfqq != NULL)
-+ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
-+#ifdef CONFIG_CGROUP_BFQIO
-+ else { /* bfq_group */
-+ BUG_ON(!bfqd);
-+ bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
-+ }
-+ if (bfqg != bfqd->root_group) {
-+ BUG_ON(!bfqg);
-+ BUG_ON(!bfqd);
-+ bfqg->active_entities++;
-+ if (bfqg->active_entities == 2)
-+ bfqd->active_numerous_groups++;
-+ }
-+#endif
-+}
-+
-+/**
-+ * bfq_ioprio_to_weight - calc a weight from an ioprio.
-+ * @ioprio: the ioprio value to convert.
-+ */
-+static inline unsigned short bfq_ioprio_to_weight(int ioprio)
-+{
-+ BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR);
-+ return IOPRIO_BE_NR - ioprio;
-+}
-+
-+/**
-+ * bfq_weight_to_ioprio - calc an ioprio from a weight.
-+ * @weight: the weight value to convert.
-+ *
-+ * To preserve as much as possible the old only-ioprio user interface,
-+ * 0 is used as an escape ioprio value for weights (numerically) equal to
-+ * or larger than IOPRIO_BE_NR.
-+ */
-+static inline unsigned short bfq_weight_to_ioprio(int weight)
-+{
-+ BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT);
-+ return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight;
-+}
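-+
-+/*
-+ * Illustrative mapping (IOPRIO_BE_NR is 8): ioprio 0, the highest
-+ * best-effort priority, maps to weight 8, while ioprio 7 maps to
-+ * weight 1, so an ioprio-0 queue receives eight times the service of
-+ * an ioprio-7 queue. In the reverse direction any weight >= 8, e.g. a
-+ * cgroup-assigned weight of 100, falls back to the escape ioprio 0.
-+ */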
-+
-+static inline void bfq_get_entity(struct bfq_entity *entity)
-+{
-+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-+
-+ if (bfqq != NULL) {
-+ atomic_inc(&bfqq->ref);
-+ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
-+ bfqq, atomic_read(&bfqq->ref));
-+ }
-+}
-+
-+/**
-+ * bfq_find_deepest - find the deepest node that an extraction can modify.
-+ * @node: the node being removed.
-+ *
-+ * Do the first step of an extraction in an rb tree, looking for the
-+ * node that will replace @node, and returning the deepest node that
-+ * the following modifications to the tree can touch. If @node is the
-+ * last node in the tree return %NULL.
-+ */
-+static struct rb_node *bfq_find_deepest(struct rb_node *node)
-+{
-+ struct rb_node *deepest;
-+
-+ if (node->rb_right == NULL && node->rb_left == NULL)
-+ deepest = rb_parent(node);
-+ else if (node->rb_right == NULL)
-+ deepest = node->rb_left;
-+ else if (node->rb_left == NULL)
-+ deepest = node->rb_right;
-+ else {
-+ deepest = rb_next(node);
-+ if (deepest->rb_right != NULL)
-+ deepest = deepest->rb_right;
-+ else if (rb_parent(deepest) != node)
-+ deepest = rb_parent(deepest);
-+ }
-+
-+ return deepest;
-+}
-+
-+/**
-+ * bfq_active_extract - remove an entity from the active tree.
-+ * @st: the service_tree containing the tree.
-+ * @entity: the entity being removed.
-+ */
-+static void bfq_active_extract(struct bfq_service_tree *st,
-+ struct bfq_entity *entity)
-+{
-+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-+ struct rb_node *node;
-+#ifdef CONFIG_CGROUP_BFQIO
-+ struct bfq_sched_data *sd = NULL;
-+ struct bfq_group *bfqg = NULL;
-+ struct bfq_data *bfqd = NULL;
-+#endif
-+
-+ node = bfq_find_deepest(&entity->rb_node);
-+ bfq_extract(&st->active, entity);
-+
-+ if (node != NULL)
-+ bfq_update_active_tree(node);
-+
-+#ifdef CONFIG_CGROUP_BFQIO
-+ sd = entity->sched_data;
-+ bfqg = container_of(sd, struct bfq_group, sched_data);
-+ BUG_ON(!bfqg);
-+ bfqd = (struct bfq_data *)bfqg->bfqd;
-+#endif
-+ if (bfqq != NULL)
-+ list_del(&bfqq->bfqq_list);
-+#ifdef CONFIG_CGROUP_BFQIO
-+ else { /* bfq_group */
-+ BUG_ON(!bfqd);
-+ bfq_weights_tree_remove(bfqd, entity,
-+ &bfqd->group_weights_tree);
-+ }
-+ if (bfqg != bfqd->root_group) {
-+ BUG_ON(!bfqg);
-+ BUG_ON(!bfqd);
-+ BUG_ON(!bfqg->active_entities);
-+ bfqg->active_entities--;
-+ if (bfqg->active_entities == 1) {
-+ BUG_ON(!bfqd->active_numerous_groups);
-+ bfqd->active_numerous_groups--;
-+ }
-+ }
-+#endif
-+}
-+
-+/**
-+ * bfq_idle_insert - insert an entity into the idle tree.
-+ * @st: the service tree containing the tree.
-+ * @entity: the entity to insert.
-+ */
-+static void bfq_idle_insert(struct bfq_service_tree *st,
-+ struct bfq_entity *entity)
-+{
-+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-+ struct bfq_entity *first_idle = st->first_idle;
-+ struct bfq_entity *last_idle = st->last_idle;
-+
-+ if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish))
-+ st->first_idle = entity;
-+ if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish))
-+ st->last_idle = entity;
-+
-+ bfq_insert(&st->idle, entity);
-+
-+ if (bfqq != NULL)
-+ list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
-+}
-+
-+/**
-+ * bfq_forget_entity - remove an entity from the wfq trees.
-+ * @st: the service tree.
-+ * @entity: the entity being removed.
-+ *
-+ * Update the device status and forget everything about @entity, putting
-+ * the device reference to it, if it is a queue. Entities belonging to
-+ * groups are not refcounted.
-+ */
-+static void bfq_forget_entity(struct bfq_service_tree *st,
-+ struct bfq_entity *entity)
-+{
-+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-+ struct bfq_sched_data *sd;
-+
-+ BUG_ON(!entity->on_st);
-+
-+ entity->on_st = 0;
-+ st->wsum -= entity->weight;
-+ if (bfqq != NULL) {
-+ sd = entity->sched_data;
-+ bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d",
-+ bfqq, atomic_read(&bfqq->ref));
-+ bfq_put_queue(bfqq);
-+ }
-+}
-+
-+/**
-+ * bfq_put_idle_entity - release the idle tree ref of an entity.
-+ * @st: service tree for the entity.
-+ * @entity: the entity being released.
-+ */
-+static void bfq_put_idle_entity(struct bfq_service_tree *st,
-+ struct bfq_entity *entity)
-+{
-+ bfq_idle_extract(st, entity);
-+ bfq_forget_entity(st, entity);
-+}
-+
-+/**
-+ * bfq_forget_idle - update the idle tree if necessary.
-+ * @st: the service tree to act upon.
-+ *
-+ * To preserve the global O(log N) complexity we only remove one entry here;
-+ * as the idle tree will not grow indefinitely this can be done safely.
-+ */
-+static void bfq_forget_idle(struct bfq_service_tree *st)
-+{
-+ struct bfq_entity *first_idle = st->first_idle;
-+ struct bfq_entity *last_idle = st->last_idle;
-+
-+ if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL &&
-+ !bfq_gt(last_idle->finish, st->vtime)) {
-+ /*
-+ * Forget the whole idle tree, increasing the vtime past
-+ * the last finish time of idle entities.
-+ */
-+ st->vtime = last_idle->finish;
-+ }
-+
-+ if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime))
-+ bfq_put_idle_entity(st, first_idle);
-+}
-+
-+static struct bfq_service_tree *
-+__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
-+ struct bfq_entity *entity)
-+{
-+ struct bfq_service_tree *new_st = old_st;
-+
-+ if (entity->ioprio_changed) {
-+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-+ unsigned short prev_weight, new_weight;
-+ struct bfq_data *bfqd = NULL;
-+ struct rb_root *root;
-+#ifdef CONFIG_CGROUP_BFQIO
-+ struct bfq_sched_data *sd;
-+ struct bfq_group *bfqg;
-+#endif
-+
-+ if (bfqq != NULL)
-+ bfqd = bfqq->bfqd;
-+#ifdef CONFIG_CGROUP_BFQIO
-+ else {
-+ sd = entity->my_sched_data;
-+ bfqg = container_of(sd, struct bfq_group, sched_data);
-+ BUG_ON(!bfqg);
-+ bfqd = (struct bfq_data *)bfqg->bfqd;
-+ BUG_ON(!bfqd);
-+ }
-+#endif
-+
-+ BUG_ON(old_st->wsum < entity->weight);
-+ old_st->wsum -= entity->weight;
-+
-+ if (entity->new_weight != entity->orig_weight) {
-+ if (entity->new_weight < BFQ_MIN_WEIGHT ||
-+ entity->new_weight > BFQ_MAX_WEIGHT) {
-+ printk(KERN_CRIT "update_weight_prio: "
-+ "new_weight %d\n",
-+ entity->new_weight);
-+ BUG();
-+ }
-+ entity->orig_weight = entity->new_weight;
-+ entity->ioprio =
-+ bfq_weight_to_ioprio(entity->orig_weight);
-+ } else if (entity->new_ioprio != entity->ioprio) {
-+ entity->ioprio = entity->new_ioprio;
-+ entity->orig_weight =
-+ bfq_ioprio_to_weight(entity->ioprio);
-+ } else
-+ entity->new_weight = entity->orig_weight =
-+ bfq_ioprio_to_weight(entity->ioprio);
-+
-+ entity->ioprio_class = entity->new_ioprio_class;
-+ entity->ioprio_changed = 0;
-+
-+ /*
-+ * NOTE: here we may be changing the weight too early,
-+ * this will cause unfairness. The correct approach
-+ * would have required additional complexity to defer
-+ * weight changes to the proper time instants (i.e.,
-+ * when entity->finish <= old_st->vtime).
-+ */
-+ new_st = bfq_entity_service_tree(entity);
-+
-+ prev_weight = entity->weight;
-+ new_weight = entity->orig_weight *
-+ (bfqq != NULL ? bfqq->wr_coeff : 1);
-+ /*
-+ * If the weight of the entity changes, remove the entity
-+ * from its old weight counter (if there is a counter
-+ * associated with the entity), and add it to the counter
-+ * associated with its new weight.
-+ */
-+ if (prev_weight != new_weight) {
-+ root = bfqq ? &bfqd->queue_weights_tree :
-+ &bfqd->group_weights_tree;
-+ bfq_weights_tree_remove(bfqd, entity, root);
-+ }
-+ entity->weight = new_weight;
-+ /*
-+ * Add the entity to its weights tree only if it is
-+ * not associated with a weight-raised queue.
-+ */
-+ if (prev_weight != new_weight &&
-+ (bfqq ? bfqq->wr_coeff == 1 : 1))
-+ /* If we get here, root has been initialized. */
-+ bfq_weights_tree_add(bfqd, entity, root);
-+
-+ new_st->wsum += entity->weight;
-+
-+ if (new_st != old_st)
-+ entity->start = new_st->vtime;
-+ }
-+
-+ return new_st;
-+}
-+
-+/**
-+ * bfq_bfqq_served - update the scheduler status after selection for
-+ * service.
-+ * @bfqq: the queue being served.
-+ * @served: bytes to transfer.
-+ *
-+ * NOTE: this can be optimized, as the timestamps of upper level entities
-+ * are synchronized every time a new bfqq is selected for service. For now,
-+ * we keep it this way to better check consistency.
-+ */
-+static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served)
-+{
-+ struct bfq_entity *entity = &bfqq->entity;
-+ struct bfq_service_tree *st;
-+
-+ for_each_entity(entity) {
-+ st = bfq_entity_service_tree(entity);
-+
-+ entity->service += served;
-+ BUG_ON(entity->service > entity->budget);
-+ BUG_ON(st->wsum == 0);
-+
-+ st->vtime += bfq_delta(served, st->wsum);
-+ bfq_forget_idle(st);
-+ }
-+	bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu units", served);
-+}
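-+
-+/*
-+ * Worked example (illustrative only): serving 16 units from a queue on
-+ * a service tree whose weight sum is 8 advances st->vtime by
-+ * bfq_delta(16, 8) = (16 << 22) / 8 = (2 << 22); the same service on a
-+ * tree with weight sum 16 would advance it half as much, as virtual
-+ * time grows more slowly when more weight is backlogged.
-+ */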
-+
-+/**
-+ * bfq_bfqq_charge_full_budget - set the service to the entity budget.
-+ * @bfqq: the queue that needs a service update.
-+ *
-+ * When it's not possible to be fair in the service domain, because
-+ * a queue is not consuming its budget fast enough (the meaning of
-+ * fast depends on the timeout parameter), we charge it a full
-+ * budget. In this way we should obtain a sort of time-domain
-+ * fairness among all the seeky/slow queues.
-+ */
-+static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq)
-+{
-+ struct bfq_entity *entity = &bfqq->entity;
-+
-+ bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget");
-+
-+ bfq_bfqq_served(bfqq, entity->budget - entity->service);
-+}
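-+
-+/*
-+ * Illustrative scenario (not part of the original patch): a seeky
-+ * queue with a budget of 16384 units that has consumed only 1024 when
-+ * its timeout fires is charged the remaining 15360 as well, so its
-+ * next timestamps are computed as if the whole budget had been used:
-+ * the queue keeps competing fairly in the time domain instead of
-+ * gaining ground in the service domain.
-+ */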
-+
-+/**
-+ * __bfq_activate_entity - activate an entity.
-+ * @entity: the entity being activated.
-+ *
-+ * Called whenever an entity is activated, i.e., it is not active and one
-+ * of its children receives a new request, or has to be reactivated due to
-+ * budget exhaustion. It uses the current budget of the entity (and the
-+ * service received, if @entity is active) to calculate its
-+ * timestamps.
-+ */
-+static void __bfq_activate_entity(struct bfq_entity *entity)
-+{
-+ struct bfq_sched_data *sd = entity->sched_data;
-+ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
-+
-+ if (entity == sd->in_service_entity) {
-+ BUG_ON(entity->tree != NULL);
-+ /*
-+	 * If we are requeueing the current entity, we have
-+	 * to take care not to charge it for service it has
-+	 * not received.
-+ */
-+ bfq_calc_finish(entity, entity->service);
-+ entity->start = entity->finish;
-+ sd->in_service_entity = NULL;
-+ } else if (entity->tree == &st->active) {
-+ /*
-+ * Requeueing an entity due to a change of some
-+ * next_in_service entity below it. We reuse the
-+ * old start time.
-+ */
-+ bfq_active_extract(st, entity);
-+ } else if (entity->tree == &st->idle) {
-+ /*
-+ * Must be on the idle tree, bfq_idle_extract() will
-+ * check for that.
-+ */
-+ bfq_idle_extract(st, entity);
-+ entity->start = bfq_gt(st->vtime, entity->finish) ?
-+ st->vtime : entity->finish;
-+ } else {
-+ /*
-+ * The finish time of the entity may be invalid, and
-+ * it is in the past for sure, otherwise the queue
-+ * would have been on the idle tree.
-+ */
-+ entity->start = st->vtime;
-+ st->wsum += entity->weight;
-+ bfq_get_entity(entity);
-+
-+ BUG_ON(entity->on_st);
-+ entity->on_st = 1;
-+ }
-+
-+ st = __bfq_entity_update_weight_prio(st, entity);
-+ bfq_calc_finish(entity, entity->budget);
-+ bfq_active_insert(st, entity);
-+}
-+
-+/**
-+ * bfq_activate_entity - activate an entity and its ancestors if necessary.
-+ * @entity: the entity to activate.
-+ *
-+ * Activate @entity and all the entities on the path from it to the root.
-+ */
-+static void bfq_activate_entity(struct bfq_entity *entity)
-+{
-+ struct bfq_sched_data *sd;
-+
-+ for_each_entity(entity) {
-+ __bfq_activate_entity(entity);
-+
-+ sd = entity->sched_data;
-+ if (!bfq_update_next_in_service(sd))
-+ /*
-+ * No need to propagate the activation to the
-+ * upper entities, as they will be updated when
-+ * the in-service entity is rescheduled.
-+ */
-+ break;
-+ }
-+}
-+
-+/**
-+ * __bfq_deactivate_entity - deactivate an entity from its service tree.
-+ * @entity: the entity to deactivate.
-+ * @requeue: if false, the entity will not be put into the idle tree.
-+ *
-+ * Deactivate an entity, independently from its previous state. If the
-+ * entity was not on a service tree just return, otherwise if it is on
-+ * any scheduler tree, extract it from that tree, and, if the caller
-+ * specified @requeue and the entity's finish time is ahead of the
-+ * tree's vtime, put it on the idle tree; otherwise forget it.
-+ *
-+ * Return %1 if the caller should update the entity hierarchy, i.e.,
-+ * if the entity was in service or if it was the next_in_service for
-+ * its sched_data; return %0 otherwise.
-+ */
-+static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
-+{
-+ struct bfq_sched_data *sd = entity->sched_data;
-+ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
-+ int was_in_service = entity == sd->in_service_entity;
-+ int ret = 0;
-+
-+ if (!entity->on_st)
-+ return 0;
-+
-+ BUG_ON(was_in_service && entity->tree != NULL);
-+
-+ if (was_in_service) {
-+ bfq_calc_finish(entity, entity->service);
-+ sd->in_service_entity = NULL;
-+ } else if (entity->tree == &st->active)
-+ bfq_active_extract(st, entity);
-+ else if (entity->tree == &st->idle)
-+ bfq_idle_extract(st, entity);
-+ else if (entity->tree != NULL)
-+ BUG();
-+
-+ if (was_in_service || sd->next_in_service == entity)
-+ ret = bfq_update_next_in_service(sd);
-+
-+ if (!requeue || !bfq_gt(entity->finish, st->vtime))
-+ bfq_forget_entity(st, entity);
-+ else
-+ bfq_idle_insert(st, entity);
-+
-+ BUG_ON(sd->in_service_entity == entity);
-+ BUG_ON(sd->next_in_service == entity);
-+
-+ return ret;
-+}
-+
-+/**
-+ * bfq_deactivate_entity - deactivate an entity.
-+ * @entity: the entity to deactivate.
-+ * @requeue: true if the entity can be put on the idle tree
-+ */
-+static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
-+{
-+ struct bfq_sched_data *sd;
-+ struct bfq_entity *parent;
-+
-+ for_each_entity_safe(entity, parent) {
-+ sd = entity->sched_data;
-+
-+ if (!__bfq_deactivate_entity(entity, requeue))
-+ /*
-+ * The parent entity is still backlogged, and
-+ * we don't need to update it as it is still
-+ * in service.
-+ */
-+ break;
-+
-+ if (sd->next_in_service != NULL)
-+ /*
-+ * The parent entity is still backlogged and
-+ * the budgets on the path towards the root
-+ * need to be updated.
-+ */
-+ goto update;
-+
-+ /*
-+	 * If we reach this point the parent is no longer backlogged and
-+ * we want to propagate the dequeue upwards.
-+ */
-+ requeue = 1;
-+ }
-+
-+ return;
-+
-+update:
-+ entity = parent;
-+ for_each_entity(entity) {
-+ __bfq_activate_entity(entity);
-+
-+ sd = entity->sched_data;
-+ if (!bfq_update_next_in_service(sd))
-+ break;
-+ }
-+}
-+
-+/**
-+ * bfq_update_vtime - update vtime if necessary.
-+ * @st: the service tree to act upon.
-+ *
-+ * If necessary update the service tree vtime to have at least one
-+ * eligible entity, skipping to its start time. Assumes that the
-+ * active tree of the device is not empty.
-+ *
-+ * NOTE: this hierarchical implementation updates vtimes quite often, so
-+ * we may end up with reactivated processes getting timestamps after a
-+ * vtime skip done because we needed a ->first_active entity on some
-+ * intermediate node.
-+ */
-+static void bfq_update_vtime(struct bfq_service_tree *st)
-+{
-+ struct bfq_entity *entry;
-+ struct rb_node *node = st->active.rb_node;
-+
-+ entry = rb_entry(node, struct bfq_entity, rb_node);
-+ if (bfq_gt(entry->min_start, st->vtime)) {
-+ st->vtime = entry->min_start;
-+ bfq_forget_idle(st);
-+ }
-+}
-+
-+/**
-+ * bfq_first_active_entity - find the eligible entity with
-+ * the smallest finish time
-+ * @st: the service tree to select from.
-+ *
-+ * This function searches for the first schedulable entity, starting from
-+ * the root of the tree and going left whenever the left subtree contains
-+ * at least one eligible (start <= vtime) entity. The path on
-+ * the right is followed only if a) the left subtree contains no eligible
-+ * entities and b) no eligible entity has been found yet.
-+ */
-+static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st)
-+{
-+ struct bfq_entity *entry, *first = NULL;
-+ struct rb_node *node = st->active.rb_node;
-+
-+ while (node != NULL) {
-+ entry = rb_entry(node, struct bfq_entity, rb_node);
-+left:
-+ if (!bfq_gt(entry->start, st->vtime))
-+ first = entry;
-+
-+ BUG_ON(bfq_gt(entry->min_start, st->vtime));
-+
-+ if (node->rb_left != NULL) {
-+ entry = rb_entry(node->rb_left,
-+ struct bfq_entity, rb_node);
-+ if (!bfq_gt(entry->min_start, st->vtime)) {
-+ node = node->rb_left;
-+ goto left;
-+ }
-+ }
-+ if (first != NULL)
-+ break;
-+ node = node->rb_right;
-+ }
-+
-+ BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active));
-+ return first;
-+}
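-+
-+/*
-+ * Illustrative walk (not part of the original patch): with vtime = 10
-+ * and (start, finish) pairs on an active tree ordered by finish time:
-+ *
-+ *             (12, 20)
-+ *             /      \
-+ *        (5, 15)    (8, 30)
-+ *
-+ * the root is not eligible (start 12 > 10), but the left child's
-+ * min_start (5) does not exceed vtime, so the walk descends left;
-+ * (5, 15) is eligible and, given the ordering by finish time, has the
-+ * smallest finish time among eligible entities, so it is returned.
-+ */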
-+
-+/**
-+ * __bfq_lookup_next_entity - return the first eligible entity in @st.
-+ * @st: the service tree.
-+ *
-+ * Update the virtual time in @st and return the first eligible entity
-+ * it contains.
-+ */
-+static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st,
-+ bool force)
-+{
-+ struct bfq_entity *entity, *new_next_in_service = NULL;
-+
-+ if (RB_EMPTY_ROOT(&st->active))
-+ return NULL;
-+
-+ bfq_update_vtime(st);
-+ entity = bfq_first_active_entity(st);
-+ BUG_ON(bfq_gt(entity->start, st->vtime));
-+
-+ /*
-+	 * If the chosen entity does not match the sched_data's
-+	 * next_in_service and we are forcibly serving the IDLE priority
-+	 * class tree, bubble the budget update up the hierarchy.
-+ */
-+ if (unlikely(force && entity != entity->sched_data->next_in_service)) {
-+ new_next_in_service = entity;
-+ for_each_entity(new_next_in_service)
-+ bfq_update_budget(new_next_in_service);
-+ }
-+
-+ return entity;
-+}
-+
-+/**
-+ * bfq_lookup_next_entity - return the first eligible entity in @sd.
-+ * @sd: the sched_data.
-+ * @extract: if true the returned entity will be also extracted from @sd.
-+ *
-+ * NOTE: since we cache the next_in_service entity at each level of the
-+ * hierarchy, the complexity of the lookup can be decreased with
-+ * absolutely no effort by just returning the cached next_in_service value;
-+ * we prefer to do full lookups to test the consistency of the data
-+ * structures.
-+ */
-+static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
-+ int extract,
-+ struct bfq_data *bfqd)
-+{
-+ struct bfq_service_tree *st = sd->service_tree;
-+ struct bfq_entity *entity;
-+ int i = 0;
-+
-+ BUG_ON(sd->in_service_entity != NULL);
-+
-+ if (bfqd != NULL &&
-+ jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) {
-+ entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1,
-+ true);
-+ if (entity != NULL) {
-+ i = BFQ_IOPRIO_CLASSES - 1;
-+ bfqd->bfq_class_idle_last_service = jiffies;
-+ sd->next_in_service = entity;
-+ }
-+ }
-+ for (; i < BFQ_IOPRIO_CLASSES; i++) {
-+ entity = __bfq_lookup_next_entity(st + i, false);
-+ if (entity != NULL) {
-+ if (extract) {
-+ bfq_check_next_in_service(sd, entity);
-+ bfq_active_extract(st + i, entity);
-+ sd->in_service_entity = entity;
-+ sd->next_in_service = NULL;
-+ }
-+ break;
-+ }
-+ }
-+
-+ return entity;
-+}
-+
-+/*
-+ * Get next queue for service.
-+ */
-+static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
-+{
-+ struct bfq_entity *entity = NULL;
-+ struct bfq_sched_data *sd;
-+ struct bfq_queue *bfqq;
-+
-+ BUG_ON(bfqd->in_service_queue != NULL);
-+
-+ if (bfqd->busy_queues == 0)
-+ return NULL;
-+
-+ sd = &bfqd->root_group->sched_data;
-+ for (; sd != NULL; sd = entity->my_sched_data) {
-+ entity = bfq_lookup_next_entity(sd, 1, bfqd);
-+ BUG_ON(entity == NULL);
-+ entity->service = 0;
-+ }
-+
-+ bfqq = bfq_entity_to_bfqq(entity);
-+ BUG_ON(bfqq == NULL);
-+
-+ return bfqq;
-+}
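-+
-+/*
-+ * Note (illustrative): without CONFIG_CGROUP_BFQIO the loop above runs
-+ * exactly once: the root sched_data contains only bfq_queues, whose
-+ * entities have a NULL my_sched_data, so the first lookup already
-+ * yields the leaf entity and bfq_entity_to_bfqq() returns its queue.
-+ */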
-+
-+/*
-+ * Forced extraction of the given queue.
-+ */
-+static void bfq_get_next_queue_forced(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq)
-+{
-+ struct bfq_entity *entity;
-+ struct bfq_sched_data *sd;
-+
-+ BUG_ON(bfqd->in_service_queue != NULL);
-+
-+ entity = &bfqq->entity;
-+ /*
-+ * Bubble up extraction/update from the leaf to the root.
-+ */
-+ for_each_entity(entity) {
-+ sd = entity->sched_data;
-+ bfq_update_budget(entity);
-+ bfq_update_vtime(bfq_entity_service_tree(entity));
-+ bfq_active_extract(bfq_entity_service_tree(entity), entity);
-+ sd->in_service_entity = entity;
-+ sd->next_in_service = NULL;
-+ entity->service = 0;
-+ }
-+}
-+
-+static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
-+{
-+ if (bfqd->in_service_bic != NULL) {
-+ put_io_context(bfqd->in_service_bic->icq.ioc);
-+ bfqd->in_service_bic = NULL;
-+ }
-+
-+ bfqd->in_service_queue = NULL;
-+ del_timer(&bfqd->idle_slice_timer);
-+}
-+
-+static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-+ int requeue)
-+{
-+ struct bfq_entity *entity = &bfqq->entity;
-+
-+ if (bfqq == bfqd->in_service_queue)
-+ __bfq_bfqd_reset_in_service(bfqd);
-+
-+ bfq_deactivate_entity(entity, requeue);
-+}
-+
-+static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
-+{
-+ struct bfq_entity *entity = &bfqq->entity;
-+
-+ bfq_activate_entity(entity);
-+}
-+
-+/*
-+ * Called when the bfqq no longer has requests pending; remove it from
-+ * the service tree.
-+ */
-+static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-+ int requeue)
-+{
-+ BUG_ON(!bfq_bfqq_busy(bfqq));
-+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
-+
-+ bfq_log_bfqq(bfqd, bfqq, "del from busy");
-+
-+ bfq_clear_bfqq_busy(bfqq);
-+
-+ BUG_ON(bfqd->busy_queues == 0);
-+ bfqd->busy_queues--;
-+
-+ if (!bfqq->dispatched) {
-+ bfq_weights_tree_remove(bfqd, &bfqq->entity,
-+ &bfqd->queue_weights_tree);
-+ if (!blk_queue_nonrot(bfqd->queue)) {
-+ BUG_ON(!bfqd->busy_in_flight_queues);
-+ bfqd->busy_in_flight_queues--;
-+ if (bfq_bfqq_constantly_seeky(bfqq)) {
-+ BUG_ON(!bfqd->
-+ const_seeky_busy_in_flight_queues);
-+ bfqd->const_seeky_busy_in_flight_queues--;
-+ }
-+ }
-+ }
-+ if (bfqq->wr_coeff > 1)
-+ bfqd->wr_busy_queues--;
-+
-+ bfq_deactivate_bfqq(bfqd, bfqq, requeue);
-+}
-+
-+/*
-+ * Called when an inactive queue receives a new request.
-+ */
-+static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
-+{
-+ BUG_ON(bfq_bfqq_busy(bfqq));
-+ BUG_ON(bfqq == bfqd->in_service_queue);
-+
-+ bfq_log_bfqq(bfqd, bfqq, "add to busy");
-+
-+ bfq_activate_bfqq(bfqd, bfqq);
-+
-+ bfq_mark_bfqq_busy(bfqq);
-+ bfqd->busy_queues++;
-+
-+ if (!bfqq->dispatched) {
-+ if (bfqq->wr_coeff == 1)
-+ bfq_weights_tree_add(bfqd, &bfqq->entity,
-+ &bfqd->queue_weights_tree);
-+ if (!blk_queue_nonrot(bfqd->queue)) {
-+ bfqd->busy_in_flight_queues++;
-+ if (bfq_bfqq_constantly_seeky(bfqq))
-+ bfqd->const_seeky_busy_in_flight_queues++;
-+ }
-+ }
-+ if (bfqq->wr_coeff > 1)
-+ bfqd->wr_busy_queues++;
-+}
-diff --git a/block/bfq.h b/block/bfq.h
-new file mode 100644
-index 0000000..518f2ac
---- /dev/null
-+++ b/block/bfq.h
-@@ -0,0 +1,775 @@
-+/*
-+ * BFQ-v7r7 for 4.0.0: data structures and common functions prototypes.
-+ *
-+ * Based on ideas and code from CFQ:
-+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
-+ *
-+ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
-+ * Paolo Valente <paolo.valente@unimore.it>
-+ *
-+ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
-+ */
-+
-+#ifndef _BFQ_H
-+#define _BFQ_H
-+
-+#include <linux/blktrace_api.h>
-+#include <linux/hrtimer.h>
-+#include <linux/ioprio.h>
-+#include <linux/rbtree.h>
-+
-+#define BFQ_IOPRIO_CLASSES 3
-+#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
-+
-+#define BFQ_MIN_WEIGHT 1
-+#define BFQ_MAX_WEIGHT 1000
-+
-+#define BFQ_DEFAULT_QUEUE_IOPRIO 4
-+
-+#define BFQ_DEFAULT_GRP_WEIGHT 10
-+#define BFQ_DEFAULT_GRP_IOPRIO 0
-+#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
-+
-+struct bfq_entity;
-+
-+/**
-+ * struct bfq_service_tree - per ioprio_class service tree.
-+ * @active: tree for active entities (i.e., those backlogged).
-+ * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i).
-+ * @first_idle: idle entity with minimum F_i.
-+ * @last_idle: idle entity with maximum F_i.
-+ * @vtime: scheduler virtual time.
-+ * @wsum: scheduler weight sum; active and idle entities contribute to it.
-+ *
-+ * Each service tree represents a B-WF2Q+ scheduler on its own. Each
-+ * ioprio_class has its own independent scheduler, and so its own
-+ * bfq_service_tree. All the fields are protected by the queue lock
-+ * of the containing bfqd.
-+ */
-+struct bfq_service_tree {
-+ struct rb_root active;
-+ struct rb_root idle;
-+
-+ struct bfq_entity *first_idle;
-+ struct bfq_entity *last_idle;
-+
-+ u64 vtime;
-+ unsigned long wsum;
-+};
-+
-+/**
-+ * struct bfq_sched_data - multi-class scheduler.
-+ * @in_service_entity: entity in service.
-+ * @next_in_service: head-of-the-line entity in the scheduler.
-+ * @service_tree: array of service trees, one per ioprio_class.
-+ *
-+ * bfq_sched_data is the basic scheduler queue. It supports three
-+ * ioprio_classes, and can be used either as a toplevel queue or as
-+ * an intermediate queue on a hierarchical setup.
-+ * @next_in_service points to the active entity of the sched_data
-+ * service trees that will be scheduled next.
-+ *
-+ * The supported ioprio_classes are the same as in CFQ, in descending
-+ * priority order: IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
-+ * Requests from higher priority classes are served before all the
-+ * requests from lower priority classes; among requests of the same
-+ * class, requests are served according to B-WF2Q+.
-+ * All the fields are protected by the queue lock of the containing bfqd.
-+ */
-+struct bfq_sched_data {
-+ struct bfq_entity *in_service_entity;
-+ struct bfq_entity *next_in_service;
-+ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
-+};
-+
-+/**
-+ * struct bfq_weight_counter - counter of the number of all active entities
-+ * with a given weight.
-+ * @weight: weight of the entities that this counter refers to.
-+ * @num_active: number of active entities with this weight.
-+ * @weights_node: weights tree member (see bfq_data's @queue_weights_tree
-+ * and @group_weights_tree).
-+ */
-+struct bfq_weight_counter {
-+ short int weight;
-+ unsigned int num_active;
-+ struct rb_node weights_node;
-+};
-+
-+/**
-+ * struct bfq_entity - schedulable entity.
-+ * @rb_node: service_tree member.
-+ * @weight_counter: pointer to the weight counter associated with this entity.
-+ * @on_st: flag, true if the entity is on a tree (either the active or
-+ * the idle one of its service_tree).
-+ * @finish: B-WF2Q+ finish timestamp (aka F_i).
-+ * @start: B-WF2Q+ start timestamp (aka S_i).
-+ * @tree: tree the entity is enqueued into; %NULL if not on a tree.
-+ * @min_start: minimum start time of the (active) subtree rooted at
-+ * this entity; used for O(log N) lookups into active trees.
-+ * @service: service received during the last round of service.
-+ * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight.
-+ * @weight: weight of the queue
-+ * @parent: parent entity, for hierarchical scheduling.
-+ * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the
-+ * associated scheduler queue, %NULL on leaf nodes.
-+ * @sched_data: the scheduler queue this entity belongs to.
-+ * @ioprio: the ioprio in use.
-+ * @new_weight: when a weight change is requested, the new weight value.
-+ * @orig_weight: original weight, used to implement weight boosting
-+ * @new_ioprio: when an ioprio change is requested, the new ioprio value.
-+ * @ioprio_class: the ioprio_class in use.
-+ * @new_ioprio_class: when an ioprio_class change is requested, the new
-+ * ioprio_class value.
-+ * @ioprio_changed: flag, true when the user requested a weight, ioprio or
-+ * ioprio_class change.
-+ *
-+ * A bfq_entity is used to represent either a bfq_queue (leaf node in the
-+ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
-+ * entity belongs to the sched_data of the parent group in the cgroup
-+ * hierarchy. Non-leaf entities have also their own sched_data, stored
-+ * in @my_sched_data.
-+ *
-+ * Each entity independently stores its priority values; this would
-+ * allow different weights on different devices, but this
-+ * functionality is not yet exported to userspace. Priorities and
-+ * weights are updated lazily, first storing the new values into the
-+ * new_* fields, then setting the @ioprio_changed flag. As soon as
-+ * there is a transition in the entity state that allows the priority
-+ * update to take place the effective and the requested priority
-+ * values are synchronized.
-+ *
-+ * Unless cgroups are used, the weight value is calculated from the
-+ * ioprio to export the same interface as CFQ. When dealing with
-+ * ``well-behaved'' queues (i.e., queues that do not take too long to
-+ * consume their budget, have truly sequential behavior, and are not
-+ * disturbed by external factors breaking anticipation) the
-+ * relative weights at each level of the cgroups hierarchy should be
-+ * guaranteed. All the fields are protected by the queue lock of the
-+ * containing bfqd.
-+ */
-+struct bfq_entity {
-+ struct rb_node rb_node;
-+ struct bfq_weight_counter *weight_counter;
-+
-+ int on_st;
-+
-+ u64 finish;
-+ u64 start;
-+
-+ struct rb_root *tree;
-+
-+ u64 min_start;
-+
-+ unsigned long service, budget;
-+ unsigned short weight, new_weight;
-+ unsigned short orig_weight;
-+
-+ struct bfq_entity *parent;
-+
-+ struct bfq_sched_data *my_sched_data;
-+ struct bfq_sched_data *sched_data;
-+
-+ unsigned short ioprio, new_ioprio;
-+ unsigned short ioprio_class, new_ioprio_class;
-+
-+ int ioprio_changed;
-+};
-+
-+struct bfq_group;
-+
-+/**
-+ * struct bfq_queue - leaf schedulable entity.
-+ * @ref: reference counter.
-+ * @bfqd: parent bfq_data.
-+ * @new_bfqq: shared bfq_queue if queue is cooperating with
-+ * one or more other queues.
-+ * @pos_node: request-position tree member (see bfq_data's @rq_pos_tree).
-+ * @pos_root: request-position tree root (see bfq_data's @rq_pos_tree).
-+ * @sort_list: sorted list of pending requests.
-+ * @next_rq: if fifo isn't expired, next request to serve.
-+ * @queued: nr of requests queued in @sort_list.
-+ * @allocated: currently allocated requests.
-+ * @meta_pending: pending metadata requests.
-+ * @fifo: fifo list of requests in sort_list.
-+ * @entity: entity representing this queue in the scheduler.
-+ * @max_budget: maximum budget allowed from the feedback mechanism.
-+ * @budget_timeout: budget expiration (in jiffies).
-+ * @dispatched: number of requests on the dispatch list or inside driver.
-+ * @flags: status flags.
-+ * @bfqq_list: node for active/idle bfqq list inside our bfqd.
-+ * @burst_list_node: node for the device's burst list.
-+ * @seek_samples: number of seeks sampled
-+ * @seek_total: sum of the distances of the seeks sampled
-+ * @seek_mean: mean seek distance
-+ * @last_request_pos: position of the last request enqueued
-+ * @requests_within_timer: number of consecutive pairs of request completion
-+ * and arrival, such that the queue becomes idle
-+ * after the completion, but the next request arrives
-+ * within an idle time slice; used only if the queue's
-+ * IO_bound has been cleared.
-+ * @pid: pid of the process owning the queue, used for logging purposes.
-+ * @last_wr_start_finish: start time of the current weight-raising period if
-+ * the @bfq-queue is being weight-raised, otherwise
-+ * finish time of the last weight-raising period
-+ * @wr_cur_max_time: current max raising time for this queue
-+ * @soft_rt_next_start: minimum time instant such that, only if a new
-+ * request is enqueued after this time instant in an
-+ * idle @bfq_queue with no outstanding requests, then
-+ *                      the task associated with the queue is deemed
-+ * soft real-time (see the comments to the function
-+ * bfq_bfqq_softrt_next_start()).
-+ * @last_idle_bklogged: time of the last transition of the @bfq_queue from
-+ * idle to backlogged
-+ * @service_from_backlogged: cumulative service received from the @bfq_queue
-+ * since the last transition from idle to
-+ * backlogged
-+ *
-+ * A bfq_queue is a leaf request queue; it can be associated with one or more
-+ * io_contexts, if it is async or shared among cooperating processes. @cgroup
-+ * holds a reference to the cgroup, to be sure that it does not disappear while
-+ * a bfqq still references it (mostly to avoid races between request issuing and
-+ * task migration followed by cgroup destruction).
-+ * All the fields are protected by the queue lock of the containing bfqd.
-+ */
-+struct bfq_queue {
-+ atomic_t ref;
-+ struct bfq_data *bfqd;
-+
-+ /* fields for cooperating queues handling */
-+ struct bfq_queue *new_bfqq;
-+ struct rb_node pos_node;
-+ struct rb_root *pos_root;
-+
-+ struct rb_root sort_list;
-+ struct request *next_rq;
-+ int queued[2];
-+ int allocated[2];
-+ int meta_pending;
-+ struct list_head fifo;
-+
-+ struct bfq_entity entity;
-+
-+ unsigned long max_budget;
-+ unsigned long budget_timeout;
-+
-+ int dispatched;
-+
-+ unsigned int flags;
-+
-+ struct list_head bfqq_list;
-+
-+ struct hlist_node burst_list_node;
-+
-+ unsigned int seek_samples;
-+ u64 seek_total;
-+ sector_t seek_mean;
-+ sector_t last_request_pos;
-+
-+ unsigned int requests_within_timer;
-+
-+ pid_t pid;
-+
-+ /* weight-raising fields */
-+ unsigned long wr_cur_max_time;
-+ unsigned long soft_rt_next_start;
-+ unsigned long last_wr_start_finish;
-+ unsigned int wr_coeff;
-+ unsigned long last_idle_bklogged;
-+ unsigned long service_from_backlogged;
-+};
-+
-+/**
-+ * struct bfq_ttime - per process thinktime stats.
-+ * @ttime_total: total process thinktime
-+ * @ttime_samples: number of thinktime samples
-+ * @ttime_mean: average process thinktime
-+ */
-+struct bfq_ttime {
-+ unsigned long last_end_request;
-+
-+ unsigned long ttime_total;
-+ unsigned long ttime_samples;
-+ unsigned long ttime_mean;
-+};
-+
-+/**
-+ * struct bfq_io_cq - per (request_queue, io_context) structure.
-+ * @icq: associated io_cq structure
-+ * @bfqq: array of two process queues, the sync and the async
-+ * @ttime: associated @bfq_ttime struct
-+ */
-+struct bfq_io_cq {
-+ struct io_cq icq; /* must be the first member */
-+ struct bfq_queue *bfqq[2];
-+ struct bfq_ttime ttime;
-+ int ioprio;
-+};
-+
-+enum bfq_device_speed {
-+ BFQ_BFQD_FAST,
-+ BFQ_BFQD_SLOW,
-+};
-+
-+/**
-+ * struct bfq_data - per device data structure.
-+ * @queue: request queue for the managed device.
-+ * @root_group: root bfq_group for the device.
-+ * @rq_pos_tree: rbtree sorted by next_request position, used when
-+ * determining if two or more queues have interleaving
-+ * requests (see bfq_close_cooperator()).
-+ * @active_numerous_groups: number of bfq_groups containing more than one
-+ * active @bfq_entity.
-+ * @queue_weights_tree: rbtree of weight counters of @bfq_queues, sorted by
-+ * weight. Used to keep track of whether all @bfq_queues
-+ * have the same weight. The tree contains one counter
-+ * for each distinct weight associated to some active
-+ * and not weight-raised @bfq_queue (see the comments to
-+ * the functions bfq_weights_tree_[add|remove] for
-+ * further details).
-+ * @group_weights_tree: rbtree of non-queue @bfq_entity weight counters, sorted
-+ * by weight. Used to keep track of whether all
-+ * @bfq_groups have the same weight. The tree contains
-+ * one counter for each distinct weight associated to
-+ * some active @bfq_group (see the comments to the
-+ * functions bfq_weights_tree_[add|remove] for further
-+ * details).
-+ * @busy_queues: number of bfq_queues containing requests (including the
-+ * queue in service, even if it is idling).
-+ * @busy_in_flight_queues: number of @bfq_queues containing pending or
-+ * in-flight requests, plus the @bfq_queue in
-+ * service, even if idle but waiting for the
-+ * possible arrival of its next sync request. This
-+ * field is updated only if the device is rotational,
-+ * but used only if the device is also NCQ-capable.
-+ * The reason why the field is updated also for non-
-+ * NCQ-capable rotational devices is related to the
-+ * fact that the value of @hw_tag may be set also
-+ * later than when busy_in_flight_queues may need to
-+ * be incremented for the first time(s). Taking also
-+ * this possibility into account, to avoid unbalanced
-+ * increments/decrements, would imply more overhead
-+ * than just updating busy_in_flight_queues
-+ * regardless of the value of @hw_tag.
-+ * @const_seeky_busy_in_flight_queues: number of constantly-seeky @bfq_queues
-+ * (that is, seeky queues that expired
-+ * for budget timeout at least once)
-+ * containing pending or in-flight
-+ * requests, including the in-service
-+ * @bfq_queue if constantly seeky. This
-+ * field is updated only if the device
-+ * is rotational, but used only if the
-+ * device is also NCQ-capable (see the
-+ * comments to @busy_in_flight_queues).
-+ * @wr_busy_queues: number of weight-raised busy @bfq_queues.
-+ * @queued: number of queued requests.
-+ * @rq_in_driver: number of requests dispatched and waiting for completion.
-+ * @sync_flight: number of sync requests in the driver.
-+ * @max_rq_in_driver: max number of reqs in driver in the last
-+ * @hw_tag_samples completed requests.
-+ * @hw_tag_samples: nr of samples used to calculate hw_tag.
-+ * @hw_tag: flag set to one if the driver is showing a queueing behavior.
-+ * @budgets_assigned: number of budgets assigned.
-+ * @idle_slice_timer: timer set when idling for the next sequential request
-+ * from the queue in service.
-+ * @unplug_work: delayed work to restart dispatching on the request queue.
-+ * @in_service_queue: bfq_queue in service.
-+ * @in_service_bic: bfq_io_cq (bic) associated with the @in_service_queue.
-+ * @last_position: on-disk position of the last served request.
-+ * @last_budget_start: beginning of the last budget.
-+ * @last_idling_start: beginning of the last idle slice.
-+ * @peak_rate: peak transfer rate observed for a budget.
-+ * @peak_rate_samples: number of samples used to calculate @peak_rate.
-+ * @bfq_max_budget: maximum budget allotted to a bfq_queue before
-+ * rescheduling.
-+ * @group_list: list of all the bfq_groups active on the device.
-+ * @active_list: list of all the bfq_queues active on the device.
-+ * @idle_list: list of all the bfq_queues idle on the device.
-+ * @bfq_quantum: max number of requests dispatched per dispatch round.
-+ * @bfq_fifo_expire: timeout for async/sync requests; when it expires
-+ * requests are served in fifo order.
-+ * @bfq_back_penalty: weight of backward seeks wrt forward ones.
-+ * @bfq_back_max: maximum allowed backward seek.
-+ * @bfq_slice_idle: maximum idling time.
-+ * @bfq_user_max_budget: user-configured max budget value
-+ * (0 for auto-tuning).
-+ * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to
-+ * async queues.
-+ * @bfq_timeout: timeout for bfq_queues to consume their budget; used to
-+ *               prevent seeky queues from imposing long latencies on
-+ *               well-behaved ones (this also implies that seeky queues cannot
-+ * receive guarantees in the service domain; after a timeout
-+ * they are charged for the whole allocated budget, to try
-+ * to preserve a behavior reasonably fair among them, but
-+ * without service-domain guarantees).
-+ * @bfq_coop_thresh: number of queue merges after which a @bfq_queue is
-+ *                   no longer granted any weight-raising.
-+ * @bfq_failed_cooperations: number of consecutive failed cooperation
-+ * chances after which weight-raising is restored
-+ * to a queue subject to more than bfq_coop_thresh
-+ * queue merges.
-+ * @bfq_requests_within_timer: number of consecutive requests that must be
-+ * issued within the idle time slice to set
-+ * again idling to a queue which was marked as
-+ * non-I/O-bound (see the definition of the
-+ * IO_bound flag for further details).
-+ * @last_ins_in_burst: last time at which a queue entered the current
-+ * burst of queues being activated shortly after
-+ * each other; for more details about this and the
-+ * following parameters related to a burst of
-+ * activations, see the comments to the function
-+ * @bfq_handle_burst.
-+ * @bfq_burst_interval: reference time interval used to decide whether a
-+ * queue has been activated shortly after
-+ * @last_ins_in_burst.
-+ * @burst_size: number of queues in the current burst of queue activations.
-+ * @bfq_large_burst_thresh: maximum burst size above which the current
-+ * queue-activation burst is deemed as 'large'.
-+ * @large_burst: true if a large queue-activation burst is in progress.
-+ * @burst_list: head of the burst list (as for the above fields, more details
-+ * in the comments to the function bfq_handle_burst).
-+ * @low_latency: if set to true, low-latency heuristics are enabled.
-+ * @bfq_wr_coeff: maximum factor by which the weight of a weight-raised
-+ * queue is multiplied.
-+ * @bfq_wr_max_time: maximum duration of a weight-raising period (jiffies).
-+ * @bfq_wr_rt_max_time: maximum duration for soft real-time processes.
-+ * @bfq_wr_min_idle_time: minimum idle period after which weight-raising
-+ * may be reactivated for a queue (in jiffies).
-+ * @bfq_wr_min_inter_arr_async: minimum period between request arrivals
-+ * after which weight-raising may be
-+ * reactivated for an already busy queue
-+ * (in jiffies).
-+ * @bfq_wr_max_softrt_rate: max service-rate for a soft real-time queue,
-+ *                          sectors per second.
-+ * @RT_prod: cached value of the product R*T used for computing the maximum
-+ * duration of the weight raising automatically.
-+ * @device_speed: device-speed class for the low-latency heuristic.
-+ * @oom_bfqq: fallback dummy bfqq for extreme OOM conditions.
-+ *
-+ * All the fields are protected by the @queue lock.
-+ */
-+struct bfq_data {
-+ struct request_queue *queue;
-+
-+ struct bfq_group *root_group;
-+ struct rb_root rq_pos_tree;
-+
-+#ifdef CONFIG_CGROUP_BFQIO
-+ int active_numerous_groups;
-+#endif
-+
-+ struct rb_root queue_weights_tree;
-+ struct rb_root group_weights_tree;
-+
-+ int busy_queues;
-+ int busy_in_flight_queues;
-+ int const_seeky_busy_in_flight_queues;
-+ int wr_busy_queues;
-+ int queued;
-+ int rq_in_driver;
-+ int sync_flight;
-+
-+ int max_rq_in_driver;
-+ int hw_tag_samples;
-+ int hw_tag;
-+
-+ int budgets_assigned;
-+
-+ struct timer_list idle_slice_timer;
-+ struct work_struct unplug_work;
-+
-+ struct bfq_queue *in_service_queue;
-+ struct bfq_io_cq *in_service_bic;
-+
-+ sector_t last_position;
-+
-+ ktime_t last_budget_start;
-+ ktime_t last_idling_start;
-+ int peak_rate_samples;
-+ u64 peak_rate;
-+ unsigned long bfq_max_budget;
-+
-+ struct hlist_head group_list;
-+ struct list_head active_list;
-+ struct list_head idle_list;
-+
-+ unsigned int bfq_quantum;
-+ unsigned int bfq_fifo_expire[2];
-+ unsigned int bfq_back_penalty;
-+ unsigned int bfq_back_max;
-+ unsigned int bfq_slice_idle;
-+ u64 bfq_class_idle_last_service;
-+
-+ unsigned int bfq_user_max_budget;
-+ unsigned int bfq_max_budget_async_rq;
-+ unsigned int bfq_timeout[2];
-+
-+ unsigned int bfq_coop_thresh;
-+ unsigned int bfq_failed_cooperations;
-+ unsigned int bfq_requests_within_timer;
-+
-+ unsigned long last_ins_in_burst;
-+ unsigned long bfq_burst_interval;
-+ int burst_size;
-+ unsigned long bfq_large_burst_thresh;
-+ bool large_burst;
-+ struct hlist_head burst_list;
-+
-+ bool low_latency;
-+
-+ /* parameters of the low_latency heuristics */
-+ unsigned int bfq_wr_coeff;
-+ unsigned int bfq_wr_max_time;
-+ unsigned int bfq_wr_rt_max_time;
-+ unsigned int bfq_wr_min_idle_time;
-+ unsigned long bfq_wr_min_inter_arr_async;
-+ unsigned int bfq_wr_max_softrt_rate;
-+ u64 RT_prod;
-+ enum bfq_device_speed device_speed;
-+
-+ struct bfq_queue oom_bfqq;
-+};
-+
-+enum bfqq_state_flags {
-+ BFQ_BFQQ_FLAG_busy = 0, /* has requests or is in service */
-+ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */
-+ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
-+ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
-+ BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */
-+ BFQ_BFQQ_FLAG_prio_changed, /* task priority has changed */
-+ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
-+ BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */
-+ BFQ_BFQQ_FLAG_IO_bound, /*
-+ * bfqq has timed-out at least once
-+ * having consumed at most 2/10 of
-+ * its budget
-+ */
-+ BFQ_BFQQ_FLAG_in_large_burst, /*
-+ * bfqq activated in a large burst,
-+ * see comments to bfq_handle_burst.
-+ */
-+ BFQ_BFQQ_FLAG_constantly_seeky, /*
-+ * bfqq has proved to be slow and
-+ * seeky until budget timeout
-+ */
-+ BFQ_BFQQ_FLAG_softrt_update, /*
-+ * may need softrt-next-start
-+ * update
-+ */
-+ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
-+	BFQ_BFQQ_FLAG_split_coop,	/* shared bfqq will be split */
-+};
-+
-+#define BFQ_BFQQ_FNS(name) \
-+static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
-+{ \
-+ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \
-+} \
-+static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
-+{ \
-+ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \
-+} \
-+static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
-+{ \
-+ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \
-+}
-+
-+BFQ_BFQQ_FNS(busy);
-+BFQ_BFQQ_FNS(wait_request);
-+BFQ_BFQQ_FNS(must_alloc);
-+BFQ_BFQQ_FNS(fifo_expire);
-+BFQ_BFQQ_FNS(idle_window);
-+BFQ_BFQQ_FNS(prio_changed);
-+BFQ_BFQQ_FNS(sync);
-+BFQ_BFQQ_FNS(budget_new);
-+BFQ_BFQQ_FNS(IO_bound);
-+BFQ_BFQQ_FNS(in_large_burst);
-+BFQ_BFQQ_FNS(constantly_seeky);
-+BFQ_BFQQ_FNS(coop);
-+BFQ_BFQQ_FNS(split_coop);
-+BFQ_BFQQ_FNS(softrt_update);
-+#undef BFQ_BFQQ_FNS
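-+
-+/*
-+ * Expansion sketch (for reference, not part of the original patch):
-+ * for the 'busy' flag the macro above generates
-+ *
-+ *	static inline void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
-+ *	{
-+ *		(bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_busy);
-+ *	}
-+ *	static inline void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
-+ *	{
-+ *		(bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_busy);
-+ *	}
-+ *	static inline int bfq_bfqq_busy(const struct bfq_queue *bfqq)
-+ *	{
-+ *		return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_busy)) != 0;
-+ *	}
-+ */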
-+
-+/* Logging facilities. */
-+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
-+ blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args)
-+
-+#define bfq_log(bfqd, fmt, args...) \
-+ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
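-+
-+/*
-+ * Usage sketch (illustrative; rq is a hypothetical struct request
-+ * pointer): both macros feed blktrace, so their output can be
-+ * inspected with blktrace/blkparse, and they are no-ops at runtime
-+ * when tracing is not enabled on the queue:
-+ *
-+ *	bfq_log(bfqd, "schedule dispatch");
-+ *	bfq_log_bfqq(bfqd, bfqq, "dispatch request %p", rq);
-+ */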
-+
-+/* Expiration reasons. */
-+enum bfqq_expiration {
-+ BFQ_BFQQ_TOO_IDLE = 0, /*
-+ * queue has been idling for
-+ * too long
-+ */
-+ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */
-+ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */
-+ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */
-+};
-+
-+#ifdef CONFIG_CGROUP_BFQIO
-+/**
-+ * struct bfq_group - per (device, cgroup) data structure.
-+ * @entity: schedulable entity to insert into the parent group sched_data.
-+ * @sched_data: own sched_data, to contain child entities (they may be
-+ * both bfq_queues and bfq_groups).
-+ * @group_node: node to be inserted into the bfqio_cgroup->group_data
-+ * list of the containing cgroup's bfqio_cgroup.
-+ * @bfqd_node: node to be inserted into the @bfqd->group_list list
-+ * of the groups active on the same device; used for cleanup.
-+ * @bfqd: the bfq_data for the device this group acts upon.
-+ * @async_bfqq: array of async queues for all the tasks belonging to
-+ * the group, one queue per ioprio value per ioprio_class,
-+ * except for the idle class that has only one queue.
-+ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
-+ * @my_entity: pointer to @entity, %NULL for the toplevel group; used
-+ * to avoid too many special cases during group creation/
-+ * migration.
-+ * @active_entities: number of active entities belonging to the group;
-+ * unused for the root group. Used to know whether there
-+ * are groups with more than one active @bfq_entity
-+ * (see the comments to the function
-+ * bfq_bfqq_must_not_expire()).
-+ *
-+ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
-+ * there is a set of bfq_groups, each one collecting the lower-level
-+ * entities belonging to the group that are acting on the same device.
-+ *
-+ * Locking works as follows:
-+ * o @group_node is protected by the bfqio_cgroup lock, and is accessed
-+ * via RCU from its readers.
-+ * o @bfqd is protected by the queue lock, RCU is used to access it
-+ * from the readers.
-+ * o All the other fields are protected by the @bfqd queue lock.
-+ */
-+struct bfq_group {
-+ struct bfq_entity entity;
-+ struct bfq_sched_data sched_data;
-+
-+ struct hlist_node group_node;
-+ struct hlist_node bfqd_node;
-+
-+ void *bfqd;
-+
-+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
-+ struct bfq_queue *async_idle_bfqq;
-+
-+ struct bfq_entity *my_entity;
-+
-+ int active_entities;
-+};
-+
-+/**
-+ * struct bfqio_cgroup - bfq cgroup data structure.
-+ * @css: subsystem state for bfq in the containing cgroup.
-+ * @online: flag marked when the subsystem is inserted.
-+ * @weight: cgroup weight.
-+ * @ioprio: cgroup ioprio.
-+ * @ioprio_class: cgroup ioprio_class.
-+ * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data.
-+ * @group_data: list containing the bfq_group belonging to this cgroup.
-+ *
-+ * @group_data is accessed using RCU, with @lock protecting the updates;
-+ * @ioprio and @ioprio_class are protected by @lock.
-+ */
-+struct bfqio_cgroup {
-+ struct cgroup_subsys_state css;
-+ bool online;
-+
-+ unsigned short weight, ioprio, ioprio_class;
-+
-+ spinlock_t lock;
-+ struct hlist_head group_data;
-+};
-+#else
-+struct bfq_group {
-+ struct bfq_sched_data sched_data;
-+
-+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
-+ struct bfq_queue *async_idle_bfqq;
-+};
-+#endif
-+
-+static inline struct bfq_service_tree *
-+bfq_entity_service_tree(struct bfq_entity *entity)
-+{
-+ struct bfq_sched_data *sched_data = entity->sched_data;
-+ unsigned int idx = entity->ioprio_class - 1;
-+
-+ BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
-+ BUG_ON(sched_data == NULL);
-+
-+ return sched_data->service_tree + idx;
-+}
-+
-+static inline struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic,
-+ bool is_sync)
-+{
-+ return bic->bfqq[is_sync];
-+}
-+
-+static inline void bic_set_bfqq(struct bfq_io_cq *bic,
-+ struct bfq_queue *bfqq, bool is_sync)
-+{
-+ bic->bfqq[is_sync] = bfqq;
-+}
-+
-+static inline struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
-+{
-+ return bic->icq.q->elevator->elevator_data;
-+}
-+
-+/**
-+ * bfq_get_bfqd_locked - get a lock on a bfqd using an RCU-protected pointer.
-+ * @ptr: a pointer to a bfqd.
-+ * @flags: storage for the flags to be saved.
-+ *
-+ * This function allows bfqg->bfqd to be protected by the
-+ * queue lock of the bfqd they reference; the pointer is dereferenced
-+ * under RCU, so the storage for bfqd is guaranteed to be valid as long
-+ * as the RCU read-side critical section does not end. After the
-+ * bfqd->queue->queue_lock is taken the pointer is rechecked, to be
-+ * sure that no other writer accessed it. If we raced with a writer,
-+ * the function returns NULL, with the queue unlocked, otherwise it
-+ * returns the dereferenced pointer, with the queue locked.
-+ */
-+static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr,
-+ unsigned long *flags)
-+{
-+ struct bfq_data *bfqd;
-+
-+ rcu_read_lock();
-+ bfqd = rcu_dereference(*(struct bfq_data **)ptr);
-+
-+ if (bfqd != NULL) {
-+ spin_lock_irqsave(bfqd->queue->queue_lock, *flags);
-+ if (*ptr == bfqd)
-+ goto out;
-+ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
-+ }
-+
-+ bfqd = NULL;
-+out:
-+ rcu_read_unlock();
-+ return bfqd;
-+}
-+
-+static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd,
-+ unsigned long *flags)
-+{
-+ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
-+}
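
Concretely, the pattern above is: dereference the protected pointer, take
the object's lock, then re-check that the pointer still refers to the same
object. Below is a minimal userspace sketch of that shape, with an atomic
pointer and a pthread mutex standing in for RCU and the queue lock. It is
an analogy only: unlike real RCU, nothing here keeps the object alive
between the load and the lock, so the sketch assumes the object is never
freed.

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stddef.h>

        struct dev_data {
                pthread_mutex_t lock;
                /* ... per-device state ... */
        };

        /* A writer may clear or repoint this slot at any time. */
        static _Atomic(struct dev_data *) slot;

        /* Return the current device locked, or NULL if we raced a writer. */
        static struct dev_data *get_dev_locked(void)
        {
                struct dev_data *d = atomic_load(&slot);

                if (d == NULL)
                        return NULL;
                pthread_mutex_lock(&d->lock);
                /* Re-check: did a writer swap the pointer meanwhile? */
                if (atomic_load(&slot) == d)
                        return d;       /* still current, returned locked */
                pthread_mutex_unlock(&d->lock);
                return NULL;            /* pointer changed under us */
        }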
-+
-+static void bfq_changed_ioprio(struct bfq_io_cq *bic);
-+static void bfq_put_queue(struct bfq_queue *bfqq);
-+static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
-+static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
-+ struct bfq_group *bfqg, int is_sync,
-+ struct bfq_io_cq *bic, gfp_t gfp_mask);
-+static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
-+ struct bfq_group *bfqg);
-+static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
-+static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
-+
-+#endif /* _BFQ_H */
---
-2.1.0
-
diff --git a/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-4.0.0.patch b/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-4.0.0.patch
deleted file mode 100644
index 53267cdd..00000000
--- a/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-4.0.0.patch
+++ /dev/null
@@ -1,1222 +0,0 @@
-From d49cf2e7913ec1c4b86a9de657140d9ec5fa8c19 Mon Sep 17 00:00:00 2001
-From: Mauro Andreolini <mauro.andreolini@unimore.it>
-Date: Thu, 18 Dec 2014 21:32:08 +0100
-Subject: [PATCH 3/3] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r7 for
- 4.0.0
-
-A set of processes may happen to perform interleaved reads, i.e., requests
-whose union would give rise to a sequential read pattern. There are two
-typical cases: in the first case, processes read fixed-size chunks of
-data at a fixed distance from each other, while in the second case processes
-may read variable-size chunks at variable distances. The latter case occurs
-for example with QEMU, which splits the I/O generated by the guest into
-multiple chunks, and lets these chunks be served by a pool of cooperating
-processes, iteratively assigning the next chunk of I/O to the first
-available process. CFQ uses actual queue merging for the first type of
-processes, whereas it uses preemption to get a sequential read pattern out
-of the read requests performed by the second type of processes. In the end
-it uses two different mechanisms to achieve the same goal: boosting the
-throughput with interleaved I/O.
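-
-As a toy numeric illustration of this (sector numbers invented for the
-example), two readers that each look seeky in isolation can jointly form a
-perfectly sequential stream:
-
-        #include <stdio.h>
-
-        int main(void)
-        {
-                /* Sectors read by two cooperating processes. */
-                int a[] = { 0, 2, 4, 6 };  /* process A: stride 2, "seeky" */
-                int b[] = { 1, 3, 5, 7 };  /* process B: stride 2, "seeky" */
-                int i;
-
-                /* Interleaved by arrival time, the union is sequential. */
-                for (i = 0; i < 4; i++)
-                        printf("%d %d ", a[i], b[i]);
-                printf("\n");   /* prints: 0 1 2 3 4 5 6 7 */
-                return 0;
-        }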
-
-This patch introduces Early Queue Merge (EQM), a unified mechanism to get a
-sequential read pattern with both types of processes. The main idea is
-checking newly arrived requests against the next request of the active queue
-both in case of actual request insert and in case of request merge. By doing
-so, both the types of processes can be handled by just merging their queues.
-EQM is then simpler and more compact than the pair of mechanisms used in
-CFQ.
-
-Finally, EQM also preserves the typical low-latency properties of BFQ, by
-properly restoring the weight-raising state of a queue when it gets back to
-a non-merged state.
-
-Signed-off-by: Mauro Andreolini <mauro.andreolini@unimore.it>
-Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
-Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
----
- block/bfq-iosched.c | 751 +++++++++++++++++++++++++++++++++++++---------------
- block/bfq-sched.c | 28 --
- block/bfq.h | 54 +++-
- 3 files changed, 581 insertions(+), 252 deletions(-)
-
-diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
-index 97ee934..328f33c 100644
---- a/block/bfq-iosched.c
-+++ b/block/bfq-iosched.c
-@@ -571,6 +571,57 @@ static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
- return dur;
- }
-
-+static inline unsigned
-+bfq_bfqq_cooperations(struct bfq_queue *bfqq)
-+{
-+ return bfqq->bic ? bfqq->bic->cooperations : 0;
-+}
-+
-+static inline void
-+bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
-+{
-+ if (bic->saved_idle_window)
-+ bfq_mark_bfqq_idle_window(bfqq);
-+ else
-+ bfq_clear_bfqq_idle_window(bfqq);
-+ if (bic->saved_IO_bound)
-+ bfq_mark_bfqq_IO_bound(bfqq);
-+ else
-+ bfq_clear_bfqq_IO_bound(bfqq);
-+ /* Assuming that the flag in_large_burst is already correctly set */
-+ if (bic->wr_time_left && bfqq->bfqd->low_latency &&
-+ !bfq_bfqq_in_large_burst(bfqq) &&
-+ bic->cooperations < bfqq->bfqd->bfq_coop_thresh) {
-+ /*
-+ * Start a weight raising period with the duration given by
-+ * the wr_time_left snapshot.
-+ */
-+ if (bfq_bfqq_busy(bfqq))
-+ bfqq->bfqd->wr_busy_queues++;
-+ bfqq->wr_coeff = bfqq->bfqd->bfq_wr_coeff;
-+ bfqq->wr_cur_max_time = bic->wr_time_left;
-+ bfqq->last_wr_start_finish = jiffies;
-+ bfqq->entity.ioprio_changed = 1;
-+ }
-+ /*
-+ * Clear wr_time_left to prevent bfq_bfqq_save_state() from
-+ * getting confused about the queue's need of a weight-raising
-+ * period.
-+ */
-+ bic->wr_time_left = 0;
-+}
-+
-+/* Must be called with the queue_lock held. */
-+static int bfqq_process_refs(struct bfq_queue *bfqq)
-+{
-+ int process_refs, io_refs;
-+
-+ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
-+ process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
-+ BUG_ON(process_refs < 0);
-+ return process_refs;
-+}
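-+
-+/*
-+ * The computation above infers how many processes still reference a queue
-+ * by subtracting, from the total reference count, the references held by
-+ * allocated requests and by the queue's presence on a service tree. A
-+ * minimal numeric sketch, with invented values:
-+ *
-+ *      int total_refs = 5;  // atomic_read(&bfqq->ref)
-+ *      int io_refs    = 3;  // allocated[READ] + allocated[WRITE]
-+ *      int on_st      = 1;  // 1 while queued on a service tree
-+ *      // references not explained by I/O or scheduling: processes
-+ *      int process_refs = total_refs - io_refs - on_st;  // == 1
-+ */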
-+
- /* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
- static inline void bfq_reset_burst_list(struct bfq_data *bfqd,
- struct bfq_queue *bfqq)
-@@ -815,7 +866,7 @@ static void bfq_add_request(struct request *rq)
- bfq_rq_pos_tree_add(bfqd, bfqq);
-
- if (!bfq_bfqq_busy(bfqq)) {
-- bool soft_rt,
-+ bool soft_rt, coop_or_in_burst,
- idle_for_long_time = time_is_before_jiffies(
- bfqq->budget_timeout +
- bfqd->bfq_wr_min_idle_time);
-@@ -839,11 +890,12 @@ static void bfq_add_request(struct request *rq)
- bfqd->last_ins_in_burst = jiffies;
- }
-
-+ coop_or_in_burst = bfq_bfqq_in_large_burst(bfqq) ||
-+ bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh;
- soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
-- !bfq_bfqq_in_large_burst(bfqq) &&
-+ !coop_or_in_burst &&
- time_is_before_jiffies(bfqq->soft_rt_next_start);
-- interactive = !bfq_bfqq_in_large_burst(bfqq) &&
-- idle_for_long_time;
-+ interactive = !coop_or_in_burst && idle_for_long_time;
- entity->budget = max_t(unsigned long, bfqq->max_budget,
- bfq_serv_to_charge(next_rq, bfqq));
-
-@@ -862,11 +914,20 @@ static void bfq_add_request(struct request *rq)
- if (!bfqd->low_latency)
- goto add_bfqq_busy;
-
-+ if (bfq_bfqq_just_split(bfqq))
-+ goto set_ioprio_changed;
-+
- /*
-- * If the queue is not being boosted and has been idle
-- * for enough time, start a weight-raising period
-+ * If the queue:
-+ * - is not being boosted,
-+ * - has been idle for enough time,
-+ * - is not a sync queue or is linked to a bfq_io_cq (it is
-+ * shared "for its nature" or it is not shared and its
-+ * requests have not been redirected to a shared queue)
-+ * start a weight-raising period.
- */
-- if (old_wr_coeff == 1 && (interactive || soft_rt)) {
-+ if (old_wr_coeff == 1 && (interactive || soft_rt) &&
-+ (!bfq_bfqq_sync(bfqq) || bfqq->bic != NULL)) {
- bfqq->wr_coeff = bfqd->bfq_wr_coeff;
- if (interactive)
- bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
-@@ -880,7 +941,7 @@ static void bfq_add_request(struct request *rq)
- } else if (old_wr_coeff > 1) {
- if (interactive)
- bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
-- else if (bfq_bfqq_in_large_burst(bfqq) ||
-+ else if (coop_or_in_burst ||
- (bfqq->wr_cur_max_time ==
- bfqd->bfq_wr_rt_max_time &&
- !soft_rt)) {
-@@ -899,18 +960,18 @@ static void bfq_add_request(struct request *rq)
- /*
- *
- * The remaining weight-raising time is lower
-- * than bfqd->bfq_wr_rt_max_time, which
-- * means that the application is enjoying
-- * weight raising either because deemed soft-
-- * rt in the near past, or because deemed
-- * interactive a long ago. In both cases,
-- * resetting now the current remaining weight-
-- * raising time for the application to the
-- * weight-raising duration for soft rt
-- * applications would not cause any latency
-- * increase for the application (as the new
-- * duration would be higher than the remaining
-- * time).
-+ * than bfqd->bfq_wr_rt_max_time, which means
-+ * that the application is enjoying weight
-+ * raising either because deemed soft-rt in
-+ * the near past, or because deemed interactive
-+ * long ago.
-+ * In both cases, resetting now the current
-+ * remaining weight-raising time for the
-+ * application to the weight-raising duration
-+ * for soft rt applications would not cause any
-+ * latency increase for the application (as the
-+ * new duration would be higher than the
-+ * remaining time).
- *
- * In addition, the application is now meeting
- * the requirements for being deemed soft rt.
-@@ -945,6 +1006,7 @@ static void bfq_add_request(struct request *rq)
- bfqd->bfq_wr_rt_max_time;
- }
- }
-+set_ioprio_changed:
- if (old_wr_coeff != bfqq->wr_coeff)
- entity->ioprio_changed = 1;
- add_bfqq_busy:
-@@ -1156,90 +1218,35 @@ static void bfq_end_wr(struct bfq_data *bfqd)
- spin_unlock_irq(bfqd->queue->queue_lock);
- }
-
--static int bfq_allow_merge(struct request_queue *q, struct request *rq,
-- struct bio *bio)
-+static inline sector_t bfq_io_struct_pos(void *io_struct, bool request)
- {
-- struct bfq_data *bfqd = q->elevator->elevator_data;
-- struct bfq_io_cq *bic;
-- struct bfq_queue *bfqq;
--
-- /*
-- * Disallow merge of a sync bio into an async request.
-- */
-- if (bfq_bio_sync(bio) && !rq_is_sync(rq))
-- return 0;
--
-- /*
-- * Lookup the bfqq that this bio will be queued with. Allow
-- * merge only if rq is queued there.
-- * Queue lock is held here.
-- */
-- bic = bfq_bic_lookup(bfqd, current->io_context);
-- if (bic == NULL)
-- return 0;
--
-- bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
-- return bfqq == RQ_BFQQ(rq);
--}
--
--static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
-- struct bfq_queue *bfqq)
--{
-- if (bfqq != NULL) {
-- bfq_mark_bfqq_must_alloc(bfqq);
-- bfq_mark_bfqq_budget_new(bfqq);
-- bfq_clear_bfqq_fifo_expire(bfqq);
--
-- bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
--
-- bfq_log_bfqq(bfqd, bfqq,
-- "set_in_service_queue, cur-budget = %lu",
-- bfqq->entity.budget);
-- }
--
-- bfqd->in_service_queue = bfqq;
--}
--
--/*
-- * Get and set a new queue for service.
-- */
--static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd,
-- struct bfq_queue *bfqq)
--{
-- if (!bfqq)
-- bfqq = bfq_get_next_queue(bfqd);
-+ if (request)
-+ return blk_rq_pos(io_struct);
- else
-- bfq_get_next_queue_forced(bfqd, bfqq);
--
-- __bfq_set_in_service_queue(bfqd, bfqq);
-- return bfqq;
-+ return ((struct bio *)io_struct)->bi_iter.bi_sector;
- }
-
--static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
-- struct request *rq)
-+static inline sector_t bfq_dist_from(sector_t pos1,
-+ sector_t pos2)
- {
-- if (blk_rq_pos(rq) >= bfqd->last_position)
-- return blk_rq_pos(rq) - bfqd->last_position;
-+ if (pos1 >= pos2)
-+ return pos1 - pos2;
- else
-- return bfqd->last_position - blk_rq_pos(rq);
-+ return pos2 - pos1;
- }
-
--/*
-- * Return true if bfqq has no request pending and rq is close enough to
-- * bfqd->last_position, or if rq is closer to bfqd->last_position than
-- * bfqq->next_rq
-- */
--static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq)
-+static inline int bfq_rq_close_to_sector(void *io_struct, bool request,
-+ sector_t sector)
- {
-- return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR;
-+ return bfq_dist_from(bfq_io_struct_pos(io_struct, request), sector) <=
-+ BFQQ_SEEK_THR;
- }
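-
- /*
-  * Note that the comparison inside bfq_dist_from() is not mere style:
-  * sector_t is unsigned, so an unconditional pos1 - pos2 would wrap
-  * around whenever pos1 < pos2. A self-contained sketch (the typedef and
-  * values are stand-ins for the example):
-  *
-  *     typedef unsigned long long sector_t;
-  *
-  *     static sector_t dist_from(sector_t pos1, sector_t pos2)
-  *     {
-  *             return pos1 >= pos2 ? pos1 - pos2 : pos2 - pos1;
-  *     }
-  *
-  *     // both directions give the same 16-sector distance:
-  *     // dist_from(120, 104) == 16 and dist_from(104, 120) == 16
-  */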
-
--static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
-+static struct bfq_queue *bfqq_close(struct bfq_data *bfqd, sector_t sector)
- {
- struct rb_root *root = &bfqd->rq_pos_tree;
- struct rb_node *parent, *node;
- struct bfq_queue *__bfqq;
-- sector_t sector = bfqd->last_position;
-
- if (RB_EMPTY_ROOT(root))
- return NULL;
-@@ -1258,7 +1265,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
- * next_request position).
- */
- __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
-- if (bfq_rq_close(bfqd, __bfqq->next_rq))
-+ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
- return __bfqq;
-
- if (blk_rq_pos(__bfqq->next_rq) < sector)
-@@ -1269,7 +1276,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
- return NULL;
-
- __bfqq = rb_entry(node, struct bfq_queue, pos_node);
-- if (bfq_rq_close(bfqd, __bfqq->next_rq))
-+ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
- return __bfqq;
-
- return NULL;
-@@ -1278,14 +1285,12 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
- /*
- * bfqd - obvious
- * cur_bfqq - passed in so that we don't decide that the current queue
-- * is closely cooperating with itself.
-- *
-- * We are assuming that cur_bfqq has dispatched at least one request,
-- * and that bfqd->last_position reflects a position on the disk associated
-- * with the I/O issued by cur_bfqq.
-+ * is closely cooperating with itself
-+ * sector - used as a reference point to search for a close queue
- */
- static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
-- struct bfq_queue *cur_bfqq)
-+ struct bfq_queue *cur_bfqq,
-+ sector_t sector)
- {
- struct bfq_queue *bfqq;
-
-@@ -1305,7 +1310,7 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
- * working closely on the same area of the disk. In that case,
- * we can group them together and don't waste time idling.
- */
-- bfqq = bfqq_close(bfqd);
-+ bfqq = bfqq_close(bfqd, sector);
- if (bfqq == NULL || bfqq == cur_bfqq)
- return NULL;
-
-@@ -1332,6 +1337,315 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
- return bfqq;
- }
-
-+static struct bfq_queue *
-+bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
-+{
-+ int process_refs, new_process_refs;
-+ struct bfq_queue *__bfqq;
-+
-+ /*
-+ * If there are no process references on the new_bfqq, then it is
-+ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
-+ * may have dropped their last reference (not just their last process
-+ * reference).
-+ */
-+ if (!bfqq_process_refs(new_bfqq))
-+ return NULL;
-+
-+ /* Avoid a circular list and skip interim queue merges. */
-+ while ((__bfqq = new_bfqq->new_bfqq)) {
-+ if (__bfqq == bfqq)
-+ return NULL;
-+ new_bfqq = __bfqq;
-+ }
-+
-+ process_refs = bfqq_process_refs(bfqq);
-+ new_process_refs = bfqq_process_refs(new_bfqq);
-+ /*
-+ * If the process for the bfqq has gone away, there is no
-+ * sense in merging the queues.
-+ */
-+ if (process_refs == 0 || new_process_refs == 0)
-+ return NULL;
-+
-+ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
-+ new_bfqq->pid);
-+
-+ /*
-+ * Merging is just a redirection: the requests of the process
-+ * owning one of the two queues are redirected to the other queue.
-+ * The latter queue, in its turn, is set as shared if this is the
-+ * first time that the requests of some process are redirected to
-+ * it.
-+ *
-+ * We redirect bfqq to new_bfqq and not the opposite, because we
-+ * are in the context of the process owning bfqq, hence we have
-+ * the io_cq of this process. So we can immediately configure this
-+ * io_cq to redirect the requests of the process to new_bfqq.
-+ *
-+ * NOTE, even if new_bfqq coincides with the in-service queue, the
-+ * io_cq of new_bfqq is not available, because, if the in-service
-+ * queue is shared, bfqd->in_service_bic may not point to the
-+ * io_cq of the in-service queue.
-+ * Redirecting the requests of the process owning bfqq to the
-+ * currently in-service queue is in any case the best option, as
-+ * we feed the in-service queue with new requests close to the
-+ * last request served and, by doing so, hopefully increase the
-+ * throughput.
-+ */
-+ bfqq->new_bfqq = new_bfqq;
-+ atomic_add(process_refs, &new_bfqq->ref);
-+ return new_bfqq;
-+}
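-+
-+/*
-+ * The while loop at the top of bfq_setup_merge() can be read in
-+ * isolation: it follows the chain of pending merges to its final target,
-+ * and refuses the merge if the walk would close a cycle. A simplified
-+ * sketch, with invented type and function names:
-+ *
-+ *     struct q { struct q *new_q; };      // stands in for ->new_bfqq
-+ *
-+ *     static struct q *final_merge_target(struct q *start, struct q *cand)
-+ *     {
-+ *             struct q *hop;
-+ *
-+ *             while ((hop = cand->new_q) != NULL) {
-+ *                     if (hop == start)
-+ *                             return NULL;  // would form a cycle
-+ *                     cand = hop;           // skip interim targets
-+ *             }
-+ *             return cand;
-+ *     }
-+ */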
-+
-+/*
-+ * Attempt to schedule a merge of bfqq with the currently in-service queue
-+ * or with a close queue among the scheduled queues.
-+ * Return NULL if no merge was scheduled, a pointer to the shared bfq_queue
-+ * structure otherwise.
-+ *
-+ * The OOM queue is not allowed to participate in cooperation: in fact, since
-+ * the requests temporarily redirected to the OOM queue could be redirected
-+ * again to dedicated queues at any time, the state needed to correctly
-+ * handle merging with the OOM queue would be quite complex and expensive
-+ * to maintain. Besides, in so critical a condition as an out-of-memory
-+ * shortage, the benefits of queue merging are likely marginal or negligible.
-+ */
-+static struct bfq_queue *
-+bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-+ void *io_struct, bool request)
-+{
-+ struct bfq_queue *in_service_bfqq, *new_bfqq;
-+
-+ if (bfqq->new_bfqq)
-+ return bfqq->new_bfqq;
-+
-+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
-+ return NULL;
-+
-+ in_service_bfqq = bfqd->in_service_queue;
-+
-+ if (in_service_bfqq == NULL || in_service_bfqq == bfqq ||
-+ !bfqd->in_service_bic ||
-+ unlikely(in_service_bfqq == &bfqd->oom_bfqq))
-+ goto check_scheduled;
-+
-+ if (bfq_class_idle(in_service_bfqq) || bfq_class_idle(bfqq))
-+ goto check_scheduled;
-+
-+ if (bfq_class_rt(in_service_bfqq) != bfq_class_rt(bfqq))
-+ goto check_scheduled;
-+
-+ if (in_service_bfqq->entity.parent != bfqq->entity.parent)
-+ goto check_scheduled;
-+
-+ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
-+ bfq_bfqq_sync(in_service_bfqq) && bfq_bfqq_sync(bfqq)) {
-+ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
-+ if (new_bfqq != NULL)
-+ return new_bfqq; /* Merge with in-service queue */
-+ }
-+
-+ /*
-+ * Check whether there is a cooperator among currently scheduled
-+ * queues. The only thing we need is that the bio/request is not
-+ * NULL, as we need it to establish whether a cooperator exists.
-+ */
-+check_scheduled:
-+ new_bfqq = bfq_close_cooperator(bfqd, bfqq,
-+ bfq_io_struct_pos(io_struct, request));
-+ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq))
-+ return bfq_setup_merge(bfqq, new_bfqq);
-+
-+ return NULL;
-+}
-+
-+static inline void
-+bfq_bfqq_save_state(struct bfq_queue *bfqq)
-+{
-+ /*
-+ * If bfqq->bic == NULL, the queue is already shared or its requests
-+ * have already been redirected to a shared queue; both idle window
-+ * and weight raising state have already been saved. Do nothing.
-+ */
-+ if (bfqq->bic == NULL)
-+ return;
-+ if (bfqq->bic->wr_time_left)
-+ /*
-+ * This is the queue of a just-started process, and would
-+ * deserve weight raising: we set wr_time_left to the full
-+ * weight-raising duration to trigger weight-raising when
-+ * and if the queue is split and the first request of the
-+ * queue is enqueued.
-+ */
-+ bfqq->bic->wr_time_left = bfq_wr_duration(bfqq->bfqd);
-+ else if (bfqq->wr_coeff > 1) {
-+ unsigned long wr_duration =
-+ jiffies - bfqq->last_wr_start_finish;
-+ /*
-+ * It may happen that a queue's weight raising period lasts
-+ * longer than its wr_cur_max_time, as weight raising is
-+ * handled only when a request is enqueued or dispatched (it
-+ * does not use any timer). If the weight raising period is
-+ * about to end, don't save it.
-+ */
-+ if (bfqq->wr_cur_max_time <= wr_duration)
-+ bfqq->bic->wr_time_left = 0;
-+ else
-+ bfqq->bic->wr_time_left =
-+ bfqq->wr_cur_max_time - wr_duration;
-+ /*
-+ * The bfq_queue is becoming shared or the requests of the
-+ * process owning the queue are being redirected to a shared
-+ * queue. Stop the weight raising period of the queue, as in
-+ * both cases it should not be owned by an interactive or
-+ * soft real-time application.
-+ */
-+ bfq_bfqq_end_wr(bfqq);
-+ } else
-+ bfqq->bic->wr_time_left = 0;
-+ bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
-+ bfqq->bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
-+ bfqq->bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
-+ bfqq->bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
-+ bfqq->bic->cooperations++;
-+ bfqq->bic->failed_cooperations = 0;
-+}
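-+
-+/*
-+ * The wr_time_left snapshot above is a clamped subtraction: the time left
-+ * is the full weight-raising period minus the time already elapsed,
-+ * floored at zero. A minimal sketch with invented jiffies values:
-+ *
-+ *     unsigned long jiffies = 5000;                   // "now"
-+ *     unsigned long last_wr_start_finish = 3800;      // wr started here
-+ *     unsigned long wr_cur_max_time = 2000;           // full wr period
-+ *     unsigned long elapsed = jiffies - last_wr_start_finish;
-+ *     unsigned long left = wr_cur_max_time <= elapsed ?
-+ *                             0 : wr_cur_max_time - elapsed;
-+ *
-+ *     // 2000 - (5000 - 3800) = 800 jiffies of weight raising left
-+ */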
-+
-+static inline void
-+bfq_get_bic_reference(struct bfq_queue *bfqq)
-+{
-+ /*
-+ * If bfqq->bic has a non-NULL value, the bic to which it belongs
-+ * is about to begin using a shared bfq_queue.
-+ */
-+ if (bfqq->bic)
-+ atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
-+}
-+
-+static void
-+bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
-+ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
-+{
-+ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
-+ (long unsigned)new_bfqq->pid);
-+ /* Save weight raising and idle window of the merged queues */
-+ bfq_bfqq_save_state(bfqq);
-+ bfq_bfqq_save_state(new_bfqq);
-+ if (bfq_bfqq_IO_bound(bfqq))
-+ bfq_mark_bfqq_IO_bound(new_bfqq);
-+ bfq_clear_bfqq_IO_bound(bfqq);
-+ /*
-+ * Grab a reference to the bic, to prevent it from being destroyed
-+ * before being possibly touched by a bfq_split_bfqq().
-+ */
-+ bfq_get_bic_reference(bfqq);
-+ bfq_get_bic_reference(new_bfqq);
-+ /*
-+ * Merge queues (that is, let bic redirect its requests to new_bfqq)
-+ */
-+ bic_set_bfqq(bic, new_bfqq, 1);
-+ bfq_mark_bfqq_coop(new_bfqq);
-+ /*
-+ * new_bfqq now belongs to at least two bics (it is a shared queue):
-+ * set new_bfqq->bic to NULL. bfqq either:
-+ * - does not belong to any bic any more, and hence bfqq->bic must
-+ * be set to NULL, or
-+ * - is a queue whose owning bics have already been redirected to a
-+ * different queue, hence the queue is destined to not belong to
-+ * any bic soon and bfqq->bic is already NULL (therefore the next
-+ * assignment causes no harm).
-+ */
-+ new_bfqq->bic = NULL;
-+ bfqq->bic = NULL;
-+ bfq_put_queue(bfqq);
-+}
-+
-+static inline void bfq_bfqq_increase_failed_cooperations(struct bfq_queue *bfqq)
-+{
-+ struct bfq_io_cq *bic = bfqq->bic;
-+ struct bfq_data *bfqd = bfqq->bfqd;
-+
-+ if (bic && bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh) {
-+ bic->failed_cooperations++;
-+ if (bic->failed_cooperations >= bfqd->bfq_failed_cooperations)
-+ bic->cooperations = 0;
-+ }
-+}
-+
-+static int bfq_allow_merge(struct request_queue *q, struct request *rq,
-+ struct bio *bio)
-+{
-+ struct bfq_data *bfqd = q->elevator->elevator_data;
-+ struct bfq_io_cq *bic;
-+ struct bfq_queue *bfqq, *new_bfqq;
-+
-+ /*
-+ * Disallow merge of a sync bio into an async request.
-+ */
-+ if (bfq_bio_sync(bio) && !rq_is_sync(rq))
-+ return 0;
-+
-+ /*
-+ * Lookup the bfqq that this bio will be queued with. Allow
-+ * merge only if rq is queued there.
-+ * Queue lock is held here.
-+ */
-+ bic = bfq_bic_lookup(bfqd, current->io_context);
-+ if (bic == NULL)
-+ return 0;
-+
-+ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
-+ /*
-+ * We take advantage of this function to perform an early merge
-+ * of the queues of possible cooperating processes.
-+ */
-+ if (bfqq != NULL) {
-+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
-+ if (new_bfqq != NULL) {
-+ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
-+ /*
-+ * If we get here, the bio will be queued in the
-+ * shared queue, i.e., new_bfqq, so use new_bfqq
-+ * to decide whether bio and rq can be merged.
-+ */
-+ bfqq = new_bfqq;
-+ } else
-+ bfq_bfqq_increase_failed_cooperations(bfqq);
-+ }
-+
-+ return bfqq == RQ_BFQQ(rq);
-+}
-+
-+static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
-+ struct bfq_queue *bfqq)
-+{
-+ if (bfqq != NULL) {
-+ bfq_mark_bfqq_must_alloc(bfqq);
-+ bfq_mark_bfqq_budget_new(bfqq);
-+ bfq_clear_bfqq_fifo_expire(bfqq);
-+
-+ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
-+
-+ bfq_log_bfqq(bfqd, bfqq,
-+ "set_in_service_queue, cur-budget = %lu",
-+ bfqq->entity.budget);
-+ }
-+
-+ bfqd->in_service_queue = bfqq;
-+}
-+
-+/*
-+ * Get and set a new queue for service.
-+ */
-+static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
-+{
-+ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
-+
-+ __bfq_set_in_service_queue(bfqd, bfqq);
-+ return bfqq;
-+}
-+
- /*
- * If enough samples have been computed, return the current max budget
- * stored in bfqd, which is dynamically updated according to the
-@@ -1475,61 +1789,6 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
- return rq;
- }
-
--/* Must be called with the queue_lock held. */
--static int bfqq_process_refs(struct bfq_queue *bfqq)
--{
-- int process_refs, io_refs;
--
-- io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
-- process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
-- BUG_ON(process_refs < 0);
-- return process_refs;
--}
--
--static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
--{
-- int process_refs, new_process_refs;
-- struct bfq_queue *__bfqq;
--
-- /*
-- * If there are no process references on the new_bfqq, then it is
-- * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
-- * may have dropped their last reference (not just their last process
-- * reference).
-- */
-- if (!bfqq_process_refs(new_bfqq))
-- return;
--
-- /* Avoid a circular list and skip interim queue merges. */
-- while ((__bfqq = new_bfqq->new_bfqq)) {
-- if (__bfqq == bfqq)
-- return;
-- new_bfqq = __bfqq;
-- }
--
-- process_refs = bfqq_process_refs(bfqq);
-- new_process_refs = bfqq_process_refs(new_bfqq);
-- /*
-- * If the process for the bfqq has gone away, there is no
-- * sense in merging the queues.
-- */
-- if (process_refs == 0 || new_process_refs == 0)
-- return;
--
-- /*
-- * Merge in the direction of the lesser amount of work.
-- */
-- if (new_process_refs >= process_refs) {
-- bfqq->new_bfqq = new_bfqq;
-- atomic_add(process_refs, &new_bfqq->ref);
-- } else {
-- new_bfqq->new_bfqq = bfqq;
-- atomic_add(new_process_refs, &bfqq->ref);
-- }
-- bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
-- new_bfqq->pid);
--}
--
- static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
- {
- struct bfq_entity *entity = &bfqq->entity;
-@@ -2263,7 +2522,7 @@ static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
- */
- static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
- {
-- struct bfq_queue *bfqq, *new_bfqq = NULL;
-+ struct bfq_queue *bfqq;
- struct request *next_rq;
- enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
-
-@@ -2273,17 +2532,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
-
- bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
-
-- /*
-- * If another queue has a request waiting within our mean seek
-- * distance, let it run. The expire code will check for close
-- * cooperators and put the close queue at the front of the
-- * service tree. If possible, merge the expiring queue with the
-- * new bfqq.
-- */
-- new_bfqq = bfq_close_cooperator(bfqd, bfqq);
-- if (new_bfqq != NULL && bfqq->new_bfqq == NULL)
-- bfq_setup_merge(bfqq, new_bfqq);
--
- if (bfq_may_expire_for_budg_timeout(bfqq) &&
- !timer_pending(&bfqd->idle_slice_timer) &&
- !bfq_bfqq_must_idle(bfqq))
-@@ -2322,10 +2570,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
- bfq_clear_bfqq_wait_request(bfqq);
- del_timer(&bfqd->idle_slice_timer);
- }
-- if (new_bfqq == NULL)
-- goto keep_queue;
-- else
-- goto expire;
-+ goto keep_queue;
- }
- }
-
-@@ -2334,40 +2579,30 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
- * in flight (possibly waiting for a completion) or is idling for a
- * new request, then keep it.
- */
-- if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
-- (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq)))) {
-+ if (timer_pending(&bfqd->idle_slice_timer) ||
-+ (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq))) {
- bfqq = NULL;
- goto keep_queue;
-- } else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
-- /*
-- * Expiring the queue because there is a close cooperator,
-- * cancel timer.
-- */
-- bfq_clear_bfqq_wait_request(bfqq);
-- del_timer(&bfqd->idle_slice_timer);
- }
-
- reason = BFQ_BFQQ_NO_MORE_REQUESTS;
- expire:
- bfq_bfqq_expire(bfqd, bfqq, 0, reason);
- new_queue:
-- bfqq = bfq_set_in_service_queue(bfqd, new_bfqq);
-+ bfqq = bfq_set_in_service_queue(bfqd);
- bfq_log(bfqd, "select_queue: new queue %d returned",
- bfqq != NULL ? bfqq->pid : 0);
- keep_queue:
- return bfqq;
- }
-
--static void bfq_update_wr_data(struct bfq_data *bfqd,
-- struct bfq_queue *bfqq)
-+static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
- {
-- if (bfqq->wr_coeff > 1) { /* queue is being boosted */
-- struct bfq_entity *entity = &bfqq->entity;
--
-+ struct bfq_entity *entity = &bfqq->entity;
-+ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
- bfq_log_bfqq(bfqd, bfqq,
- "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
-- jiffies_to_msecs(jiffies -
-- bfqq->last_wr_start_finish),
-+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
- jiffies_to_msecs(bfqq->wr_cur_max_time),
- bfqq->wr_coeff,
- bfqq->entity.weight, bfqq->entity.orig_weight);
-@@ -2376,12 +2611,16 @@ static void bfq_update_wr_data(struct bfq_data *bfqd,
- entity->orig_weight * bfqq->wr_coeff);
- if (entity->ioprio_changed)
- bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
-+
- /*
- * If the queue was activated in a burst, or
- * too much time has elapsed from the beginning
-- * of this weight-raising, then end weight raising.
-+ * of this weight-raising period, or the queue has
-+ * exceeded the acceptable number of cooperations,
-+ * then end weight raising.
- */
- if (bfq_bfqq_in_large_burst(bfqq) ||
-+ bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh ||
- time_is_before_jiffies(bfqq->last_wr_start_finish +
- bfqq->wr_cur_max_time)) {
- bfqq->last_wr_start_finish = jiffies;
-@@ -2390,11 +2629,13 @@ static void bfq_update_wr_data(struct bfq_data *bfqd,
- bfqq->last_wr_start_finish,
- jiffies_to_msecs(bfqq->wr_cur_max_time));
- bfq_bfqq_end_wr(bfqq);
-- __bfq_entity_update_weight_prio(
-- bfq_entity_service_tree(entity),
-- entity);
- }
- }
-+ /* Update weight both if it must be raised and if it must be lowered */
-+ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
-+ __bfq_entity_update_weight_prio(
-+ bfq_entity_service_tree(entity),
-+ entity);
- }
-
- /*
-@@ -2642,6 +2883,25 @@ static inline void bfq_init_icq(struct io_cq *icq)
- struct bfq_io_cq *bic = icq_to_bic(icq);
-
- bic->ttime.last_end_request = jiffies;
-+ /*
-+ * A newly created bic indicates that the process has just
-+ * started doing I/O, and is probably mapping into memory its
-+ * executable and libraries: it definitely needs weight raising.
-+ * There is however the possibility that the process performs,
-+ * for a while, I/O close to some other process. EQM intercepts
-+ * this behavior and may merge the queue corresponding to the
-+ * process with some other queue, BEFORE the weight of the queue
-+ * is raised. Merged queues are not weight-raised (they are assumed
-+ * to belong to processes that benefit only from high throughput).
-+ * If the merge is basically the consequence of an accident, then
-+ * the queue will be split soon and will get back its old weight.
-+ * It is then important to record somewhere that this queue
-+ * does need weight raising, even if it did not manage to get its
-+ * weight raised before being merged. For this purpose, we overload
-+ * the field wr_time_left and assign 1 to it, to mark the queue
-+ * as needing weight raising.
-+ */
-+ bic->wr_time_left = 1;
- }
-
- static void bfq_exit_icq(struct io_cq *icq)
-@@ -2655,6 +2915,13 @@ static void bfq_exit_icq(struct io_cq *icq)
- }
-
- if (bic->bfqq[BLK_RW_SYNC]) {
-+ /*
-+ * If the bic is using a shared queue, put the reference
-+ * taken on the io_context when the bic started using a
-+ * shared bfq_queue.
-+ */
-+ if (bfq_bfqq_coop(bic->bfqq[BLK_RW_SYNC]))
-+ put_io_context(icq->ioc);
- bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
- bic->bfqq[BLK_RW_SYNC] = NULL;
- }
-@@ -2950,6 +3217,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
- if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
- return;
-
-+ /* Idle window just restored, statistics are meaningless. */
-+ if (bfq_bfqq_just_split(bfqq))
-+ return;
-+
- enable_idle = bfq_bfqq_idle_window(bfqq);
-
- if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
-@@ -2997,6 +3268,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
- if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
- !BFQQ_SEEKY(bfqq))
- bfq_update_idle_window(bfqd, bfqq, bic);
-+ bfq_clear_bfqq_just_split(bfqq);
-
- bfq_log_bfqq(bfqd, bfqq,
- "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
-@@ -3057,13 +3329,49 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
- static void bfq_insert_request(struct request_queue *q, struct request *rq)
- {
- struct bfq_data *bfqd = q->elevator->elevator_data;
-- struct bfq_queue *bfqq = RQ_BFQQ(rq);
-+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
-
- assert_spin_locked(bfqd->queue->queue_lock);
-+
-+ /*
-+ * An unplug may trigger a requeue of a request from the device
-+ * driver: make sure we are in process context while trying to
-+ * merge two bfq_queues.
-+ */
-+ if (!in_interrupt()) {
-+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
-+ if (new_bfqq != NULL) {
-+ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
-+ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
-+ /*
-+ * Release the request's reference to the old bfqq
-+ * and make sure one is taken to the shared queue.
-+ */
-+ new_bfqq->allocated[rq_data_dir(rq)]++;
-+ bfqq->allocated[rq_data_dir(rq)]--;
-+ atomic_inc(&new_bfqq->ref);
-+ bfq_put_queue(bfqq);
-+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
-+ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
-+ bfqq, new_bfqq);
-+ rq->elv.priv[1] = new_bfqq;
-+ bfqq = new_bfqq;
-+ } else
-+ bfq_bfqq_increase_failed_cooperations(bfqq);
-+ }
-+
- bfq_init_prio_data(bfqq, RQ_BIC(rq));
-
- bfq_add_request(rq);
-
-+ /*
-+ * Here a newly-created bfq_queue has already started a weight-raising
-+ * period: clear wr_time_left to prevent bfq_bfqq_save_state()
-+ * from assigning it a full weight-raising period. See the detailed
-+ * comments about this field in bfq_init_icq().
-+ */
-+ if (bfqq->bic != NULL)
-+ bfqq->bic->wr_time_left = 0;
- rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
- list_add_tail(&rq->queuelist, &bfqq->fifo);
-
-@@ -3228,18 +3536,6 @@ static void bfq_put_request(struct request *rq)
- }
- }
-
--static struct bfq_queue *
--bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
-- struct bfq_queue *bfqq)
--{
-- bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
-- (long unsigned)bfqq->new_bfqq->pid);
-- bic_set_bfqq(bic, bfqq->new_bfqq, 1);
-- bfq_mark_bfqq_coop(bfqq->new_bfqq);
-- bfq_put_queue(bfqq);
-- return bic_to_bfqq(bic, 1);
--}
--
- /*
- * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
- * was the last process referring to said bfqq.
-@@ -3248,6 +3544,9 @@ static struct bfq_queue *
- bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
- {
- bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
-+
-+ put_io_context(bic->icq.ioc);
-+
- if (bfqq_process_refs(bfqq) == 1) {
- bfqq->pid = current->pid;
- bfq_clear_bfqq_coop(bfqq);
-@@ -3276,6 +3575,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
- struct bfq_queue *bfqq;
- struct bfq_group *bfqg;
- unsigned long flags;
-+ bool split = false;
-
- might_sleep_if(gfp_mask & __GFP_WAIT);
-
-@@ -3293,25 +3593,26 @@ new_queue:
- if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
- bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
- bic_set_bfqq(bic, bfqq, is_sync);
-+ if (split && is_sync) {
-+ if ((bic->was_in_burst_list && bfqd->large_burst) ||
-+ bic->saved_in_large_burst)
-+ bfq_mark_bfqq_in_large_burst(bfqq);
-+ else {
-+ bfq_clear_bfqq_in_large_burst(bfqq);
-+ if (bic->was_in_burst_list)
-+ hlist_add_head(&bfqq->burst_list_node,
-+ &bfqd->burst_list);
-+ }
-+ }
- } else {
-- /*
-- * If the queue was seeky for too long, break it apart.
-- */
-+ /* If the queue was seeky for too long, break it apart. */
- if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
- bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
- bfqq = bfq_split_bfqq(bic, bfqq);
-+ split = true;
- if (!bfqq)
- goto new_queue;
- }
--
-- /*
-- * Check to see if this queue is scheduled to merge with
-- * another closely cooperating queue. The merging of queues
-- * happens here as it must be done in process context.
-- * The reference on new_bfqq was taken in merge_bfqqs.
-- */
-- if (bfqq->new_bfqq != NULL)
-- bfqq = bfq_merge_bfqqs(bfqd, bic, bfqq);
- }
-
- bfqq->allocated[rw]++;
-@@ -3322,6 +3623,26 @@ new_queue:
- rq->elv.priv[0] = bic;
- rq->elv.priv[1] = bfqq;
-
-+ /*
-+ * If a bfq_queue has only one process reference, it is owned
-+ * by only one bfq_io_cq: we can set the bic field of the
-+ * bfq_queue to the address of that structure. Also, if the
-+ * queue has just been split, mark a flag so that the
-+ * information is available to the other scheduler hooks.
-+ */
-+ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
-+ bfqq->bic = bic;
-+ if (split) {
-+ bfq_mark_bfqq_just_split(bfqq);
-+ /*
-+ * If the queue has just been split from a shared
-+ * queue, restore the idle window and the possible
-+ * weight raising period.
-+ */
-+ bfq_bfqq_resume_state(bfqq, bic);
-+ }
-+ }
-+
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return 0;
-diff --git a/block/bfq-sched.c b/block/bfq-sched.c
-index 2931563..6764a7e 100644
---- a/block/bfq-sched.c
-+++ b/block/bfq-sched.c
-@@ -1091,34 +1091,6 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
- return bfqq;
- }
-
--/*
-- * Forced extraction of the given queue.
-- */
--static void bfq_get_next_queue_forced(struct bfq_data *bfqd,
-- struct bfq_queue *bfqq)
--{
-- struct bfq_entity *entity;
-- struct bfq_sched_data *sd;
--
-- BUG_ON(bfqd->in_service_queue != NULL);
--
-- entity = &bfqq->entity;
-- /*
-- * Bubble up extraction/update from the leaf to the root.
-- */
-- for_each_entity(entity) {
-- sd = entity->sched_data;
-- bfq_update_budget(entity);
-- bfq_update_vtime(bfq_entity_service_tree(entity));
-- bfq_active_extract(bfq_entity_service_tree(entity), entity);
-- sd->in_service_entity = entity;
-- sd->next_in_service = NULL;
-- entity->service = 0;
-- }
--
-- return;
--}
--
- static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
- {
- if (bfqd->in_service_bic != NULL) {
-diff --git a/block/bfq.h b/block/bfq.h
-index 518f2ac..4f519ea 100644
---- a/block/bfq.h
-+++ b/block/bfq.h
-@@ -218,18 +218,21 @@ struct bfq_group;
- * idle @bfq_queue with no outstanding requests, then
- * the task associated with the queue it is deemed as
- * soft real-time (see the comments to the function
-- * bfq_bfqq_softrt_next_start()).
-+ * bfq_bfqq_softrt_next_start())
- * @last_idle_bklogged: time of the last transition of the @bfq_queue from
- * idle to backlogged
- * @service_from_backlogged: cumulative service received from the @bfq_queue
- * since the last transition from idle to
- * backlogged
-+ * @bic: pointer to the bfq_io_cq owning the bfq_queue, set to %NULL if the
-+ * queue is shared
- *
-- * A bfq_queue is a leaf request queue; it can be associated with an io_context
-- * or more, if it is async or shared between cooperating processes. @cgroup
-- * holds a reference to the cgroup, to be sure that it does not disappear while
-- * a bfqq still references it (mostly to avoid races between request issuing and
-- * task migration followed by cgroup destruction).
-+ * A bfq_queue is a leaf request queue; it can be associated with an
-+ * io_context or more, if it is async or shared between cooperating
-+ * processes. @cgroup holds a reference to the cgroup, to be sure that it
-+ * does not disappear while a bfqq still references it (mostly to avoid
-+ * races between request issuing and task migration followed by cgroup
-+ * destruction).
- * All the fields are protected by the queue lock of the containing bfqd.
- */
- struct bfq_queue {
-@@ -269,6 +272,7 @@ struct bfq_queue {
- unsigned int requests_within_timer;
-
- pid_t pid;
-+ struct bfq_io_cq *bic;
-
- /* weight-raising fields */
- unsigned long wr_cur_max_time;
-@@ -298,12 +302,42 @@ struct bfq_ttime {
- * @icq: associated io_cq structure
- * @bfqq: array of two process queues, the sync and the async
- * @ttime: associated @bfq_ttime struct
-+ * @wr_time_left: snapshot of the time left before weight raising ends
-+ * for the sync queue associated with this process; this
-+ * snapshot is taken to remember this value while the weight
-+ * raising is suspended because the queue is merged with a
-+ * shared queue, and is used to set @wr_cur_max_time
-+ * when the queue is split from the shared queue and its
-+ * weight is raised again
-+ * @saved_idle_window: same purpose as the previous field for the idle
-+ * window
-+ * @saved_IO_bound: same purpose as the previous two fields for the I/O
-+ * bound classification of a queue
-+ * @saved_in_large_burst: same purpose as the previous fields for the
-+ * flag recording whether the queue belongs
-+ * to a large burst
-+ * @was_in_burst_list: true if the queue belonged to a burst list
-+ * before its merge with another cooperating queue
-+ * @cooperations: counter of consecutive successful queue merges undergone
-+ * by any of the process' @bfq_queues
-+ * @failed_cooperations: counter of consecutive failed queue merges of any
-+ * of the process' @bfq_queues
- */
- struct bfq_io_cq {
- struct io_cq icq; /* must be the first member */
- struct bfq_queue *bfqq[2];
- struct bfq_ttime ttime;
- int ioprio;
-+
-+ unsigned int wr_time_left;
-+ bool saved_idle_window;
-+ bool saved_IO_bound;
-+
-+ bool saved_in_large_burst;
-+ bool was_in_burst_list;
-+
-+ unsigned int cooperations;
-+ unsigned int failed_cooperations;
- };
-
- enum bfq_device_speed {
-@@ -539,7 +573,7 @@ enum bfqq_state_flags {
- BFQ_BFQQ_FLAG_prio_changed, /* task priority has changed */
- BFQ_BFQQ_FLAG_sync, /* synchronous queue */
- BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */
-- BFQ_BFQQ_FLAG_IO_bound, /*
-+ BFQ_BFQQ_FLAG_IO_bound, /*
- * bfqq has timed-out at least once
- * having consumed at most 2/10 of
- * its budget
-@@ -552,12 +586,13 @@ enum bfqq_state_flags {
- * bfqq has proved to be slow and
- * seeky until budget timeout
- */
-- BFQ_BFQQ_FLAG_softrt_update, /*
-+ BFQ_BFQQ_FLAG_softrt_update, /*
- * may need softrt-next-start
- * update
- */
- BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
-- BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be splitted */
-+ BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be split */
-+ BFQ_BFQQ_FLAG_just_split, /* queue has just been split */
- };
-
- #define BFQ_BFQQ_FNS(name) \
-@@ -587,6 +622,7 @@ BFQ_BFQQ_FNS(in_large_burst);
- BFQ_BFQQ_FNS(constantly_seeky);
- BFQ_BFQQ_FNS(coop);
- BFQ_BFQQ_FNS(split_coop);
-+BFQ_BFQQ_FNS(just_split);
- BFQ_BFQQ_FNS(softrt_update);
- #undef BFQ_BFQQ_FNS
-
---
-2.1.0
-
diff --git a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
deleted file mode 100644
index c4efd06a..00000000
--- a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
+++ /dev/null
@@ -1,402 +0,0 @@
-WARNING - this version of the patch works with version 4.9+ of gcc and with
-kernel version 3.15.x+ and should NOT be applied when compiling on older
-versions due to name changes of the flags with the 4.9 release of gcc.
-Use the older version of this patch hosted on the same github for older
-versions of gcc. For example:
-
-corei7 --> nehalem
-corei7-avx --> sandybridge
-core-avx-i --> ivybridge
-core-avx2 --> haswell
-
-For more, see: https://gcc.gnu.org/gcc-4.9/changes.html
-
-It also changes 'atom' to 'bonnell' in accordance with the gcc v4.9 changes.
-Note that upstream is using the deprecated 'march=atom' flag when I believe it
-should use the newer 'march=bonnell' flag for atom processors.
-
-I have made that change to this patch set as well. See the following kernel
-bug report to see if I'm right: https://bugzilla.kernel.org/show_bug.cgi?id=77461
-
-This patch will expand the number of microarchitectures to include newer
-processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
-14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD
-Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 (Nehalem), Intel 1.5 Gen Core
-i3/i5/i7 (Westmere), Intel 2nd Gen Core i3/i5/i7 (Sandybridge), Intel 3rd Gen
-Core i3/i5/i7 (Ivybridge), Intel 4th Gen Core i3/i5/i7 (Haswell), Intel 5th
-Gen Core i3/i5/i7 (Broadwell), and the low power Silvermont series of Atom
-processors (Silvermont). It also offers the compiler the 'native' flag.
-
-Small but real speed increases are measurable using a kernel compilation
-(make) benchmark comparing a generic kernel to one built with one of the
-respective microarchitectures.
-
-See the following experimental evidence supporting this statement:
-https://github.com/graysky2/kernel_gcc_patch
-
-REQUIREMENTS
-linux version >=3.15
-gcc version >=4.9
-
---- a/arch/x86/include/asm/module.h 2014-06-16 16:44:27.000000000 -0400
-+++ b/arch/x86/include/asm/module.h 2015-03-07 03:27:32.556672424 -0500
-@@ -15,6 +15,22 @@
- #define MODULE_PROC_FAMILY "586MMX "
- #elif defined CONFIG_MCORE2
- #define MODULE_PROC_FAMILY "CORE2 "
-+#elif defined CONFIG_MNATIVE
-+#define MODULE_PROC_FAMILY "NATIVE "
-+#elif defined CONFIG_MNEHALEM
-+#define MODULE_PROC_FAMILY "NEHALEM "
-+#elif defined CONFIG_MWESTMERE
-+#define MODULE_PROC_FAMILY "WESTMERE "
-+#elif defined CONFIG_MSILVERMONT
-+#define MODULE_PROC_FAMILY "SILVERMONT "
-+#elif defined CONFIG_MSANDYBRIDGE
-+#define MODULE_PROC_FAMILY "SANDYBRIDGE "
-+#elif defined CONFIG_MIVYBRIDGE
-+#define MODULE_PROC_FAMILY "IVYBRIDGE "
-+#elif defined CONFIG_MHASWELL
-+#define MODULE_PROC_FAMILY "HASWELL "
-+#elif defined CONFIG_MBROADWELL
-+#define MODULE_PROC_FAMILY "BROADWELL "
- #elif defined CONFIG_MATOM
- #define MODULE_PROC_FAMILY "ATOM "
- #elif defined CONFIG_M686
-@@ -33,6 +49,20 @@
- #define MODULE_PROC_FAMILY "K7 "
- #elif defined CONFIG_MK8
- #define MODULE_PROC_FAMILY "K8 "
-+#elif defined CONFIG_MK8SSE3
-+#define MODULE_PROC_FAMILY "K8SSE3 "
-+#elif defined CONFIG_MK10
-+#define MODULE_PROC_FAMILY "K10 "
-+#elif defined CONFIG_MBARCELONA
-+#define MODULE_PROC_FAMILY "BARCELONA "
-+#elif defined CONFIG_MBOBCAT
-+#define MODULE_PROC_FAMILY "BOBCAT "
-+#elif defined CONFIG_MBULLDOZER
-+#define MODULE_PROC_FAMILY "BULLDOZER "
-+#elif defined CONFIG_MPILEDRIVER
-+#define MODULE_PROC_FAMILY "PILEDRIVER "
-+#elif defined CONFIG_MJAGUAR
-+#define MODULE_PROC_FAMILY "JAGUAR "
- #elif defined CONFIG_MELAN
- #define MODULE_PROC_FAMILY "ELAN "
- #elif defined CONFIG_MCRUSOE
---- a/arch/x86/Kconfig.cpu 2014-06-16 16:44:27.000000000 -0400
-+++ b/arch/x86/Kconfig.cpu 2015-03-07 03:32:14.337713226 -0500
-@@ -137,9 +137,8 @@ config MPENTIUM4
- -Paxville
- -Dempsey
-
--
- config MK6
-- bool "K6/K6-II/K6-III"
-+ bool "AMD K6/K6-II/K6-III"
- depends on X86_32
- ---help---
- Select this for an AMD K6-family processor. Enables use of
-@@ -147,7 +146,7 @@ config MK6
- flags to GCC.
-
- config MK7
-- bool "Athlon/Duron/K7"
-+ bool "AMD Athlon/Duron/K7"
- depends on X86_32
- ---help---
- Select this for an AMD Athlon K7-family processor. Enables use of
-@@ -155,12 +154,62 @@ config MK7
- flags to GCC.
-
- config MK8
-- bool "Opteron/Athlon64/Hammer/K8"
-+ bool "AMD Opteron/Athlon64/Hammer/K8"
- ---help---
- Select this for an AMD Opteron or Athlon64 Hammer-family processor.
- Enables use of some extended instructions, and passes appropriate
- optimization flags to GCC.
-
-+config MK8SSE3
-+ bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
-+ ---help---
-+ Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
-+ Enables use of some extended instructions, and passes appropriate
-+ optimization flags to GCC.
-+
-+config MK10
-+ bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
-+ ---help---
-+ Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
-+ Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
-+ Enables use of some extended instructions, and passes appropriate
-+ optimization flags to GCC.
-+
-+config MBARCELONA
-+ bool "AMD Barcelona"
-+ ---help---
-+ Select this for AMD Barcelona and newer processors.
-+
-+ Enables -march=barcelona
-+
-+config MBOBCAT
-+ bool "AMD Bobcat"
-+ ---help---
-+ Select this for AMD Bobcat processors.
-+
-+ Enables -march=btver1
-+
-+config MBULLDOZER
-+ bool "AMD Bulldozer"
-+ ---help---
-+ Select this for AMD Bulldozer processors.
-+
-+ Enables -march=bdver1
-+
-+config MPILEDRIVER
-+ bool "AMD Piledriver"
-+ ---help---
-+ Select this for AMD Piledriver processors.
-+
-+ Enables -march=bdver2
-+
-+config MJAGUAR
-+ bool "AMD Jaguar"
-+ ---help---
-+ Select this for AMD Jaguar processors.
-+
-+ Enables -march=btver2
-+
- config MCRUSOE
- bool "Crusoe"
- depends on X86_32
-@@ -251,8 +300,17 @@ config MPSC
- using the cpu family field
- in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
-
-+config MATOM
-+ bool "Intel Atom"
-+ ---help---
-+
-+ Select this for the Intel Atom platform. Intel Atom CPUs have an
-+ in-order pipelining architecture and thus can benefit from
-+ accordingly optimized code. Use a recent GCC with specific Atom
-+ support in order to fully benefit from selecting this option.
-+
- config MCORE2
-- bool "Core 2/newer Xeon"
-+ bool "Intel Core 2"
- ---help---
-
- Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -260,14 +318,63 @@ config MCORE2
- family in /proc/cpuinfo. Newer ones have 6 and older ones 15
- (not a typo)
-
--config MATOM
-- bool "Intel Atom"
-+ Enables -march=core2
-+
-+config MNEHALEM
-+ bool "Intel Nehalem"
- ---help---
-
-- Select this for the Intel Atom platform. Intel Atom CPUs have an
-- in-order pipelining architecture and thus can benefit from
-- accordingly optimized code. Use a recent GCC with specific Atom
-- support in order to fully benefit from selecting this option.
-+ Select this for 1st Gen Core processors in the Nehalem family.
-+
-+ Enables -march=nehalem
-+
-+config MWESTMERE
-+ bool "Intel Westmere"
-+ ---help---
-+
-+ Select this for the Intel Westmere formerly Nehalem-C family.
-+
-+ Enables -march=westmere
-+
-+config MSILVERMONT
-+ bool "Intel Silvermont"
-+ ---help---
-+
-+ Select this for the Intel Silvermont platform.
-+
-+ Enables -march=silvermont
-+
-+config MSANDYBRIDGE
-+ bool "Intel Sandy Bridge"
-+ ---help---
-+
-+ Select this for 2nd Gen Core processors in the Sandy Bridge family.
-+
-+ Enables -march=sandybridge
-+
-+config MIVYBRIDGE
-+ bool "Intel Ivy Bridge"
-+ ---help---
-+
-+ Select this for 3rd Gen Core processors in the Ivy Bridge family.
-+
-+ Enables -march=ivybridge
-+
-+config MHASWELL
-+ bool "Intel Haswell"
-+ ---help---
-+
-+ Select this for 4th Gen Core processors in the Haswell family.
-+
-+ Enables -march=haswell
-+
-+config MBROADWELL
-+ bool "Intel Broadwell"
-+ ---help---
-+
-+ Select this for 5th Gen Core processors in the Broadwell family.
-+
-+ Enables -march=broadwell
-
- config GENERIC_CPU
- bool "Generic-x86-64"
-@@ -276,6 +383,19 @@ config GENERIC_CPU
- Generic x86-64 CPU.
- Run equally well on all x86-64 CPUs.
-
-+config MNATIVE
-+ bool "Native optimizations autodetected by GCC"
-+ ---help---
-+
-+ GCC 4.2 and above support -march=native, which automatically detects
-+ the optimum settings to use based on your processor. -march=native
-+ also detects and applies additional settings beyond -march specific
-+ to your CPU, (eg. -msse4). Unless you have a specific reason not to
-+ (e.g. distcc cross-compiling), you should probably be using
-+ -march=native rather than anything listed below.
-+
-+ Enables -march=native
-+
- endchoice
-
- config X86_GENERIC
-@@ -300,7 +420,7 @@ config X86_INTERNODE_CACHE_SHIFT
- config X86_L1_CACHE_SHIFT
- int
- default "7" if MPENTIUM4 || MPSC
-- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+ default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || BROADWELL || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
- default "4" if MELAN || M486 || MGEODEGX1
- default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-
-@@ -331,11 +451,11 @@ config X86_ALIGNMENT_16
-
- config X86_INTEL_USERCOPY
- def_bool y
-- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE
-
- config X86_USE_PPRO_CHECKSUM
- def_bool y
-- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MATOM || MNATIVE
-
- config X86_USE_3DNOW
- def_bool y
-@@ -359,17 +479,17 @@ config X86_P6_NOP
-
- config X86_TSC
- def_bool y
-- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
-+ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM) || X86_64
-
- config X86_CMPXCHG64
- def_bool y
-- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
-+ depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
-
- # this should be set for all -march=.. options where the compiler
- # generates cmov.
- config X86_CMOV
- def_bool y
-- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+ depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
-
- config X86_MINIMUM_CPU_FAMILY
- int
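X86_CMOV is supposed to track exactly those -march settings for which the compiler emits cmov, which is what the extended dependency list above encodes. A hypothetical smoke test for one of the newly listed targets (the check-cmov target, the sample function, and the grep are illustrative; gcc at -O2 typically, though not guaranteeably, turns the ternary into a cmov when the target supports it):

    check-cmov:
    	echo 'int f(int a, int b, int c) { return a ? b : c; }' | \
    	  gcc -march=nehalem -O2 -S -x c - -o - | grep cmov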
---- a/arch/x86/Makefile 2014-06-16 16:44:27.000000000 -0400
-+++ b/arch/x86/Makefile 2015-03-07 03:33:27.650843211 -0500
-@@ -92,13 +92,35 @@ else
- KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
-
- # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
-+ cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
- cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
-+ cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
-+ cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
-+ cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
-+ cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
-+ cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
-+ cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
-+ cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
- cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
-
- cflags-$(CONFIG_MCORE2) += \
-- $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
-- cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
-- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
-+ $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
-+ cflags-$(CONFIG_MNEHALEM) += \
-+ $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
-+ cflags-$(CONFIG_MWESTMERE) += \
-+ $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
-+ cflags-$(CONFIG_MSILVERMONT) += \
-+ $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
-+ cflags-$(CONFIG_MSANDYBRIDGE) += \
-+ $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
-+ cflags-$(CONFIG_MIVYBRIDGE) += \
-+ $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
-+ cflags-$(CONFIG_MHASWELL) += \
-+ $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
-+ cflags-$(CONFIG_MBROADWELL) += \
-+ $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
-+ cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
-+ $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
- cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
- KBUILD_CFLAGS += $(cflags-y)
-
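Every new cflags line above leans on kbuild's cc-option helper, which probes whether $(CC) accepts a flag and substitutes the second argument when it does not, so the patch degrades gracefully on older toolchains. A simplified standalone sketch (the real helper in scripts/Kbuild.include is built on try-run with a temporary output file rather than this inline shell test):

    CC ?= gcc
    # Expand to $(1) if the compiler accepts it, otherwise to $(2).
    cc-option = $(shell if $(CC) $(1) -S -x c /dev/null -o /dev/null \
                  >/dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)

    # e.g. on a GCC too old for Broadwell this falls back to -mtune=generic:
    demo-cflags := $(call cc-option,-march=broadwell,-mtune=generic)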
---- a/arch/x86/Makefile_32.cpu 2014-06-16 16:44:27.000000000 -0400
-+++ b/arch/x86/Makefile_32.cpu 2015-03-07 03:34:15.203586024 -0500
-@@ -23,7 +23,15 @@ cflags-$(CONFIG_MK6) += -march=k6
- # Please note that patches that add -march=athlon-xp and friends are pointless.
- # They make zero difference whatsoever to performance at this time.
- cflags-$(CONFIG_MK7) += -march=athlon
-+cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
- cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
-+cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-march=athlon)
-+cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10,-march=athlon)
-+cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon)
-+cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1,-march=athlon)
-+cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon)
-+cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2,-march=athlon)
-+cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2,-march=athlon)
- cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
- cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
- cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
-@@ -32,8 +40,15 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-
- cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
- cflags-$(CONFIG_MVIAC7) += -march=i686
- cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
--cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
-- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
-+cflags-$(CONFIG_MNEHALEM) += -march=i686 $(call tune,nehalem)
-+cflags-$(CONFIG_MWESTMERE) += -march=i686 $(call tune,westmere)
-+cflags-$(CONFIG_MSILVERMONT) += -march=i686 $(call tune,silvermont)
-+cflags-$(CONFIG_MSANDYBRIDGE) += -march=i686 $(call tune,sandybridge)
-+cflags-$(CONFIG_MIVYBRIDGE) += -march=i686 $(call tune,ivybridge)
-+cflags-$(CONFIG_MHASWELL) += -march=i686 $(call tune,haswell)
-+cflags-$(CONFIG_MBROADWELL) += -march=i686 $(call tune,broadwell)
-+cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
-+ $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
-
- # AMD Elan support
- cflags-$(CONFIG_MELAN) += -march=i486
-
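The 32-bit table composes the same helper one level up: the tune macro defined near the top of arch/x86/Makefile_32.cpu wraps cc-option around -mtune, so each new entry pins code generation to the i686 baseline ISA while only instruction scheduling tracks the newer core, and the -mtune flag is dropped silently on compilers that do not know the CPU name. A sketch of the composition, mirroring the MNEHALEM line (cc-option stub repeated from the sketch further up):

    CC ?= gcc
    cc-option = $(shell if $(CC) $(1) -S -x c /dev/null -o /dev/null \
                  >/dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
    # Roughly what Makefile_32.cpu defines:
    tune = $(call cc-option,-mtune=$(1),$(2))

    # MNEHALEM on 32-bit: i686 instructions, Nehalem scheduling.
    demo-cflags := -march=i686 $(call tune,nehalem)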