author     Anthony G. Basile <blueness@gentoo.org>    2012-09-18 08:09:57 -0400
committer  Anthony G. Basile <blueness@gentoo.org>    2012-09-18 08:09:57 -0400
commit     e23200f6e64c8febf4f0a37056bc82b1f95f7d47 (patch)
tree       8437f5fb32654b5b8b342d3ac4ffb4375e554bca
parent     Grsec/PaX: 2.9.1-{2.6.32.59,3.5.3}-201209131726 (diff)
download   hardened-patchset-20120917.tar.gz
           hardened-patchset-20120917.tar.bz2
           hardened-patchset-20120917.zip
Grsec/PaX: 2.9.1-{2.6.32.59,3.2.28,3.5.4}-201209171824 (20120917)
-rw-r--r--  2.6.32/0000_README  2
-rw-r--r--  2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201209171823.patch (renamed from 2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201209131725.patch)  69
-rw-r--r--  3.2.29/0000_README (renamed from 3.2.28/0000_README)  6
-rw-r--r--  3.2.29/1021_linux-3.2.22.patch (renamed from 3.2.28/1021_linux-3.2.22.patch)  0
-rw-r--r--  3.2.29/1022_linux-3.2.23.patch (renamed from 3.2.28/1022_linux-3.2.23.patch)  0
-rw-r--r--  3.2.29/1023_linux-3.2.24.patch (renamed from 3.2.28/1023_linux-3.2.24.patch)  0
-rw-r--r--  3.2.29/1024_linux-3.2.25.patch (renamed from 3.2.28/1024_linux-3.2.25.patch)  0
-rw-r--r--  3.2.29/1025_linux-3.2.26.patch (renamed from 3.2.28/1025_linux-3.2.26.patch)  0
-rw-r--r--  3.2.29/1026_linux-3.2.27.patch (renamed from 3.2.28/1026_linux-3.2.27.patch)  0
-rw-r--r--  3.2.29/1027_linux-3.2.28.patch (renamed from 3.2.28/1027_linux-3.2.28.patch)  0
-rw-r--r--  3.2.29/1028_linux-3.2.29.patch  4279
-rw-r--r--  3.2.29/4420_grsecurity-2.9.1-3.2.29-201209171824.patch (renamed from 3.2.28/4420_grsecurity-2.9.1-3.2.29-201209122157.patch)  348
-rw-r--r--  3.2.29/4430_grsec-remove-localversion-grsec.patch (renamed from 3.2.28/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  3.2.29/4435_grsec-mute-warnings.patch (renamed from 3.2.28/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  3.2.29/4440_grsec-remove-protected-paths.patch (renamed from 3.2.28/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  3.2.29/4450_grsec-kconfig-default-gids.patch (renamed from 3.2.28/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  3.2.29/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.2.28/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  3.2.29/4470_disable-compat_vdso.patch (renamed from 3.2.28/4470_disable-compat_vdso.patch)  0
-rw-r--r--  3.5.4/0000_README (renamed from 3.5.3/0000_README)  2
-rw-r--r--  3.5.4/4420_grsecurity-2.9.1-3.5.4-201209171824.patch (renamed from 3.5.3/4420_grsecurity-2.9.1-3.5.3-201209131726.patch)  733
-rw-r--r--  3.5.4/4430_grsec-remove-localversion-grsec.patch (renamed from 3.5.3/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  3.5.4/4435_grsec-mute-warnings.patch (renamed from 3.5.3/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  3.5.4/4440_grsec-remove-protected-paths.patch (renamed from 3.5.3/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  3.5.4/4450_grsec-kconfig-default-gids.patch (renamed from 3.5.3/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  3.5.4/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.5.3/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  3.5.4/4470_disable-compat_vdso.patch (renamed from 3.5.3/4470_disable-compat_vdso.patch)  0
26 files changed, 5139 insertions(+), 300 deletions(-)
diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index c9c7d67..0955009 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -30,7 +30,7 @@ Patch: 1058_linux-2.6.32.59.patch
From: http://www.kernel.org
Desc: Linux 2.6.32.59
-Patch: 4420_grsecurity-2.9.1-2.6.32.59-201209131725.patch
+Patch: 4420_grsecurity-2.9.1-2.6.32.59-201209171823.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201209131725.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201209171823.patch
index b281d5e..1bc2c59 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201209131725.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201209171823.patch
@@ -23574,7 +23574,7 @@ index 7c6e63e..c5d92c1 100644
.disabled_by_bios = is_disabled,
.hardware_setup = svm_hardware_setup,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index e6d925f..e7a4af8 100644
+index e6d925f..6bde4d6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -570,7 +570,11 @@ static void reload_tss(void)
@@ -23589,7 +23589,18 @@ index e6d925f..e7a4af8 100644
load_TR_desc();
}
-@@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
+@@ -746,6 +750,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ kvm_get_gdt(&dt);
+ vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
++#endif
++
+ rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
+ vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+
+@@ -1410,8 +1418,11 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
@@ -23603,7 +23614,18 @@ index e6d925f..e7a4af8 100644
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
-@@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
+@@ -2338,7 +2349,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
+
+ vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */
+ vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
++#endif
+
+ vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
+ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+@@ -2362,7 +2376,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
@@ -23612,7 +23634,7 @@ index e6d925f..e7a4af8 100644
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
-@@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+@@ -3718,6 +3732,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
"jmp .Lkvm_vmx_return \n\t"
".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
".Lkvm_vmx_return: "
@@ -23625,7 +23647,7 @@ index e6d925f..e7a4af8 100644
/* Save guest registers, load host registers, keep flags */
"xchg %0, (%%"R"sp) \n\t"
"mov %%"R"ax, %c[rax](%0) \n\t"
-@@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+@@ -3764,8 +3784,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
#endif
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
@@ -23640,7 +23662,7 @@ index e6d925f..e7a4af8 100644
#ifdef CONFIG_X86_64
, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#endif
-@@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+@@ -3782,7 +3807,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (vmx->rmode.irq.pending)
fixup_rmode_irq(vmx);
@@ -23658,7 +23680,7 @@ index e6d925f..e7a4af8 100644
vmx->launched = 1;
vmx_complete_interrupts(vmx);
-@@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
+@@ -3957,7 +3991,7 @@ static bool vmx_gb_page_enable(void)
return false;
}
@@ -102296,7 +102318,7 @@ index c8d466a..1ff8750 100644
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
diff --git a/mm/slob.c b/mm/slob.c
-index 837ebd6..5cb1310 100644
+index 837ebd6..1f9a479 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -29,7 +29,7 @@
@@ -102505,7 +102527,7 @@ index 837ebd6..5cb1310 100644
+ base = (void *)((unsigned long)ptr & PAGE_MASK);
+ free = sp->free;
+
-+ while (!slob_last(free) && (void *)free <= ptr) {
++ while ((void *)free <= ptr) {
+ base = free + slob_units(free);
+ free = slob_next(free);
+ }
@@ -107467,10 +107489,31 @@ index f2f7c63..bc36b3d 100644
x->km.state = XFRM_STATE_VALID;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
-index b95a2d6..53a422d 100644
+index b95a2d6..b6081b4 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
-@@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
+@@ -646,6 +646,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
+ {
+ struct xfrm_dump_info info;
+ struct sk_buff *skb;
++ int err;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+@@ -656,9 +657,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
+ info.nlmsg_seq = seq;
+ info.nlmsg_flags = 0;
+
+- if (dump_one_state(x, 0, &info)) {
++ err = dump_one_state(x, 0, &info);
++ if (err) {
+ kfree_skb(skb);
+- return NULL;
++ return ERR_PTR(err);
+ }
+
+ return skb;
+@@ -1169,6 +1171,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
int i;
@@ -107479,7 +107522,7 @@ index b95a2d6..53a422d 100644
if (xp->xfrm_nr == 0)
return 0;
-@@ -1176,6 +1178,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
+@@ -1176,6 +1180,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
struct xfrm_user_tmpl *up = &vec[i];
struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
@@ -107487,7 +107530,7 @@ index b95a2d6..53a422d 100644
memcpy(&up->id, &kp->id, sizeof(up->id));
up->family = kp->encap_family;
memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
-@@ -1784,6 +1787,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
+@@ -1784,6 +1789,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
int n = 0;
diff --git a/3.2.28/0000_README b/3.2.29/0000_README
index d6a4b02..77f70fa 100644
--- a/3.2.28/0000_README
+++ b/3.2.29/0000_README
@@ -30,7 +30,11 @@ Patch: 1027_linux-3.2.28.patch
From: http://www.kernel.org
Desc: Linux 3.2.28
-Patch: 4420_grsecurity-2.9.1-3.2.29-201209122157.patch
+Patch: 1028_linux-3.2.29.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.29
+
+Patch: 4420_grsecurity-2.9.1-3.2.29-201209171824.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.28/1021_linux-3.2.22.patch b/3.2.29/1021_linux-3.2.22.patch
index e6ad93a..e6ad93a 100644
--- a/3.2.28/1021_linux-3.2.22.patch
+++ b/3.2.29/1021_linux-3.2.22.patch
diff --git a/3.2.28/1022_linux-3.2.23.patch b/3.2.29/1022_linux-3.2.23.patch
index 3d796d0..3d796d0 100644
--- a/3.2.28/1022_linux-3.2.23.patch
+++ b/3.2.29/1022_linux-3.2.23.patch
diff --git a/3.2.28/1023_linux-3.2.24.patch b/3.2.29/1023_linux-3.2.24.patch
index 4692eb4..4692eb4 100644
--- a/3.2.28/1023_linux-3.2.24.patch
+++ b/3.2.29/1023_linux-3.2.24.patch
diff --git a/3.2.28/1024_linux-3.2.25.patch b/3.2.29/1024_linux-3.2.25.patch
index e95c213..e95c213 100644
--- a/3.2.28/1024_linux-3.2.25.patch
+++ b/3.2.29/1024_linux-3.2.25.patch
diff --git a/3.2.28/1025_linux-3.2.26.patch b/3.2.29/1025_linux-3.2.26.patch
index 44065b9..44065b9 100644
--- a/3.2.28/1025_linux-3.2.26.patch
+++ b/3.2.29/1025_linux-3.2.26.patch
diff --git a/3.2.28/1026_linux-3.2.27.patch b/3.2.29/1026_linux-3.2.27.patch
index 5878eb4..5878eb4 100644
--- a/3.2.28/1026_linux-3.2.27.patch
+++ b/3.2.29/1026_linux-3.2.27.patch
diff --git a/3.2.28/1027_linux-3.2.28.patch b/3.2.29/1027_linux-3.2.28.patch
index 4dbba4b..4dbba4b 100644
--- a/3.2.28/1027_linux-3.2.28.patch
+++ b/3.2.29/1027_linux-3.2.28.patch
diff --git a/3.2.29/1028_linux-3.2.29.patch b/3.2.29/1028_linux-3.2.29.patch
new file mode 100644
index 0000000..3c65179
--- /dev/null
+++ b/3.2.29/1028_linux-3.2.29.patch
@@ -0,0 +1,4279 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index f986e7d..82d7fa6 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -5452,7 +5452,7 @@ F: Documentation/blockdev/ramdisk.txt
+ F: drivers/block/brd.c
+
+ RANDOM NUMBER DRIVER
+-M: Matt Mackall <mpm@selenic.com>
++M: Theodore Ts'o" <tytso@mit.edu>
+ S: Maintained
+ F: drivers/char/random.c
+
+diff --git a/Makefile b/Makefile
+index 5368961..d96fc2a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 28
++SUBLEVEL = 29
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
+index 640f909..6f1aca7 100644
+--- a/arch/alpha/include/asm/atomic.h
++++ b/arch/alpha/include/asm/atomic.h
+@@ -14,8 +14,8 @@
+ */
+
+
+-#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
+-#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )
++#define ATOMIC_INIT(i) { (i) }
++#define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
+ #define atomic64_read(v) (*(volatile long *)&(v)->counter)
+diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
+index 06edfef..3eeb47c 100644
+--- a/arch/alpha/include/asm/socket.h
++++ b/arch/alpha/include/asm/socket.h
+@@ -69,9 +69,11 @@
+
+ #define SO_RXQ_OVFL 40
+
++#ifdef __KERNEL__
+ /* O_NONBLOCK clashes with the bits used for socket types. Therefore we
+ * have to define SOCK_NONBLOCK to a different value here.
+ */
+ #define SOCK_NONBLOCK 0x40000000
++#endif /* __KERNEL__ */
+
+ #endif /* _ASM_SOCKET_H */
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 9451dce..8512475 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -288,13 +288,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+- * <--------------- offset --------------------> <- type --> 0 0 0
++ * <--------------- offset ----------------------> < type -> 0 0 0
+ *
+- * This gives us up to 63 swap files and 32GB per swap file. Note that
++ * This gives us up to 31 swap files and 64GB per swap file. Note that
+ * the offset field is always non-zero.
+ */
+ #define __SWP_TYPE_SHIFT 3
+-#define __SWP_TYPE_BITS 6
++#define __SWP_TYPE_BITS 5
+ #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
+ #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
+diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
+index c202113..ea94765 100644
+--- a/arch/arm/mm/tlb-v7.S
++++ b/arch/arm/mm/tlb-v7.S
+@@ -38,10 +38,10 @@ ENTRY(v7wbi_flush_user_tlb_range)
+ dsb
+ mov r0, r0, lsr #PAGE_SHIFT @ align address
+ mov r1, r1, lsr #PAGE_SHIFT
+-#ifdef CONFIG_ARM_ERRATA_720789
+- mov r3, #0
+-#else
+ asid r3, r3 @ mask ASID
++#ifdef CONFIG_ARM_ERRATA_720789
++ ALT_SMP(W(mov) r3, #0 )
++ ALT_UP(W(nop) )
+ #endif
+ orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
+ mov r1, r1, lsl #PAGE_SHIFT
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index ad83dad..f0702f3 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -628,8 +628,10 @@ static int __init vfp_init(void)
+ if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
+ elf_hwcap |= HWCAP_NEON;
+ #endif
++#ifdef CONFIG_VFPv3
+ if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
+ elf_hwcap |= HWCAP_VFPv4;
++#endif
+ }
+ }
+ return 0;
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index f581a18..df7d12c 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+ }
+
+ /*
+- * search for a shareable pmd page for hugetlb.
++ * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
++ * and returns the corresponding pte. While this is not necessary for the
++ * !shared pmd case because we can allocate the pmd later as well, it makes the
++ * code much cleaner. pmd allocation is essential for the shared case because
++ * pud has to be populated inside the same i_mmap_mutex section - otherwise
++ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
++ * bad pmd for sharing.
+ */
+-static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
++static pte_t *
++huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ {
+ struct vm_area_struct *vma = find_vma(mm, addr);
+ struct address_space *mapping = vma->vm_file->f_mapping;
+@@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ struct vm_area_struct *svma;
+ unsigned long saddr;
+ pte_t *spte = NULL;
++ pte_t *pte;
+
+ if (!vma_shareable(vma, addr))
+- return;
++ return (pte_t *)pmd_alloc(mm, pud, addr);
+
+ mutex_lock(&mapping->i_mmap_mutex);
+ vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
+@@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ put_page(virt_to_page(spte));
+ spin_unlock(&mm->page_table_lock);
+ out:
++ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ mutex_unlock(&mapping->i_mmap_mutex);
++ return pte;
+ }
+
+ /*
+@@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
+ } else {
+ BUG_ON(sz != PMD_SIZE);
+ if (pud_none(*pud))
+- huge_pmd_share(mm, addr, pud);
+- pte = (pte_t *) pmd_alloc(mm, pud, addr);
++ pte = huge_pmd_share(mm, addr, pud);
++ else
++ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ }
+ }
+ BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
+diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
+index e7d13f5..d05f2fe 100644
+--- a/drivers/acpi/acpica/tbxface.c
++++ b/drivers/acpi/acpica/tbxface.c
+@@ -436,6 +436,7 @@ acpi_get_table_with_size(char *signature,
+
+ return (AE_NOT_FOUND);
+ }
++ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
+
+ acpi_status
+ acpi_get_table(char *signature,
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 8c78443..3790c80 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -385,7 +385,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ goto repeat;
+ }
+
+- dev->power.deferred_resume = false;
+ if (dev->power.no_callbacks)
+ goto no_callback; /* Assume success. */
+
+@@ -446,6 +445,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ wake_up_all(&dev->power.wait_queue);
+
+ if (dev->power.deferred_resume) {
++ dev->power.deferred_resume = false;
+ rpm_resume(dev, 0);
+ retval = -EAGAIN;
+ goto out;
+@@ -568,6 +568,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
+ || dev->parent->power.runtime_status == RPM_ACTIVE) {
+ atomic_inc(&dev->parent->power.child_count);
+ spin_unlock(&dev->parent->power.lock);
++ retval = 1;
+ goto no_callback; /* Assume success. */
+ }
+ spin_unlock(&dev->parent->power.lock);
+@@ -645,7 +646,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
+ }
+ wake_up_all(&dev->power.wait_queue);
+
+- if (!retval)
++ if (retval >= 0)
+ rpm_idle(dev, RPM_ASYNC);
+
+ out:
+diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
+index acda773..38aa6dd 100644
+--- a/drivers/block/cciss_scsi.c
++++ b/drivers/block/cciss_scsi.c
+@@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
+ {
+ case CMD_TARGET_STATUS:
+ /* Pass it up to the upper layers... */
+- if( ei->ScsiStatus)
+- {
+-#if 0
+- printk(KERN_WARNING "cciss: cmd %p "
+- "has SCSI Status = %x\n",
+- c, ei->ScsiStatus);
+-#endif
+- cmd->result |= (ei->ScsiStatus << 1);
+- }
+- else { /* scsi status is zero??? How??? */
++ if (!ei->ScsiStatus) {
+
+ /* Ordinarily, this case should never happen, but there is a bug
+ in some released firmware revisions that allows it to happen
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 650a308..de9c800 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -4,6 +4,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/hdreg.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/virtio.h>
+ #include <linux/virtio_blk.h>
+ #include <linux/scatterlist.h>
+@@ -26,14 +27,17 @@ struct virtio_blk
+ /* The disk structure for the kernel. */
+ struct gendisk *disk;
+
+- /* Request tracking. */
+- struct list_head reqs;
+-
+ mempool_t *pool;
+
+ /* Process context for config space updates */
+ struct work_struct config_work;
+
++ /* Lock for config space updates */
++ struct mutex config_lock;
++
++ /* enable config space updates */
++ bool config_enable;
++
+ /* What host tells us, plus 2 for header & tailer. */
+ unsigned int sg_elems;
+
+@@ -46,7 +50,6 @@ struct virtio_blk
+
+ struct virtblk_req
+ {
+- struct list_head list;
+ struct request *req;
+ struct virtio_blk_outhdr out_hdr;
+ struct virtio_scsi_inhdr in_hdr;
+@@ -90,7 +93,6 @@ static void blk_done(struct virtqueue *vq)
+ }
+
+ __blk_end_request_all(vbr->req, error);
+- list_del(&vbr->list);
+ mempool_free(vbr, vblk->pool);
+ }
+ /* In case queue is stopped waiting for more buffers. */
+@@ -175,7 +177,6 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
+ return false;
+ }
+
+- list_add_tail(&vbr->list, &vblk->reqs);
+ return true;
+ }
+
+@@ -316,6 +317,10 @@ static void virtblk_config_changed_work(struct work_struct *work)
+ char cap_str_2[10], cap_str_10[10];
+ u64 capacity, size;
+
++ mutex_lock(&vblk->config_lock);
++ if (!vblk->config_enable)
++ goto done;
++
+ /* Host must always specify the capacity. */
+ vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
+ &capacity, sizeof(capacity));
+@@ -338,6 +343,8 @@ static void virtblk_config_changed_work(struct work_struct *work)
+ cap_str_10, cap_str_2);
+
+ set_capacity(vblk->disk, capacity);
++done:
++ mutex_unlock(&vblk->config_lock);
+ }
+
+ static void virtblk_config_changed(struct virtio_device *vdev)
+@@ -381,11 +388,12 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
+ goto out_free_index;
+ }
+
+- INIT_LIST_HEAD(&vblk->reqs);
+ vblk->vdev = vdev;
+ vblk->sg_elems = sg_elems;
+ sg_init_table(vblk->sg, vblk->sg_elems);
++ mutex_init(&vblk->config_lock);
+ INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
++ vblk->config_enable = true;
+
+ /* We expect one virtqueue, for output. */
+ vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
+@@ -539,16 +547,19 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
+ struct virtio_blk *vblk = vdev->priv;
+ int index = vblk->index;
+
+- flush_work(&vblk->config_work);
++ /* Prevent config work handler from accessing the device. */
++ mutex_lock(&vblk->config_lock);
++ vblk->config_enable = false;
++ mutex_unlock(&vblk->config_lock);
+
+- /* Nothing should be pending. */
+- BUG_ON(!list_empty(&vblk->reqs));
++ del_gendisk(vblk->disk);
++ blk_cleanup_queue(vblk->disk->queue);
+
+ /* Stop all the virtqueues. */
+ vdev->config->reset(vdev);
+
+- del_gendisk(vblk->disk);
+- blk_cleanup_queue(vblk->disk->queue);
++ flush_work(&vblk->config_work);
++
+ put_disk(vblk->disk);
+ mempool_destroy(vblk->pool);
+ vdev->config->del_vqs(vdev);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 631d4f6..8ae9235 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1114,6 +1114,16 @@ static void init_std_data(struct entropy_store *r)
+ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
+ }
+
++/*
++ * Note that setup_arch() may call add_device_randomness()
++ * long before we get here. This allows seeding of the pools
++ * with some platform dependent data very early in the boot
++ * process. But it limits our options here. We must use
++ * statically allocated structures that already have all
++ * initializations complete at compile time. We should also
++ * take care not to overwrite the precious per platform data
++ * we were given.
++ */
+ static int rand_initialize(void)
+ {
+ init_std_data(&input_pool);
+@@ -1391,10 +1401,15 @@ static int proc_do_uuid(ctl_table *table, int write,
+ uuid = table->data;
+ if (!uuid) {
+ uuid = tmp_uuid;
+- uuid[8] = 0;
+- }
+- if (uuid[8] == 0)
+ generate_random_uuid(uuid);
++ } else {
++ static DEFINE_SPINLOCK(bootid_spinlock);
++
++ spin_lock(&bootid_spinlock);
++ if (!uuid[8])
++ generate_random_uuid(uuid);
++ spin_unlock(&bootid_spinlock);
++ }
+
+ sprintf(buf, "%pU", uuid);
+
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 153980b..b298158 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -6,6 +6,7 @@
+ #include <linux/dmi.h>
+ #include <linux/efi.h>
+ #include <linux/bootmem.h>
++#include <linux/random.h>
+ #include <asm/dmi.h>
+
+ /*
+@@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
+
+ dmi_table(buf, dmi_len, dmi_num, decode, NULL);
+
++ add_device_randomness(buf, dmi_len);
++
+ dmi_iounmap(buf, dmi_len);
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index cc75c4b..3eed270 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4748,17 +4748,6 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ continue;
+ }
+
+- if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+- /* Use VBT settings if we have an eDP panel */
+- unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+-
+- if (edp_bpc < display_bpc) {
+- DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+- display_bpc = edp_bpc;
+- }
+- continue;
+- }
+-
+ /* Not one of the known troublemakers, check the EDID */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index fae2050..c8ecaab 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1152,10 +1152,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+
+ pp = ironlake_get_pp_control(dev_priv);
+- pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
++ /* We need to switch off panel power _and_ force vdd, for otherwise some
++ * panels get very unhappy and cease to work. */
++ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
++ intel_dp->want_panel_vdd = false;
++
+ ironlake_wait_panel_off(intel_dp);
+ }
+
+@@ -1265,11 +1269,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
+ * ensure that we have vdd while we switch off the panel. */
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_backlight_off(intel_dp);
+- ironlake_edp_panel_off(intel_dp);
+-
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
++ ironlake_edp_panel_off(intel_dp);
+ intel_dp_link_down(intel_dp);
+- ironlake_edp_panel_vdd_off(intel_dp, false);
+ }
+
+ static void intel_dp_commit(struct drm_encoder *encoder)
+@@ -1304,11 +1306,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
+ /* Switching the panel off requires vdd. */
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_backlight_off(intel_dp);
+- ironlake_edp_panel_off(intel_dp);
+-
+ intel_dp_sink_dpms(intel_dp, mode);
++ ironlake_edp_panel_off(intel_dp);
+ intel_dp_link_down(intel_dp);
+- ironlake_edp_panel_vdd_off(intel_dp, false);
+
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_off(encoder);
+diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
+index cb006a7..3002d82 100644
+--- a/drivers/gpu/drm/nouveau/nvd0_display.c
++++ b/drivers/gpu/drm/nouveau/nvd0_display.c
+@@ -472,7 +472,7 @@ static int
+ nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+ {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+- const u32 data = (y << 16) | x;
++ const u32 data = (y << 16) | (x & 0xffff);
+
+ nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
+ nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
+diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
+index 1b50ad8..4760466 100644
+--- a/drivers/gpu/drm/radeon/atombios.h
++++ b/drivers/gpu/drm/radeon/atombios.h
+@@ -101,6 +101,7 @@
+ #define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
+ #define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
+ #define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
++#define ATOM_INIT (ATOM_DISABLE+7)
+ #define ATOM_GET_STATUS (ATOM_DISABLE+8)
+
+ #define ATOM_BLANKING 1
+@@ -251,25 +252,25 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
+- USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
++ USHORT EnableDispPowerGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+- USHORT MemoryPLLInit;
+- USHORT AdjustDisplayPll; //only used by Bios
++ USHORT MemoryPLLInit; //Atomic Table, used only by Bios
++ USHORT AdjustDisplayPll; //Atomic Table, used by various SW componentes.
+ USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
+ USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
+ USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
+ USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
+- USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
++ USHORT HW_Misc_Operation; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT CV1OutputControl; //Atomic Table, Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+- USHORT GetConditionalGoldenSetting; //only used by Bios
++ USHORT GetConditionalGoldenSetting; //Only used by Bios
+ USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
+- USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
+- USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
++ USHORT PatchMCSetting; //only used by BIOS
++ USHORT MC_SEQ_Control; //only used by BIOS
+ USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+ USHORT EnableScaler; //Atomic Table, used only by Bios
+ USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
+@@ -282,7 +283,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
+ USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
+- USHORT UpdateCRTC_DoubleBufferRegisters;
++ USHORT UpdateCRTC_DoubleBufferRegisters; //Atomic Table, used only by Bios
+ USHORT LUT_AutoFill; //Atomic Table, only used by Bios
+ USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
+ USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
+@@ -308,27 +309,36 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
+ USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+- USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
++ USHORT ComputeMemoryClockParam; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
+ USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+- USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
++ USHORT GetDispObjectInfo; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
+ USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios
+ USHORT DPEncoderService; //Function Table,only used by Bios
++ USHORT GetVoltageInfo; //Function Table,only used by Bios since SI
+ }ATOM_MASTER_LIST_OF_COMMAND_TABLES;
+
+ // For backward compatible
+ #define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
+-#define UNIPHYTransmitterControl DIG1TransmitterControl
+-#define LVTMATransmitterControl DIG2TransmitterControl
++#define DPTranslatorControl DIG2EncoderControl
++#define UNIPHYTransmitterControl DIG1TransmitterControl
++#define LVTMATransmitterControl DIG2TransmitterControl
+ #define SetCRTC_DPM_State GetConditionalGoldenSetting
+ #define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
+ #define HPDInterruptService ReadHWAssistedI2CStatus
+ #define EnableVGA_Access GetSCLKOverMCLKRatio
+-#define GetDispObjectInfo EnableYUV
++#define EnableYUV GetDispObjectInfo
++#define DynamicClockGating EnableDispPowerGating
++#define SetupHWAssistedI2CStatus ComputeMemoryClockParam
++
++#define TMDSAEncoderControl PatchMCSetting
++#define LVDSEncoderControl MC_SEQ_Control
++#define LCD1OutputControl HW_Misc_Operation
++
+
+ typedef struct _ATOM_MASTER_COMMAND_TABLE
+ {
+@@ -495,6 +505,34 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+ // ucInputFlag
+ #define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
+
++// use for ComputeMemoryClockParamTable
++typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
++{
++ union
++ {
++ ULONG ulClock;
++ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output:UPPER_WORD=FB_DIV_INTEGER, LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS)
++ };
++ UCHAR ucDllSpeed; //Output
++ UCHAR ucPostDiv; //Output
++ union{
++ UCHAR ucInputFlag; //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode
++ UCHAR ucPllCntlFlag; //Output:
++ };
++ UCHAR ucBWCntl;
++}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1;
++
++// definition of ucInputFlag
++#define MPLL_INPUT_FLAG_STROBE_MODE_EN 0x01
++// definition of ucPllCntlFlag
++#define MPLL_CNTL_FLAG_VCO_MODE_MASK 0x03
++#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL 0x04
++#define MPLL_CNTL_FLAG_QDR_ENABLE 0x08
++#define MPLL_CNTL_FLAG_AD_HALF_RATE 0x10
++
++//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL
++#define MPLL_CNTL_FLAG_BYPASS_AD_PLL 0x04
++
+ typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+ {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+@@ -562,6 +600,16 @@ typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
+ #define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
+
+ /****************************************************************************/
++// Structure used by EnableDispPowerGatingTable.ctb
++/****************************************************************************/
++typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1
++{
++ UCHAR ucDispPipeId; // ATOM_CRTC1, ATOM_CRTC2, ...
++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
++ UCHAR ucPadding[2];
++}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1;
++
++/****************************************************************************/
+ // Structure used by EnableASIC_StaticPwrMgtTable.ctb
+ /****************************************************************************/
+ typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+@@ -807,6 +855,7 @@ typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+ #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00
+ #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01
+ #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02
++#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ 0x03
+ #define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70
+ #define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00
+ #define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10
+@@ -814,6 +863,7 @@ typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+ #define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30
+ #define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40
+ #define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50
++#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER 0x60
+
+ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+ {
+@@ -1171,6 +1221,106 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+ #define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF
+
+
++typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5
++{
++#if ATOM_BIG_ENDIAN
++ UCHAR ucReservd1:1;
++ UCHAR ucHPDSel:3;
++ UCHAR ucPhyClkSrcId:2;
++ UCHAR ucCoherentMode:1;
++ UCHAR ucReserved:1;
++#else
++ UCHAR ucReserved:1;
++ UCHAR ucCoherentMode:1;
++ UCHAR ucPhyClkSrcId:2;
++ UCHAR ucHPDSel:3;
++ UCHAR ucReservd1:1;
++#endif
++}ATOM_DIG_TRANSMITTER_CONFIG_V5;
++
++typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
++{
++ USHORT usSymClock; // Encoder Clock in 10kHz,(DP mode)= linkclock/10, (TMDS/LVDS/HDMI)= pixel clock, (HDMI deep color), =pixel clock * deep_color_ratio
++ UCHAR ucPhyId; // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4= UNIPHYE 5=UNIPHYF
++ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_xxx
++ UCHAR ucLaneNum; // indicate lane number 1-8
++ UCHAR ucConnObjId; // Connector Object Id defined in ObjectId.h
++ UCHAR ucDigMode; // indicate DIG mode
++ union{
++ ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
++ UCHAR ucConfig;
++ };
++ UCHAR ucDigEncoderSel; // indicate DIG front end encoder
++ UCHAR ucDPLaneSet;
++ UCHAR ucReserved;
++ UCHAR ucReserved1;
++}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5;
++
++//ucPhyId
++#define ATOM_PHY_ID_UNIPHYA 0
++#define ATOM_PHY_ID_UNIPHYB 1
++#define ATOM_PHY_ID_UNIPHYC 2
++#define ATOM_PHY_ID_UNIPHYD 3
++#define ATOM_PHY_ID_UNIPHYE 4
++#define ATOM_PHY_ID_UNIPHYF 5
++#define ATOM_PHY_ID_UNIPHYG 6
++
++// ucDigEncoderSel
++#define ATOM_TRANMSITTER_V5__DIGA_SEL 0x01
++#define ATOM_TRANMSITTER_V5__DIGB_SEL 0x02
++#define ATOM_TRANMSITTER_V5__DIGC_SEL 0x04
++#define ATOM_TRANMSITTER_V5__DIGD_SEL 0x08
++#define ATOM_TRANMSITTER_V5__DIGE_SEL 0x10
++#define ATOM_TRANMSITTER_V5__DIGF_SEL 0x20
++#define ATOM_TRANMSITTER_V5__DIGG_SEL 0x40
++
++// ucDigMode
++#define ATOM_TRANSMITTER_DIGMODE_V5_DP 0
++#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS 1
++#define ATOM_TRANSMITTER_DIGMODE_V5_DVI 2
++#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI 3
++#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO 4
++#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST 5
++
++// ucDPLaneSet
++#define DP_LANE_SET__0DB_0_4V 0x00
++#define DP_LANE_SET__0DB_0_6V 0x01
++#define DP_LANE_SET__0DB_0_8V 0x02
++#define DP_LANE_SET__0DB_1_2V 0x03
++#define DP_LANE_SET__3_5DB_0_4V 0x08
++#define DP_LANE_SET__3_5DB_0_6V 0x09
++#define DP_LANE_SET__3_5DB_0_8V 0x0a
++#define DP_LANE_SET__6DB_0_4V 0x10
++#define DP_LANE_SET__6DB_0_6V 0x11
++#define DP_LANE_SET__9_5DB_0_4V 0x18
++
++// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
++// Bit1
++#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT 0x02
++
++// Bit3:2
++#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK 0x0c
++#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT 0x02
++
++#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL 0x00
++#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL 0x04
++#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL 0x08
++#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT 0x0c
++// Bit6:4
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK 0x70
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT 0x04
++
++#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL 0x00
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL 0x10
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL 0x20
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL 0x30
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL 0x40
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL 0x50
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL 0x60
++
++#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5 DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
++
++
+ /****************************************************************************/
+ // Structures used by ExternalEncoderControlTable V1.3
+ // ASIC Families: Evergreen, Llano, NI
+@@ -1793,6 +1943,7 @@ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
+ #define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00
+ #define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04
+ #define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08
++#define ATOM_PPLL_SS_TYPE_V3_P0PLL ATOM_PPLL_SS_TYPE_V3_DCPLL
+ #define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF
+ #define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
+ #define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00
+@@ -2030,12 +2181,77 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V2
+ USHORT usVoltageLevel; // real voltage level
+ }SET_VOLTAGE_PARAMETERS_V2;
+
++
++typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
++{
++ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
++ UCHAR ucVoltageMode; // Indicate action: Set voltage level
++ USHORT usVoltageLevel; // real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. )
++}SET_VOLTAGE_PARAMETERS_V1_3;
++
++//ucVoltageType
++#define VOLTAGE_TYPE_VDDC 1
++#define VOLTAGE_TYPE_MVDDC 2
++#define VOLTAGE_TYPE_MVDDQ 3
++#define VOLTAGE_TYPE_VDDCI 4
++
++//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
++#define ATOM_SET_VOLTAGE 0 //Set voltage Level
++#define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init Regulator
++#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase
++#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used in SetVoltageTable v1.3
++#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from vitual voltage ID
++
++// define vitual voltage id in usVoltageLevel
++#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
++#define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02
++#define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03
++#define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04
++
+ typedef struct _SET_VOLTAGE_PS_ALLOCATION
+ {
+ SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+ }SET_VOLTAGE_PS_ALLOCATION;
+
++// New Added from SI for GetVoltageInfoTable, input parameter structure
++typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1
++{
++ UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
++ UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info
++ USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
++ ULONG ulReserved;
++}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
++
++// New Added from SI for GetVoltageInfoTable, output parameter structure when ucVotlageMode == ATOM_GET_VOLTAGE_VID
++typedef struct _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
++{
++ ULONG ulVotlageGpioState;
++ ULONG ulVoltageGPioMask;
++}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
++
++// New Added from SI for GetVoltageInfoTable, output parameter structure when ucVotlageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
++typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
++{
++ USHORT usVoltageLevel;
++ USHORT usVoltageId; // Voltage Id programmed in Voltage Regulator
++ ULONG ulReseved;
++}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
++
++
++// GetVoltageInfo v1.1 ucVoltageMode
++#define ATOM_GET_VOLTAGE_VID 0x00
++#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03
++#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04
++// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
++#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
++
++// for SI, this state map to 0xff01 voltage state in Power Play table, which is performance state
++#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
++// undefined power state
++#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
++#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
++
+ /****************************************************************************/
+ // Structures used by TVEncoderControlTable
+ /****************************************************************************/
+@@ -2065,9 +2281,9 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+ USHORT MultimediaConfigInfo; // Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios
+ USHORT StandardVESA_Timing; // Only used by Bios
+ USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
+- USHORT DAC_Info; // Will be obsolete from R600
++ USHORT PaletteData; // Only used by BIOS
+ USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info
+- USHORT TMDS_Info; // Will be obsolete from R600
++ USHORT DIGTransmitterInfo; // Internal used by VBIOS only version 3.1
+ USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
+ USHORT SupportedDevicesInfo; // Will be obsolete from R600
+ USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600
+@@ -2096,15 +2312,16 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+ USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1
+ }ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+-// For backward compatible
+-#define LVDS_Info LCD_Info
+-
+ typedef struct _ATOM_MASTER_DATA_TABLE
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
+ }ATOM_MASTER_DATA_TABLE;
+
++// For backward compatible
++#define LVDS_Info LCD_Info
++#define DAC_Info PaletteData
++#define TMDS_Info DIGTransmitterInfo
+
+ /****************************************************************************/
+ // Structure used in MultimediaCapabilityInfoTable
+@@ -2171,7 +2388,9 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
+ typedef struct _ATOM_FIRMWARE_CAPABILITY
+ {
+ #if ATOM_BIG_ENDIAN
+- USHORT Reserved:3;
++ USHORT Reserved:1;
++ USHORT SCL2Redefined:1;
++ USHORT PostWithoutModeSet:1;
+ USHORT HyperMemory_Size:4;
+ USHORT HyperMemory_Support:1;
+ USHORT PPMode_Assigned:1;
+@@ -2193,7 +2412,9 @@ typedef struct _ATOM_FIRMWARE_CAPABILITY
+ USHORT PPMode_Assigned:1;
+ USHORT HyperMemory_Support:1;
+ USHORT HyperMemory_Size:4;
+- USHORT Reserved:3;
++ USHORT PostWithoutModeSet:1;
++ USHORT SCL2Redefined:1;
++ USHORT Reserved:1;
+ #endif
+ }ATOM_FIRMWARE_CAPABILITY;
+
+@@ -2418,7 +2639,8 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_2
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ulReserved4; //Was ulAsicMaximumVoltage
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+- ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
++ UCHAR ucRemoteDisplayConfig;
++ UCHAR ucReserved5[3]; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+ ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+ ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+ USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC
+@@ -2438,6 +2660,11 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_2
+
+ #define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2
+
++
++// definition of ucRemoteDisplayConfig
++#define REMOTE_DISPLAY_DISABLE 0x00
++#define REMOTE_DISPLAY_ENABLE 0x01
++
+ /****************************************************************************/
+ // Structures used in IntegratedSystemInfoTable
+ /****************************************************************************/
+@@ -2660,8 +2887,9 @@ usMinDownStreamHTLinkWidth: same as above.
+ #define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2
+ #define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3
+ #define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4
++#define INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI 5
+
+-#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this deff reflects max defined CPU code
++#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI // this deff reflects max defined CPU code
+
+ #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
+ #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
+@@ -2753,6 +2981,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
+ #define ASIC_INT_DIG4_ENCODER_ID 0x0b
+ #define ASIC_INT_DIG5_ENCODER_ID 0x0c
+ #define ASIC_INT_DIG6_ENCODER_ID 0x0d
++#define ASIC_INT_DIG7_ENCODER_ID 0x0e
+
+ //define Encoder attribute
+ #define ATOM_ANALOG_ENCODER 0
+@@ -3226,15 +3455,23 @@ typedef struct _ATOM_LCD_INFO_V13
+
+ UCHAR ucPowerSequenceDIGONtoDE_in4Ms;
+ UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms;
+- UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
++ UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
+
+ UCHAR ucOffDelay_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms;
+ UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms;
+ UCHAR ucReserved1;
+
+- ULONG ulReserved[4];
++ UCHAR ucDPCD_eDP_CONFIGURATION_CAP; // dpcd 0dh
++ UCHAR ucDPCD_MAX_LINK_RATE; // dpcd 01h
++ UCHAR ucDPCD_MAX_LANE_COUNT; // dpcd 02h
++ UCHAR ucDPCD_MAX_DOWNSPREAD; // dpcd 03h
++
++ USHORT usMaxPclkFreqInSingleLink; // Max PixelClock frequency in single link mode.
++ UCHAR uceDPToLVDSRxId;
++ UCHAR ucLcdReservd;
++ ULONG ulReserved[2];
+ }ATOM_LCD_INFO_V13;
+
+ #define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13
+@@ -3273,6 +3510,11 @@ typedef struct _ATOM_LCD_INFO_V13
+ //Use this cap bit for a quick reference whether an embadded panel (LCD1 ) is LVDS or eDP.
+ #define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP no change comparing to previous version
+
++//uceDPToLVDSRxId
++#define eDP_TO_LVDS_RX_DISABLE 0x00 // no eDP->LVDS translator chip
++#define eDP_TO_LVDS_COMMON_ID 0x01 // common eDP->LVDS translator chip without AMD SW init
++#define eDP_TO_LVDS_RT_ID 0x02 // RT tanslator which require AMD SW init
++
+ typedef struct _ATOM_PATCH_RECORD_MODE
+ {
+ UCHAR ucRecordType;
+@@ -3317,6 +3559,7 @@ typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD
+ #define LCD_CAP_RECORD_TYPE 3
+ #define LCD_FAKE_EDID_PATCH_RECORD_TYPE 4
+ #define LCD_PANEL_RESOLUTION_RECORD_TYPE 5
++#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE 6
+ #define ATOM_RECORD_END_TYPE 0xFF
+
+ /****************************Spread Spectrum Info Table Definitions **********************/
+@@ -3528,6 +3771,7 @@ else //Non VGA case
+
+ CAIL needs to claim an reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in non VGA case.*/
+
++/***********************************************************************************/
+ #define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
+
+ typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
+@@ -3818,13 +4062,17 @@ typedef struct _EXT_DISPLAY_PATH
+ ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+ ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+ };
+- UCHAR ucReserved;
+- USHORT usReserved[2];
++ UCHAR ucChPNInvert; // bit vector for up to 8 lanes, =0: P and N is not invert, =1 P and N is inverted
++ USHORT usCaps;
++ USHORT usReserved;
+ }EXT_DISPLAY_PATH;
+
+ #define NUMBER_OF_UCHAR_FOR_GUID 16
+ #define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
+
++//usCaps
++#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01
++
+ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+@@ -3832,7 +4080,9 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+ EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
+ UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
+ UCHAR uc3DStereoPinId; // use for eDP panel
+- UCHAR Reserved [6]; // for potential expansion
++ UCHAR ucRemoteDisplayConfig;
++ UCHAR uceDPToLVDSRxId;
++ UCHAR Reserved[4]; // for potential expansion
+ }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
+
+ //Related definitions, all records are different but they have a commond header
+@@ -3977,6 +4227,7 @@ typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
+ #define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
+
+ // Indexes to GPIO array in GLSync record
++// GLSync record is for Frame Lock/Gen Lock feature.
+ #define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0
+ #define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1
+ #define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2
+@@ -3984,7 +4235,9 @@ typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
+ #define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4
+ #define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
+ #define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6
+-#define ATOM_GPIO_INDEX_GLSYNC_MAX 7
++#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL 7
++#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL 8
++#define ATOM_GPIO_INDEX_GLSYNC_MAX 9
+
+ typedef struct _ATOM_ENCODER_DVO_CF_RECORD
+ {
+@@ -3994,7 +4247,8 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD
+ }ATOM_ENCODER_DVO_CF_RECORD;
+
+ // Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+-#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path
++#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by HW encoder
++#define ATOM_ENCODER_CAP_RECORD_HBR2_EN 0x02 // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
+
+ typedef struct _ATOM_ENCODER_CAP_RECORD
+ {
+@@ -4003,11 +4257,13 @@ typedef struct _ATOM_ENCODER_CAP_RECORD
+ USHORT usEncoderCap;
+ struct {
+ #if ATOM_BIG_ENDIAN
+- USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
++ USHORT usReserved:14; // Bit1-15 may be defined for other capability in future
++ USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+ #else
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+- USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
++ USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
++ USHORT usReserved:14; // Bit1-15 may be defined for other capability in future
+ #endif
+ };
+ };
+@@ -4157,6 +4413,7 @@ typedef struct _ATOM_VOLTAGE_CONTROL
+ #define VOLTAGE_CONTROL_ID_VT1556M 0x07
+ #define VOLTAGE_CONTROL_ID_CHL822x 0x08
+ #define VOLTAGE_CONTROL_ID_VT1586M 0x09
++#define VOLTAGE_CONTROL_ID_UP1637 0x0A
+
+ typedef struct _ATOM_VOLTAGE_OBJECT
+ {
+@@ -4193,6 +4450,69 @@ typedef struct _ATOM_LEAKID_VOLTAGE
+ USHORT usVoltage;
+ }ATOM_LEAKID_VOLTAGE;
+
++typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
++ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
++ UCHAR ucVoltageMode; //Indicate voltage control mode: Init/Set/Leakage/Set phase
++ USHORT usSize; //Size of Object
++}ATOM_VOLTAGE_OBJECT_HEADER_V3;
++
++typedef struct _VOLTAGE_LUT_ENTRY_V2
++{
++ ULONG ulVoltageId; // The Voltage ID which is used to program GPIO register
++ USHORT usVoltageValue; // The corresponding Voltage Value, in mV
++}VOLTAGE_LUT_ENTRY_V2;
++
++typedef struct _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
++{
++ USHORT usVoltageLevel; // The Voltage ID which is used to program GPIO register
++ USHORT usVoltageId;
++ USHORT usLeakageId; // The corresponding Voltage Value, in mV
++}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
++
++typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
++{
++ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
++ UCHAR ucVoltageRegulatorId; //Indicate Voltage Regulator Id
++ UCHAR ucVoltageControlI2cLine;
++ UCHAR ucVoltageControlAddress;
++ UCHAR ucVoltageControlOffset;
++ ULONG ulReserved;
++ VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
++}ATOM_I2C_VOLTAGE_OBJECT_V3;
++
++typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
++{
++ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
++ UCHAR ucVoltageGpioCntlId; // default is 0 which indicate control through CG VID mode
++ UCHAR ucGpioEntryNum; // indiate the entry numbers of Votlage/Gpio value Look up table
++ UCHAR ucPhaseDelay; // phase delay in unit of micro second
++ UCHAR ucReserved;
++ ULONG ulGpioMaskVal; // GPIO Mask value
++ VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
++}ATOM_GPIO_VOLTAGE_OBJECT_V3;
++
++typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
++{
++ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
++ UCHAR ucLeakageCntlId; // default is 0
++ UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table
++ UCHAR ucReserved[2];
++ ULONG ulMaxVoltageLevel;
++ LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
++}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
++
++typedef union _ATOM_VOLTAGE_OBJECT_V3{
++ ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
++ ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
++ ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
++}ATOM_VOLTAGE_OBJECT_V3;
++
++typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
++{
++ ATOM_COMMON_TABLE_HEADER sHeader;
++ ATOM_VOLTAGE_OBJECT_V3 asVoltageObj[3]; //Info for Voltage control
++}ATOM_VOLTAGE_OBJECT_INFO_V3_1;
++
+ typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
+ {
+ UCHAR ucProfileId;
+@@ -4305,7 +4625,18 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+ USHORT usHDMISSpreadRateIn10Hz;
+ USHORT usDVISSPercentage;
+ USHORT usDVISSpreadRateIn10Hz;
+- ULONG ulReserved3[21];
++ ULONG SclkDpmBoostMargin;
++ ULONG SclkDpmThrottleMargin;
++ USHORT SclkDpmTdpLimitPG;
++ USHORT SclkDpmTdpLimitBoost;
++ ULONG ulBoostEngineCLock;
++ UCHAR ulBoostVid_2bit;
++ UCHAR EnableBoost;
++ USHORT GnbTdpLimit;
++ USHORT usMaxLVDSPclkFreqInSingleLink;
++ UCHAR ucLvdsMisc;
++ UCHAR ucLVDSReserved;
++ ULONG ulReserved3[15];
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+ }ATOM_INTEGRATED_SYSTEM_INFO_V6;
+
+@@ -4313,9 +4644,16 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+ #define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
+ #define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08
+
+-// ulOtherDisplayMisc
+-#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
++//ucLVDSMisc:
++#define SYS_INFO_LVDSMISC__888_FPDI_MODE 0x01
++#define SYS_INFO_LVDSMISC__DL_CH_SWAP 0x02
++#define SYS_INFO_LVDSMISC__888_BPC 0x04
++#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08
++#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10
+
++// not used any more
++#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04
++#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW 0x08
+
+ /**********************************************************************************************************************
+ ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+@@ -4384,7 +4722,208 @@ ucUMAChannelNumber: System memory channel numbers.
+ ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
+ ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
+ ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+-sAvail_SCLK[5]: Arrays to provide available list of SLCK and corresponding voltage, order from low to high
++sAvail_SCLK[5]: Arrays to provide available list of SCLK and corresponding voltage, order from low to high
++ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
++ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
++ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
++ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
++ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
++usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
++usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
++usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
++usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
++usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
++usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
++usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
++usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
++usMaxLVDSPclkFreqInSingleLink: Max pixel clock of LVDS panel in single link, if=0 means VBIOS uses the default threshold, right now it is 85 MHz
++ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
++ [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
++ [bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
++ [bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
++ [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
++**********************************************************************************************************************/
++
++// this Table is used for Llano/Ontario APU
++typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
++{
++ ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo;
++ ULONG ulPowerplayTable[128];
++}ATOM_FUSION_SYSTEM_INFO_V1;
++/**********************************************************************************************************************
++ ATOM_FUSION_SYSTEM_INFO_V1 Description
++sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
++ulPowerplayTable[128]: These 512 bytes are used to save ATOM_PPLIB_POWERPLAYTABLE3, starting from ulPowerplayTable[0]
++**********************************************************************************************************************/
++
++// this IntegrateSystemInfoTable is used for Trinity APU
++typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
++{
++ ATOM_COMMON_TABLE_HEADER sHeader;
++ ULONG ulBootUpEngineClock;
++ ULONG ulDentistVCOFreq;
++ ULONG ulBootUpUMAClock;
++ ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
++ ULONG ulBootUpReqDisplayVector;
++ ULONG ulOtherDisplayMisc;
++ ULONG ulGPUCapInfo;
++ ULONG ulSB_MMIO_Base_Addr;
++ USHORT usRequestedPWMFreqInHz;
++ UCHAR ucHtcTmpLmt;
++ UCHAR ucHtcHystLmt;
++ ULONG ulMinEngineClock;
++ ULONG ulSystemConfig;
++ ULONG ulCPUCapInfo;
++ USHORT usNBP0Voltage;
++ USHORT usNBP1Voltage;
++ USHORT usBootUpNBVoltage;
++ USHORT usExtDispConnInfoOffset;
++ USHORT usPanelRefreshRateRange;
++ UCHAR ucMemoryType;
++ UCHAR ucUMAChannelNumber;
++ UCHAR strVBIOSMsg[40];
++ ULONG ulReserved[20];
++ ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
++ ULONG ulGMCRestoreResetTime;
++ ULONG ulMinimumNClk;
++ ULONG ulIdleNClk;
++ ULONG ulDDR_DLL_PowerUpTime;
++ ULONG ulDDR_PLL_PowerUpTime;
++ USHORT usPCIEClkSSPercentage;
++ USHORT usPCIEClkSSType;
++ USHORT usLvdsSSPercentage;
++ USHORT usLvdsSSpreadRateIn10Hz;
++ USHORT usHDMISSPercentage;
++ USHORT usHDMISSpreadRateIn10Hz;
++ USHORT usDVISSPercentage;
++ USHORT usDVISSpreadRateIn10Hz;
++ ULONG SclkDpmBoostMargin;
++ ULONG SclkDpmThrottleMargin;
++ USHORT SclkDpmTdpLimitPG;
++ USHORT SclkDpmTdpLimitBoost;
++ ULONG ulBoostEngineCLock;
++ UCHAR ulBoostVid_2bit;
++ UCHAR EnableBoost;
++ USHORT GnbTdpLimit;
++ USHORT usMaxLVDSPclkFreqInSingleLink;
++ UCHAR ucLvdsMisc;
++ UCHAR ucLVDSReserved;
++ UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
++ UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
++ UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
++ UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
++ UCHAR ucLVDSOffToOnDelay_in4Ms;
++ UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
++ UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
++ UCHAR ucLVDSReserved1;
++ ULONG ulLCDBitDepthControlVal;
++ ULONG ulNbpStateMemclkFreq[4];
++ USHORT usNBP2Voltage;
++ USHORT usNBP3Voltage;
++ ULONG ulNbpStateNClkFreq[4];
++ UCHAR ucNBDPMEnable;
++ UCHAR ucReserved[3];
++ UCHAR ucDPMState0VclkFid;
++ UCHAR ucDPMState0DclkFid;
++ UCHAR ucDPMState1VclkFid;
++ UCHAR ucDPMState1DclkFid;
++ UCHAR ucDPMState2VclkFid;
++ UCHAR ucDPMState2DclkFid;
++ UCHAR ucDPMState3VclkFid;
++ UCHAR ucDPMState3DclkFid;
++ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
++}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
++
++// ulOtherDisplayMisc
++#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
++#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT 0x02
++#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT 0x04
++#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT 0x08
++
++// ulGPUCapInfo
++#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
++#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
++#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
++
++/**********************************************************************************************************************
++ ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
++ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it equals 0, VBIOS uses the pre-defined bootup engine clock
++ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
++ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
++sDISPCLK_Voltage: Report Display clock voltage requirement.
++
++ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Trinity projects:
++ ATOM_DEVICE_CRT1_SUPPORT 0x0001
++ ATOM_DEVICE_DFP1_SUPPORT 0x0008
++ ATOM_DEVICE_DFP6_SUPPORT 0x0040
++ ATOM_DEVICE_DFP2_SUPPORT 0x0080
++ ATOM_DEVICE_DFP3_SUPPORT 0x0200
++ ATOM_DEVICE_DFP4_SUPPORT 0x0400
++ ATOM_DEVICE_DFP5_SUPPORT 0x0800
++ ATOM_DEVICE_LCD1_SUPPORT 0x0002
++ulOtherDisplayMisc: bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
++ =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
++ bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
++ =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
++ bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
++ =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
++                                   bit[3]=0: VBIOS fast boot is disabled
++                                         =1: VBIOS fast boot is enabled. ( VBIOS skips display device detection in every set mode if LCD panel is connected and LID is open)
++ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode use cascade PLL mode.
++                                         =1: TMDS/HDMI Coherent Mode use single PLL mode.
++ bit[1]=0: DP mode use cascade PLL mode ( New for Trinity )
++ =1: DP mode use single PLL mode
++ bit[3]=0: Enable AUX HW mode detection logic
++ =1: Disable AUX HW mode detection logic
++
++ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
++
++usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
++ Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
++
++ When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
++                          1. SW uses the GPU BL PWM output to control the BL, in this case, this non-zero frequency determines what freq GPU should use;
++ VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result,
++ Changing BL using VBIOS function is functional in both driver and non-driver present environment;
++ and enabling VariBri under the driver environment from PP table is optional.
++
++ 2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
++ that BL control from GPU is expected.
++ VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
++ Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
++ it's per platform
++ and enabling VariBri under the driver environment from PP table is optional.
++
++ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
++ Threshold on value to enter HTC_active state.
++ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
++                          To calculate threshold off value to exit HTC_active state, which is Threshold on value minus ucHtcHystLmt.
++ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
++ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
++ =1: PCIE Power Gating Enabled
++ Bit[1]=0: DDR-DLL shut-down feature disabled.
++ 1: DDR-DLL shut-down feature enabled.
++ Bit[2]=0: DDR-PLL Power down feature disabled.
++ 1: DDR-PLL Power down feature enabled.
++ulCPUCapInfo: TBD
++usNBP0Voltage: VID for voltage on NB P0 State
++usNBP1Voltage: VID for voltage on NB P1 State
++usNBP2Voltage: VID for voltage on NB P2 State
++usNBP3Voltage: VID for voltage on NB P3 State
++usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support VBIOS DISPCLK requirement.
++usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
++usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
++ to indicate a range.
++ SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
++ SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
++ SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
++ SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
++ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
++ucUMAChannelNumber: System memory channel numbers.
++ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
++ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
++ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
++sAvail_SCLK[5]: Arrays to provide available list of SCLK and corresponding voltage, order from low to high
+ ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
+ ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calcualte data reconnection latency. Unit in 10kHz.
+ ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+@@ -4398,6 +4937,41 @@ usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%;
+ usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+ usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
+ usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
++usMaxLVDSPclkFreqInSingleLink: Max pixel clock of LVDS panel in single link, if=0 means VBIOS uses the default threshold, right now it is 85 MHz
++ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
++ [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
++ [bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
++ [bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
++ [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
++ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
++                               =0 means use VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++ucLVDSPwrOnDEtoVARY_BL_in4Ms: LVDS power up sequence time in unit of 4ms., time delay from DE( data enable ) active to Vary Brightness enable signal active( VARY_BL ).
++                               =0 means use VBIOS default which is 90 ( 360ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSPwrOffVARY_BLtoDE_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC (DIGON) off.
++                               =0 means use VBIOS default delay which is 8 ( 32ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSPwrOffDEtoDIGON_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal( VARY_BL) off to data enable ( DE ) signal off.
++                               =0 means use VBIOS default which is 90 ( 360ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSOffToOnDelay_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
++ =0 means to use VBIOS default delay which is 125 ( 500ms ).
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSPwrOnVARY_BLtoBLON_in4Ms: LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to BLON signal active.
++ =0 means to use VBIOS default delay which is 0 ( 0ms ).
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSPwrOffBLONtoVARY_BL_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
++ =0 means to use VBIOS default delay which is 0 ( 0ms ).
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ulNbpStateMemclkFreq[4]: system memory clock frequency in unit of 10kHz in different NB pstate.
++
+ **********************************************************************************************************************/
+
+ /**************************************************************************/
+@@ -4459,6 +5033,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
+ #define ASIC_INTERNAL_SS_ON_DP 7
+ #define ASIC_INTERNAL_SS_ON_DCPLL 8
+ #define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
++#define ASIC_INTERNAL_VCE_SS 10
+
+ typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
+ {
+@@ -4520,7 +5095,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+ #define ATOM_DOS_MODE_INFO_DEF 7
+ #define ATOM_I2C_CHANNEL_STATUS_DEF 8
+ #define ATOM_I2C_CHANNEL_STATUS1_DEF 9
+-
++#define ATOM_INTERNAL_TIMER_DEF 10
+
+ // BIOS_0_SCRATCH Definition
+ #define ATOM_S0_CRT1_MONO 0x00000001L
+@@ -4648,6 +5223,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+ #define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
+ #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
+ #define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3 0x10
++#define ATOM_S2_TMDS_COHERENT_MODEb3 0x10 // used by VBIOS code only, use coherent mode for TMDS/HDMI mode
+ #define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
+ #define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
+
+@@ -5038,6 +5614,23 @@ typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
+ USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0.
+ }ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
+
++typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4
++{
++ USHORT usHight; // Image Height
++ USHORT usWidth; // Image Width
++ USHORT usGraphPitch;
++ UCHAR ucColorDepth;
++ UCHAR ucPixelFormat;
++ UCHAR ucSurface; // Surface 1 or 2
++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
++ UCHAR ucModeType;
++ UCHAR ucReserved;
++}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4;
++
++// ucEnable
++#define ATOM_GRAPH_CONTROL_SET_PITCH 0x0f
++#define ATOM_GRAPH_CONTROL_SET_DISP_START 0x10
++
+ typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
+ {
+ ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
+@@ -5057,6 +5650,58 @@ typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
+ USHORT usY_Size;
+ }GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
+
++typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2
++{
++ union{
++ USHORT usX_Size; //When used as an input parameter, usX_Size indicates which CRTC
++ USHORT usSurface;
++ };
++ USHORT usY_Size;
++ USHORT usDispXStart;
++ USHORT usDispYStart;
++}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2;
++
++
++typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3
++{
++ UCHAR ucLutId;
++ UCHAR ucAction;
++ USHORT usLutStartIndex;
++ USHORT usLutLength;
++ USHORT usLutOffsetInVram;
++}PALETTE_DATA_CONTROL_PARAMETERS_V3;
++
++// ucAction:
++#define PALETTE_DATA_AUTO_FILL 1
++#define PALETTE_DATA_READ 2
++#define PALETTE_DATA_WRITE 3
++
++
++typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2
++{
++ UCHAR ucInterruptId;
++ UCHAR ucServiceId;
++ UCHAR ucStatus;
++ UCHAR ucReserved;
++}INTERRUPT_SERVICE_PARAMETER_V2;
++
++// ucInterruptId
++#define HDP1_INTERRUPT_ID 1
++#define HDP2_INTERRUPT_ID 2
++#define HDP3_INTERRUPT_ID 3
++#define HDP4_INTERRUPT_ID 4
++#define HDP5_INTERRUPT_ID 5
++#define HDP6_INTERRUPT_ID 6
++#define SW_INTERRUPT_ID 11
++
++// ucAction
++#define INTERRUPT_SERVICE_GEN_SW_INT 1
++#define INTERRUPT_SERVICE_GET_STATUS 2
++
++ // ucStatus
++#define INTERRUPT_STATUS__INT_TRIGGER 1
++#define INTERRUPT_STATUS__HPD_HIGH 2
++
+ typedef struct _INDIRECT_IO_ACCESS
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+@@ -5189,7 +5834,7 @@ typedef struct _ATOM_INIT_REG_BLOCK{
+
+ #define END_OF_REG_INDEX_BLOCK 0x0ffff
+ #define END_OF_REG_DATA_BLOCK 0x00000000
+-#define ATOM_INIT_REG_MASK_FLAG 0x80
++#define ATOM_INIT_REG_MASK_FLAG 0x80 //Not used in BIOS
+ #define CLOCK_RANGE_HIGHEST 0x00ffffff
+
+ #define VALUE_DWORD SIZEOF ULONG
+@@ -5229,6 +5874,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
+ #define _128Mx8 0x51
+ #define _128Mx16 0x52
+ #define _256Mx8 0x61
++#define _256Mx16 0x62
+
+ #define SAMSUNG 0x1
+ #define INFINEON 0x2
+@@ -5585,7 +6231,7 @@ typedef struct _ATOM_VRAM_MODULE_V7
+ ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
+ USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
+ USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+- USHORT usReserved;
++ USHORT usEnableChannels; // bit vector which indicate which channels are enabled
+ UCHAR ucExtMemoryID; // Current memory module ID
+ UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+ UCHAR ucChannelNum; // Number of mem. channels supported in this module
+@@ -5597,7 +6243,8 @@ typedef struct _ATOM_VRAM_MODULE_V7
+ UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+- UCHAR ucReserved[3];
++ USHORT usSEQSettingOffset;
++ UCHAR ucReserved;
+ // Memory Module specific values
+ USHORT usEMRS2Value; // EMRS2/MR2 Value.
+ USHORT usEMRS3Value; // EMRS3/MR3 Value.
+@@ -5633,10 +6280,10 @@ typedef struct _ATOM_VRAM_INFO_V3
+ typedef struct _ATOM_VRAM_INFO_V4
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+- USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+- USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+- USHORT usRerseved;
+- UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
++ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
++ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
++ USHORT usRerseved;
++ UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
+ ULONG ulMemDQ7_0BitRemap; // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
+ UCHAR ucReservde[4];
+ UCHAR ucNumOfVRAMModule;
+@@ -5648,9 +6295,10 @@ typedef struct _ATOM_VRAM_INFO_V4
+ typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+- USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+- USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+- USHORT usReserved[4];
++ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
++ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
++ USHORT usPerBytePresetOffset; // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings
++ USHORT usReserved[3];
+ UCHAR ucNumOfVRAMModule; // indicate number of VRAM module
+ UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
+ UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODUE version
+@@ -5935,6 +6583,52 @@ typedef struct _ATOM_DISP_OUT_INFO_V2
+ ASIC_ENCODER_INFO asEncoderInfo[1];
+ }ATOM_DISP_OUT_INFO_V2;
+
++
++typedef struct _ATOM_DISP_CLOCK_ID {
++ UCHAR ucPpllId;
++ UCHAR ucPpllAttribute;
++}ATOM_DISP_CLOCK_ID;
++
++// ucPpllAttribute
++#define CLOCK_SOURCE_SHAREABLE 0x01
++#define CLOCK_SOURCE_DP_MODE 0x02
++#define CLOCK_SOURCE_NONE_DP_MODE 0x04
++
++//DispOutInfoTable
++typedef struct _ASIC_TRANSMITTER_INFO_V2
++{
++ USHORT usTransmitterObjId;
++ USHORT usDispClkIdOffset; // point to clock source id list supported by Encoder Object
++ UCHAR ucTransmitterCmdTblId;
++ UCHAR ucConfig;
++ UCHAR ucEncoderID; // available 1st encoder ( default )
++ UCHAR ucOptionEncoderID; // available 2nd encoder ( optional )
++ UCHAR uc2ndEncoderID;
++ UCHAR ucReserved;
++}ASIC_TRANSMITTER_INFO_V2;
++
++typedef struct _ATOM_DISP_OUT_INFO_V3
++{
++ ATOM_COMMON_TABLE_HEADER sHeader;
++ USHORT ptrTransmitterInfo;
++ USHORT ptrEncoderInfo;
++ USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary.
++ USHORT usReserved;
++ UCHAR ucDCERevision;
++ UCHAR ucMaxDispEngineNum;
++ UCHAR ucMaxActiveDispEngineNum;
++ UCHAR ucMaxPPLLNum;
++ UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
++ UCHAR ucReserved[3];
++ ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alignment only
++}ATOM_DISP_OUT_INFO_V3;
++
++typedef enum CORE_REF_CLK_SOURCE{
++ CLOCK_SRC_XTALIN=0,
++ CLOCK_SRC_XO_IN=1,
++ CLOCK_SRC_XO_IN2=2,
++}CORE_REF_CLK_SOURCE;
++
+ // DispDevicePriorityInfo
+ typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
+ {
+@@ -6070,6 +6764,39 @@ typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+ #define HW_I2C_READ 0
+ #define I2C_2BYTE_ADDR 0x02
+
++/****************************************************************************/
++// Structures used by HW_Misc_OperationTable
++/****************************************************************************/
++typedef struct _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1
++{
++ UCHAR ucCmd; // Input: To tell which action to take
++ UCHAR ucReserved[3];
++ ULONG ulReserved;
++}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1;
++
++typedef struct _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1
++{
++ UCHAR ucReturnCode; // Output: Return value based on the action taken
++ UCHAR ucReserved[3];
++ ULONG ulReserved;
++}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1;
++
++// Actions code
++#define ATOM_GET_SDI_SUPPORT 0xF0
++
++// Return code
++#define ATOM_UNKNOWN_CMD 0
++#define ATOM_FEATURE_NOT_SUPPORTED 1
++#define ATOM_FEATURE_SUPPORTED 2
++
++typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION
++{
++ ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 sInput_Output;
++ PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS sReserved;
++}ATOM_HW_MISC_OPERATION_PS_ALLOCATION;
++
++/****************************************************************************/
++
+ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+ {
+ UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ...
+@@ -6090,6 +6817,52 @@ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+ #define SELECT_CRTC_PIXEL_RATE 7
+ #define SELECT_VGA_BLK 8
+
++// DIGTransmitterInfoTable structure used to program UNIPHY settings
++typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{
++ ATOM_COMMON_TABLE_HEADER sHeader;
++ USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock
++ USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info
++ USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range
++ USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info
++ USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
++}DIG_TRANSMITTER_INFO_HEADER_V3_1;
++
++typedef struct _CLOCK_CONDITION_REGESTER_INFO{
++ USHORT usRegisterIndex;
++ UCHAR ucStartBit;
++ UCHAR ucEndBit;
++}CLOCK_CONDITION_REGESTER_INFO;
++
++typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
++ USHORT usMaxClockFreq;
++ UCHAR ucEncodeMode;
++ UCHAR ucPhySel;
++ ULONG ulAnalogSetting[1];
++}CLOCK_CONDITION_SETTING_ENTRY;
++
++typedef struct _CLOCK_CONDITION_SETTING_INFO{
++ USHORT usEntrySize;
++ CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
++}CLOCK_CONDITION_SETTING_INFO;
++
++typedef struct _PHY_CONDITION_REG_VAL{
++ ULONG ulCondition;
++ ULONG ulRegVal;
++}PHY_CONDITION_REG_VAL;
++
++typedef struct _PHY_CONDITION_REG_INFO{
++ USHORT usRegIndex;
++ USHORT usSize;
++ PHY_CONDITION_REG_VAL asRegVal[1];
++}PHY_CONDITION_REG_INFO;
++
++typedef struct _PHY_ANALOG_SETTING_INFO{
++ UCHAR ucEncodeMode;
++ UCHAR ucPhySel;
++ USHORT usSize;
++ PHY_CONDITION_REG_INFO asAnalogSetting[1];
++}PHY_ANALOG_SETTING_INFO;
++
+ /****************************************************************************/
+ //Portion VI: Definitinos for vbios MC scratch registers that driver used
+ /****************************************************************************/
+@@ -7020,4 +7793,68 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+
+ #pragma pack() // BIOS data must use byte aligment
+
++//
++// AMD ACPI Table
++//
++#pragma pack(1)
++
++typedef struct {
++ ULONG Signature;
++ ULONG TableLength; //Length
++ UCHAR Revision;
++ UCHAR Checksum;
++ UCHAR OemId[6];
++ UCHAR OemTableId[8]; //UINT64 OemTableId;
++ ULONG OemRevision;
++ ULONG CreatorId;
++ ULONG CreatorRevision;
++} AMD_ACPI_DESCRIPTION_HEADER;
++/*
++//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h
++typedef struct {
++ UINT32 Signature; //0x0
++ UINT32 Length; //0x4
++ UINT8 Revision; //0x8
++ UINT8 Checksum; //0x9
++ UINT8 OemId[6]; //0xA
++ UINT64 OemTableId; //0x10
++ UINT32 OemRevision; //0x18
++ UINT32 CreatorId; //0x1C
++ UINT32 CreatorRevision; //0x20
++}EFI_ACPI_DESCRIPTION_HEADER;
++*/
++typedef struct {
++ AMD_ACPI_DESCRIPTION_HEADER SHeader;
++ UCHAR TableUUID[16]; //0x24
++ ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
++ ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
++ ULONG Reserved[4]; //0x3C
++}UEFI_ACPI_VFCT;
++
++typedef struct {
++ ULONG PCIBus; //0x4C
++ ULONG PCIDevice; //0x50
++ ULONG PCIFunction; //0x54
++ USHORT VendorID; //0x58
++ USHORT DeviceID; //0x5A
++ USHORT SSVID; //0x5C
++ USHORT SSID; //0x5E
++ ULONG Revision; //0x60
++ ULONG ImageLength; //0x64
++}VFCT_IMAGE_HEADER;
++
++
++typedef struct {
++ VFCT_IMAGE_HEADER VbiosHeader;
++ UCHAR VbiosContent[1];
++}GOP_VBIOS_CONTENT;
++
++typedef struct {
++ VFCT_IMAGE_HEADER Lib1Header;
++ UCHAR Lib1Content[1];
++}GOP_LIB1_CONTENT;
++
++#pragma pack()
++
++
+ #endif /* _ATOMBIOS_H */
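
An aside on the LVDS fields documented above: ucLVDSMisc is a plain bit mask (the SYS_INFO_LVDSMISC__* values) and the *_in4Ms power-sequence fields count 4 ms units, with 0 selecting the VBIOS default. The stand-alone check below is illustrative only and is not part of the patch; it re-uses the mask values and the unit convention described in the ATOM_INTEGRATED_SYSTEM_INFO_V1_7 comments.

#include <stdio.h>

/* Mask values for ucLVDSMisc as defined in atombios.h above. */
#define SYS_INFO_LVDSMISC__888_FPDI_MODE   0x01
#define SYS_INFO_LVDSMISC__DL_CH_SWAP      0x02
#define SYS_INFO_LVDSMISC__888_BPC         0x04
#define SYS_INFO_LVDSMISC__OVERRIDE_EN     0x08
#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10

/* Power-sequence fields are stored in 4 ms units; 0 selects the VBIOS default. */
static unsigned int lvds_delay_ms(unsigned char in4ms, unsigned int default_ms)
{
	return in4ms ? in4ms * 4u : default_ms;
}

int main(void)
{
	unsigned char lvds_misc = 0x0d; /* example: FPDI mode + 888 bpc + override enable */

	printf("FPDI mode      : %d\n", !!(lvds_misc & SYS_INFO_LVDSMISC__888_FPDI_MODE));
	printf("links swapped  : %d\n", !!(lvds_misc & SYS_INFO_LVDSMISC__DL_CH_SWAP));
	printf("888 bpc        : %d\n", !!(lvds_misc & SYS_INFO_LVDSMISC__888_BPC));
	printf("override enable: %d\n", !!(lvds_misc & SYS_INFO_LVDSMISC__OVERRIDE_EN));
	printf("BLON active low: %d\n", !!(lvds_misc & SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW));
	/* ucLVDSPwrOnSeqDIGONtoDE_in4Ms == 0 means the documented 32 ms default. */
	printf("DIGON->DE delay: %u ms\n", lvds_delay_ms(0, 32));
	return 0;
}
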
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 8227e76..28e69e9 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -123,21 +123,6 @@ struct radeon_device;
+ /*
+ * BIOS.
+ */
+-#define ATRM_BIOS_PAGE 4096
+-
+-#if defined(CONFIG_VGA_SWITCHEROO)
+-bool radeon_atrm_supported(struct pci_dev *pdev);
+-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
+-#else
+-static inline bool radeon_atrm_supported(struct pci_dev *pdev)
+-{
+- return false;
+-}
+-
+-static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
+- return -EINVAL;
+-}
+-#endif
+ bool radeon_get_bios(struct radeon_device *rdev);
+
+
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 9d2c369..38585c5 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -446,7 +446,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ }
+
+ /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+- if ((dev->pdev->device == 0x9802) &&
++ if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
+ (dev->pdev->subsystem_vendor == 0x1734) &&
+ (dev->pdev->subsystem_device == 0x11bd)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index 9d95792..2a2cf0b 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -30,56 +30,8 @@ static struct radeon_atpx_priv {
+ /* handle for device - and atpx */
+ acpi_handle dhandle;
+ acpi_handle atpx_handle;
+- acpi_handle atrm_handle;
+ } radeon_atpx_priv;
+
+-/* retrieve the ROM in 4k blocks */
+-static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+- int offset, int len)
+-{
+- acpi_status status;
+- union acpi_object atrm_arg_elements[2], *obj;
+- struct acpi_object_list atrm_arg;
+- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+-
+- atrm_arg.count = 2;
+- atrm_arg.pointer = &atrm_arg_elements[0];
+-
+- atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+- atrm_arg_elements[0].integer.value = offset;
+-
+- atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+- atrm_arg_elements[1].integer.value = len;
+-
+- status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+- if (ACPI_FAILURE(status)) {
+- printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+- return -ENODEV;
+- }
+-
+- obj = (union acpi_object *)buffer.pointer;
+- memcpy(bios+offset, obj->buffer.pointer, len);
+- kfree(buffer.pointer);
+- return len;
+-}
+-
+-bool radeon_atrm_supported(struct pci_dev *pdev)
+-{
+- /* get the discrete ROM only via ATRM */
+- if (!radeon_atpx_priv.atpx_detected)
+- return false;
+-
+- if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+- return false;
+- return true;
+-}
+-
+-
+-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
+-{
+- return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
+-}
+-
+ static int radeon_atpx_get_version(acpi_handle handle)
+ {
+ acpi_status status;
+@@ -197,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
+
+ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+ {
+- acpi_handle dhandle, atpx_handle, atrm_handle;
++ acpi_handle dhandle, atpx_handle;
+ acpi_status status;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+@@ -208,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+ if (ACPI_FAILURE(status))
+ return false;
+
+- status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+- if (ACPI_FAILURE(status))
+- return false;
+-
+ radeon_atpx_priv.dhandle = dhandle;
+ radeon_atpx_priv.atpx_handle = atpx_handle;
+- radeon_atpx_priv.atrm_handle = atrm_handle;
+ return true;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index 229a20f..d306cc8 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -32,6 +32,7 @@
+
+ #include <linux/vga_switcheroo.h>
+ #include <linux/slab.h>
++#include <linux/acpi.h>
+ /*
+ * BIOS.
+ */
+@@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)
+ return true;
+ }
+
++#ifdef CONFIG_ACPI
+ /* ATRM is used to get the BIOS on the discrete cards in
+ * dual-gpu systems.
+ */
++/* retrieve the ROM in 4k blocks */
++#define ATRM_BIOS_PAGE 4096
++/**
++ * radeon_atrm_call - fetch a chunk of the vbios
++ *
++ * @atrm_handle: acpi ATRM handle
++ * @bios: vbios image pointer
++ * @offset: offset of vbios image data to fetch
++ * @len: length of vbios image data to fetch
++ *
++ * Executes ATRM to fetch a chunk of the discrete
++ * vbios image on PX systems (all asics).
++ * Returns the length of the buffer fetched.
++ */
++static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
++ int offset, int len)
++{
++ acpi_status status;
++ union acpi_object atrm_arg_elements[2], *obj;
++ struct acpi_object_list atrm_arg;
++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
++
++ atrm_arg.count = 2;
++ atrm_arg.pointer = &atrm_arg_elements[0];
++
++ atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
++ atrm_arg_elements[0].integer.value = offset;
++
++ atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
++ atrm_arg_elements[1].integer.value = len;
++
++ status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
++ if (ACPI_FAILURE(status)) {
++ printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
++ return -ENODEV;
++ }
++
++ obj = (union acpi_object *)buffer.pointer;
++ memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
++ len = obj->buffer.length;
++ kfree(buffer.pointer);
++ return len;
++}
++
+ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ {
+ int ret;
+ int size = 256 * 1024;
+ int i;
++ struct pci_dev *pdev = NULL;
++ acpi_handle dhandle, atrm_handle;
++ acpi_status status;
++ bool found = false;
++
++ /* ATRM is for the discrete card only */
++ if (rdev->flags & RADEON_IS_IGP)
++ return false;
++
++ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
++ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
++ if (!dhandle)
++ continue;
++
++ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
++ if (!ACPI_FAILURE(status)) {
++ found = true;
++ break;
++ }
++ }
+
+- if (!radeon_atrm_supported(rdev->pdev))
++ if (!found)
+ return false;
+
+ rdev->bios = kmalloc(size, GFP_KERNEL);
+@@ -117,10 +183,11 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ }
+
+ for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
+- ret = radeon_atrm_get_bios_chunk(rdev->bios,
+- (i * ATRM_BIOS_PAGE),
+- ATRM_BIOS_PAGE);
+- if (ret <= 0)
++ ret = radeon_atrm_call(atrm_handle,
++ rdev->bios,
++ (i * ATRM_BIOS_PAGE),
++ ATRM_BIOS_PAGE);
++ if (ret < ATRM_BIOS_PAGE)
+ break;
+ }
+
+@@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ }
+ return true;
+ }
++#else
++static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
++{
++ return false;
++}
++#endif
+
+ static bool ni_read_disabled_bios(struct radeon_device *rdev)
+ {
+@@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
+ return legacy_read_disabled_bios(rdev);
+ }
+
++#ifdef CONFIG_ACPI
++static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
++{
++ bool ret = false;
++ struct acpi_table_header *hdr;
++ acpi_size tbl_size;
++ UEFI_ACPI_VFCT *vfct;
++ GOP_VBIOS_CONTENT *vbios;
++ VFCT_IMAGE_HEADER *vhdr;
++
++ if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
++ return false;
++ if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
++ DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
++ goto out_unmap;
++ }
++
++ vfct = (UEFI_ACPI_VFCT *)hdr;
++ if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
++ DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
++ goto out_unmap;
++ }
++
++ vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
++ vhdr = &vbios->VbiosHeader;
++ DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
++ vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
++ vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
++
++ if (vhdr->PCIBus != rdev->pdev->bus->number ||
++ vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
++ vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
++ vhdr->VendorID != rdev->pdev->vendor ||
++ vhdr->DeviceID != rdev->pdev->device) {
++ DRM_INFO("ACPI VFCT table is not for this card\n");
++ goto out_unmap;
++ };
++
++ if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
++ DRM_ERROR("ACPI VFCT image truncated\n");
++ goto out_unmap;
++ }
++
++ rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
++ ret = !!rdev->bios;
++
++out_unmap:
++ return ret;
++}
++#else
++static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
++{
++ return false;
++}
++#endif
+
+ bool radeon_get_bios(struct radeon_device *rdev)
+ {
+@@ -484,6 +612,8 @@ bool radeon_get_bios(struct radeon_device *rdev)
+
+ r = radeon_atrm_get_bios(rdev);
+ if (r == false)
++ r = radeon_acpi_vfct_bios(rdev);
++ if (r == false)
+ r = igp_read_bios_from_vram(rdev);
+ if (r == false)
+ r = radeon_read_bios(rdev);
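
The bounds checks in radeon_acpi_vfct_bios() above depend on the byte offsets annotated in the VFCT structures added to atombios.h (TableUUID at 0x24, VBIOSImageOffset at 0x34, image header at 0x4C). As a quick stand-alone sanity check — illustrative only, re-declaring the two headers with fixed-width types under #pragma pack(1) instead of the ATOM typedefs — the offsets can be printed and compared against those comments:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#pragma pack(push, 1)
typedef struct {                        /* mirrors AMD_ACPI_DESCRIPTION_HEADER */
	uint32_t Signature;
	uint32_t TableLength;
	uint8_t  Revision;
	uint8_t  Checksum;
	uint8_t  OemId[6];
	uint8_t  OemTableId[8];
	uint32_t OemRevision;
	uint32_t CreatorId;
	uint32_t CreatorRevision;
} amd_acpi_hdr;

typedef struct {                        /* mirrors UEFI_ACPI_VFCT */
	amd_acpi_hdr SHeader;
	uint8_t  TableUUID[16];         /* expected at 0x24 */
	uint32_t VBIOSImageOffset;      /* expected at 0x34 */
	uint32_t Lib1ImageOffset;       /* expected at 0x38 */
	uint32_t Reserved[4];           /* expected at 0x3C */
} uefi_acpi_vfct;
#pragma pack(pop)

int main(void)
{
	printf("sizeof(header)   = 0x%zx\n", sizeof(amd_acpi_hdr));
	printf("TableUUID        @ 0x%zx\n", offsetof(uefi_acpi_vfct, TableUUID));
	printf("VBIOSImageOffset @ 0x%zx\n", offsetof(uefi_acpi_vfct, VBIOSImageOffset));
	printf("Lib1ImageOffset  @ 0x%zx\n", offsetof(uefi_acpi_vfct, Lib1ImageOffset));
	printf("Reserved         @ 0x%zx\n", offsetof(uefi_acpi_vfct, Reserved));
	printf("sizeof(VFCT)     = 0x%zx\n", sizeof(uefi_acpi_vfct));
	return 0;
}

With 1-byte packing the ACPI header is 0x24 bytes and the full UEFI_ACPI_VFCT comes to 0x4C, matching the //0xNN annotations above and the sizeof() checks in radeon_acpi_vfct_bios().
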
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 39497c7..f3ae607 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -117,6 +117,7 @@ int radeon_bo_create(struct radeon_device *rdev,
+ return -ENOMEM;
+ }
+
++retry:
+ bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+ if (bo == NULL)
+ return -ENOMEM;
+@@ -129,8 +130,6 @@ int radeon_bo_create(struct radeon_device *rdev,
+ bo->gem_base.driver_private = NULL;
+ bo->surface_reg = -1;
+ INIT_LIST_HEAD(&bo->list);
+-
+-retry:
+ radeon_ttm_placement_from_domain(bo, domain);
+ /* Kernel allocation are uninterruptible */
+ mutex_lock(&rdev->vram_mutex);
+diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
+index b99af34..a2abb8e 100644
+--- a/drivers/hid/hid-chicony.c
++++ b/drivers/hid/hid-chicony.c
+@@ -60,6 +60,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ static const struct hid_device_id ch_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, ch_devices);
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 95430a0..5cc029f 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1398,12 +1398,14 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
+index 2f0be4c..9e43aac 100644
+--- a/drivers/hid/hid-cypress.c
++++ b/drivers/hid/hid-cypress.c
+@@ -129,6 +129,8 @@ static const struct hid_device_id cp_devices[] = {
+ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
+ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4),
++ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
+ .driver_data = CP_2WHEEL_MOUSE_HACK },
+ { }
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 7db934d..e4317a2 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -196,6 +196,7 @@
+ #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
+ #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
+ #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
++#define USB_DEVICE_ID_CHICONY_AK1D 0x1125
+
+ #define USB_VENDOR_ID_CHUNGHWAT 0x2247
+ #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
+@@ -225,6 +226,7 @@
+ #define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61
+ #define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64
+ #define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1
++#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
+ #define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
+
+ #define USB_VENDOR_ID_DEALEXTREAME 0x10c5
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 0bfa545..c76b051 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -568,24 +568,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
+ scmnd->sc_data_direction);
+ }
+
+-static void srp_remove_req(struct srp_target_port *target,
+- struct srp_request *req, s32 req_lim_delta)
++/**
++ * srp_claim_req - Take ownership of the scmnd associated with a request.
++ * @target: SRP target port.
++ * @req: SRP request.
++ * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
++ * ownership of @req->scmnd if it equals @scmnd.
++ *
++ * Return value:
++ * Either NULL or a pointer to the SCSI command the caller became owner of.
++ */
++static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
++ struct srp_request *req,
++ struct scsi_cmnd *scmnd)
+ {
+ unsigned long flags;
+
+- srp_unmap_data(req->scmnd, target, req);
++ spin_lock_irqsave(&target->lock, flags);
++ if (!scmnd) {
++ scmnd = req->scmnd;
++ req->scmnd = NULL;
++ } else if (req->scmnd == scmnd) {
++ req->scmnd = NULL;
++ } else {
++ scmnd = NULL;
++ }
++ spin_unlock_irqrestore(&target->lock, flags);
++
++ return scmnd;
++}
++
++/**
++ * srp_free_req() - Unmap data and add request to the free request list.
++ */
++static void srp_free_req(struct srp_target_port *target,
++ struct srp_request *req, struct scsi_cmnd *scmnd,
++ s32 req_lim_delta)
++{
++ unsigned long flags;
++
++ srp_unmap_data(scmnd, target, req);
++
+ spin_lock_irqsave(&target->lock, flags);
+ target->req_lim += req_lim_delta;
+- req->scmnd = NULL;
+ list_add_tail(&req->list, &target->free_reqs);
+ spin_unlock_irqrestore(&target->lock, flags);
+ }
+
+ static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
+ {
+- req->scmnd->result = DID_RESET << 16;
+- req->scmnd->scsi_done(req->scmnd);
+- srp_remove_req(target, req, 0);
++ struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
++
++ if (scmnd) {
++ scmnd->result = DID_RESET << 16;
++ scmnd->scsi_done(scmnd);
++ srp_free_req(target, req, scmnd, 0);
++ }
+ }
+
+ static int srp_reconnect_target(struct srp_target_port *target)
+@@ -1055,11 +1093,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
+ complete(&target->tsk_mgmt_done);
+ } else {
+ req = &target->req_ring[rsp->tag];
+- scmnd = req->scmnd;
+- if (!scmnd)
++ scmnd = srp_claim_req(target, req, NULL);
++ if (!scmnd) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ "Null scmnd for RSP w/tag %016llx\n",
+ (unsigned long long) rsp->tag);
++
++ spin_lock_irqsave(&target->lock, flags);
++ target->req_lim += be32_to_cpu(rsp->req_lim_delta);
++ spin_unlock_irqrestore(&target->lock, flags);
++
++ return;
++ }
+ scmnd->result = rsp->status;
+
+ if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
+@@ -1074,7 +1119,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
+ else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
+ scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
+
+- srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
++ srp_free_req(target, req, scmnd,
++ be32_to_cpu(rsp->req_lim_delta));
++
+ scmnd->host_scribble = NULL;
+ scmnd->scsi_done(scmnd);
+ }
+@@ -1613,25 +1660,17 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ {
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
+- int ret = SUCCESS;
+
+ shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
+
+- if (!req || target->qp_in_error)
+- return FAILED;
+- if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+- SRP_TSK_ABORT_TASK))
++ if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
+ return FAILED;
++ srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
++ SRP_TSK_ABORT_TASK);
++ srp_free_req(target, req, scmnd, 0);
++ scmnd->result = DID_ABORT << 16;
+
+- if (req->scmnd) {
+- if (!target->tsk_mgmt_status) {
+- srp_remove_req(target, req, 0);
+- scmnd->result = DID_ABORT << 16;
+- } else
+- ret = FAILED;
+- }
+-
+- return ret;
++ return SUCCESS;
+ }
+
+ static int srp_reset_device(struct scsi_cmnd *scmnd)
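
The srp_claim_req()/srp_free_req() split above is essentially an ownership hand-off: whichever path (normal RSP completion, abort, or reset) claims the command under target->lock becomes the only one allowed to complete it, which is what lets the simplified srp_abort() finish the command unconditionally. A minimal stand-alone sketch of the same idea — hypothetical names, pthreads instead of kernel spinlocks, not SRP code:

#include <pthread.h>
#include <stdio.h>

/* Illustrative only: a slot whose command pointer can be claimed exactly once;
 * whichever caller loses the race simply gets NULL back. */
struct slot {
	pthread_mutex_t lock;
	void *cmd;                      /* non-NULL while a command is in flight */
};

/* If expected is NULL, claim whatever is pending; otherwise claim only if the
 * slot still holds that particular command (mirrors srp_claim_req()). */
static void *slot_claim(struct slot *s, void *expected)
{
	void *got;

	pthread_mutex_lock(&s->lock);
	got = s->cmd;
	if (got && (!expected || got == expected))
		s->cmd = NULL;          /* caller now owns the command */
	else
		got = NULL;
	pthread_mutex_unlock(&s->lock);
	return got;
}

int main(void)
{
	int cmd = 42;
	struct slot s = { PTHREAD_MUTEX_INITIALIZER, &cmd };

	printf("first claim : %p\n", slot_claim(&s, &cmd)); /* succeeds */
	printf("second claim: %p\n", slot_claim(&s, &cmd)); /* NULL: already claimed */
	return 0;
}
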
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index d8646d7..2887f22 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1144,8 +1144,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
+ ret = 0;
+ }
+ rdev->sectors = rdev->sb_start;
+- /* Limit to 4TB as metadata cannot record more than that */
+- if (rdev->sectors >= (2ULL << 32))
++ /* Limit to 4TB as metadata cannot record more than that.
++ * (not needed for Linear and RAID0 as metadata doesn't
++ * record this size)
++ */
++ if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
+ rdev->sectors = (2ULL << 32) - 2;
+
+ if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
+@@ -1427,7 +1430,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
+ /* Limit to 4TB as metadata cannot record more than that.
+ * 4TB == 2^32 KB, or 2*2^32 sectors.
+ */
+- if (num_sectors >= (2ULL << 32))
++ if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
+ num_sectors = (2ULL << 32) - 2;
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ rdev->sb_page);
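
For context on the (2ULL << 32) clamp above: the 0.90 superblock stores the component size as a 32-bit count of 1 KiB blocks, so anything at or above 2^32 KiB (4 TiB), i.e. 2 * 2^32 512-byte sectors, cannot be recorded and is clamped, and the patch keeps that clamp only for levels that actually record the size (RAID1 and up), per the comment. A quick check of the arithmetic, illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned long long boundary_kib = 1ULL << 32;                /* 32-bit KiB size field wraps here */
	unsigned long long sectors      = boundary_kib * 1024 / 512; /* KiB -> 512-byte sectors */

	printf("4 TiB boundary: %llu KiB (%llu TiB)\n", boundary_kib, boundary_kib >> 30);
	printf("sector limit  : %llu (2ULL << 32 = %llu)\n", sectors, 2ULL << 32);
	return 0;
}
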
+diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
+index fb68805..027550d 100644
+--- a/drivers/media/dvb/siano/smsusb.c
++++ b/drivers/media/dvb/siano/smsusb.c
+@@ -481,7 +481,7 @@ static int smsusb_resume(struct usb_interface *intf)
+ return 0;
+ }
+
+-static const struct usb_device_id smsusb_id_table[] __devinitconst = {
++static const struct usb_device_id smsusb_id_table[] = {
+ { USB_DEVICE(0x187f, 0x0010),
+ .driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
+ { USB_DEVICE(0x187f, 0x0100),
+diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
+index 89fec4c..731cd16 100644
+--- a/drivers/media/video/gspca/spca506.c
++++ b/drivers/media/video/gspca/spca506.c
+@@ -685,7 +685,7 @@ static const struct sd_desc sd_desc = {
+ };
+
+ /* -- module initialisation -- */
+-static const struct usb_device_id device_table[] __devinitconst = {
++static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x06e1, 0xa190)},
+ /*fixme: may be IntelPCCameraPro BRIDGE_SPCA505
+ {USB_DEVICE(0x0733, 0x0430)}, */
+diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
+index 17bbacb..cc2ae7e 100644
+--- a/drivers/misc/sgi-xp/xpc_uv.c
++++ b/drivers/misc/sgi-xp/xpc_uv.c
+@@ -18,6 +18,8 @@
+ #include <linux/interrupt.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
+ #include <asm/uv/uv_hub.h>
+@@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
+ XPC_NOTIFY_MSG_SIZE_UV)
+ #define XPC_NOTIFY_IRQ_NAME "xpc_notify"
+
++static int xpc_mq_node = -1;
++
+ static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
+ static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
+
+@@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
+ #if defined CONFIG_X86_64
+ mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
+ UV_AFFINITY_CPU);
+- if (mq->irq < 0) {
+- dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
+- -mq->irq);
++ if (mq->irq < 0)
+ return mq->irq;
+- }
+
+ mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
+
+@@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
+ mq->mmr_blade = uv_cpu_to_blade_id(cpu);
+
+ nid = cpu_to_node(cpu);
+- page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+- pg_order);
++ page = alloc_pages_exact_node(nid,
++ GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
++ pg_order);
+ if (page == NULL) {
+ dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
+ "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
+@@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
+ .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
+ };
+
++static int
++xpc_init_mq_node(int nid)
++{
++ int cpu;
++
++ get_online_cpus();
++
++ for_each_cpu(cpu, cpumask_of_node(nid)) {
++ xpc_activate_mq_uv =
++ xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
++ XPC_ACTIVATE_IRQ_NAME,
++ xpc_handle_activate_IRQ_uv);
++ if (!IS_ERR(xpc_activate_mq_uv))
++ break;
++ }
++ if (IS_ERR(xpc_activate_mq_uv)) {
++ put_online_cpus();
++ return PTR_ERR(xpc_activate_mq_uv);
++ }
++
++ for_each_cpu(cpu, cpumask_of_node(nid)) {
++ xpc_notify_mq_uv =
++ xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
++ XPC_NOTIFY_IRQ_NAME,
++ xpc_handle_notify_IRQ_uv);
++ if (!IS_ERR(xpc_notify_mq_uv))
++ break;
++ }
++ if (IS_ERR(xpc_notify_mq_uv)) {
++ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
++ put_online_cpus();
++ return PTR_ERR(xpc_notify_mq_uv);
++ }
++
++ put_online_cpus();
++ return 0;
++}
++
+ int
+ xpc_init_uv(void)
+ {
++ int nid;
++ int ret = 0;
++
+ xpc_arch_ops = xpc_arch_ops_uv;
+
+ if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
+@@ -1742,21 +1785,21 @@ xpc_init_uv(void)
+ return -E2BIG;
+ }
+
+- xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
+- XPC_ACTIVATE_IRQ_NAME,
+- xpc_handle_activate_IRQ_uv);
+- if (IS_ERR(xpc_activate_mq_uv))
+- return PTR_ERR(xpc_activate_mq_uv);
++ if (xpc_mq_node < 0)
++ for_each_online_node(nid) {
++ ret = xpc_init_mq_node(nid);
+
+- xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
+- XPC_NOTIFY_IRQ_NAME,
+- xpc_handle_notify_IRQ_uv);
+- if (IS_ERR(xpc_notify_mq_uv)) {
+- xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+- return PTR_ERR(xpc_notify_mq_uv);
+- }
++ if (!ret)
++ break;
++ }
++ else
++ ret = xpc_init_mq_node(xpc_mq_node);
+
+- return 0;
++ if (ret < 0)
++ dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
++ -ret);
++
++ return ret;
+ }
+
+ void
+@@ -1765,3 +1808,6 @@ xpc_exit_uv(void)
+ xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
+ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+ }
++
++module_param(xpc_mq_node, int, 0);
++MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
+diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
+index e888202..01b104e 100644
+--- a/drivers/net/netconsole.c
++++ b/drivers/net/netconsole.c
+@@ -652,7 +652,6 @@ static int netconsole_netdev_event(struct notifier_block *this,
+ flags);
+ dev_put(nt->np.dev);
+ nt->np.dev = NULL;
+- netconsole_target_put(nt);
+ }
+ nt->enabled = 0;
+ stopped = true;
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index e6d791c..b4cbc82 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -1782,7 +1782,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_hdr *hdr;
+ int retval;
+- bool decrypt_error = false;
+ struct ath_rx_status rs;
+ enum ath9k_rx_qtype qtype;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+@@ -1804,6 +1803,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+ tsf_lower = tsf & 0xffffffff;
+
+ do {
++ bool decrypt_error = false;
+ /* If handling rx interrupt and flush is in progress => exit */
+ if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
+ break;
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index 9b60968..8a009bc 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -42,7 +42,7 @@ MODULE_FIRMWARE("isl3887usb");
+ * whenever you add a new device.
+ */
+
+-static struct usb_device_id p54u_table[] __devinitdata = {
++static struct usb_device_id p54u_table[] = {
+ /* Version 1 devices (pci chip + net2280) */
+ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
+ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
+diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
+index 4a78f9e..4e98c39 100644
+--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
++++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
+@@ -44,7 +44,7 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+ MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");
+ MODULE_LICENSE("GPL");
+
+-static struct usb_device_id rtl8187_table[] __devinitdata = {
++static struct usb_device_id rtl8187_table[] = {
+ /* Asus */
+ {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187},
+ /* Belkin */
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index d024f83..68af94c 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -952,6 +952,13 @@ static int pci_pm_poweroff_noirq(struct device *dev)
+ if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
+ pci_prepare_to_sleep(pci_dev);
+
++ /*
++ * The reason for doing this here is the same as for the analogous code
++ * in pci_pm_suspend_noirq().
++ */
++ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
++ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
++
+ return 0;
+ }
+
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index b0859d4..ec5b17f 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -86,6 +86,10 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ { KE_KEY, 0x8A, { KEY_PROG1 } },
+ { KE_KEY, 0x95, { KEY_MEDIA } },
+ { KE_KEY, 0x99, { KEY_PHONE } },
++ { KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
++ { KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
++ { KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
++ { KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
+ { KE_KEY, 0xb5, { KEY_CALC } },
+ { KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
+ { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
+diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
+index 30d2072..33471e1 100644
+--- a/drivers/rapidio/devices/tsi721.c
++++ b/drivers/rapidio/devices/tsi721.c
+@@ -439,6 +439,9 @@ static void tsi721_db_dpc(struct work_struct *work)
+ " info %4.4x\n", DBELL_SID(idb.bytes),
+ DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
+ }
++
++ wr_ptr = ioread32(priv->regs +
++ TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+ }
+
+ iowrite32(rd_ptr & (IDB_QSIZE - 1),
+@@ -449,6 +452,10 @@ static void tsi721_db_dpc(struct work_struct *work)
+ regval |= TSI721_SR_CHINT_IDBQRCV;
+ iowrite32(regval,
+ priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
++
++ wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
++ if (wr_ptr != rd_ptr)
++ schedule_work(&priv->idb_work);
+ }
+
+ /**
+@@ -2155,7 +2162,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+ {
+ struct tsi721_device *priv;
+- int i, cap;
++ int cap;
+ int err;
+ u32 regval;
+
+@@ -2175,12 +2182,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
+ priv->pdev = pdev;
+
+ #ifdef DEBUG
++ {
++ int i;
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
+ i, (unsigned long long)pci_resource_start(pdev, i),
+ (unsigned long)pci_resource_len(pdev, i),
+ pci_resource_flags(pdev, i));
+ }
++ }
+ #endif
+ /*
+ * Verify BAR configuration
+diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
+index 971bc8e..11bcb20 100644
+--- a/drivers/rtc/rtc-rs5c348.c
++++ b/drivers/rtc/rtc-rs5c348.c
+@@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
+ tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
+ if (!pdata->rtc_24h) {
+- tm->tm_hour %= 12;
+- if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM)
++ if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) {
++ tm->tm_hour -= 20;
++ tm->tm_hour %= 12;
+ tm->tm_hour += 12;
++ } else
++ tm->tm_hour %= 12;
+ }
+ tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
+ tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
+diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
+index 8be5604..0d70f68 100644
+--- a/drivers/staging/speakup/main.c
++++ b/drivers/staging/speakup/main.c
+@@ -1854,7 +1854,7 @@ static void speakup_bits(struct vc_data *vc)
+
+ static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
+ {
+- static u_char *goto_buf = "\0\0\0\0\0\0";
++ static u_char goto_buf[8];
+ static int num;
+ int maxlen, go_pos;
+ char *cp;
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
+index 27521b6..ae62d57 100644
+--- a/drivers/staging/vt6656/main_usb.c
++++ b/drivers/staging/vt6656/main_usb.c
+@@ -222,7 +222,7 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode");
+ // Static vars definitions
+ //
+
+-static struct usb_device_id vt6656_table[] __devinitdata = {
++static struct usb_device_id vt6656_table[] = {
+ {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
+ {}
+ };
+diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
+index f958eb4..3f0ce2b 100644
+--- a/drivers/staging/winbond/wbusb.c
++++ b/drivers/staging/winbond/wbusb.c
+@@ -25,7 +25,7 @@ MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION("0.1");
+
+-static const struct usb_device_id wb35_table[] __devinitconst = {
++static const struct usb_device_id wb35_table[] = {
+ { USB_DEVICE(0x0416, 0x0035) },
+ { USB_DEVICE(0x18E8, 0x6201) },
+ { USB_DEVICE(0x18E8, 0x6206) },
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 94c03d2..597fb9b 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -3509,9 +3509,9 @@ transport_generic_get_mem(struct se_cmd *cmd)
+ return 0;
+
+ out:
+- while (i >= 0) {
+- __free_page(sg_page(&cmd->t_data_sg[i]));
++ while (i > 0) {
+ i--;
++ __free_page(sg_page(&cmd->t_data_sg[i]));
+ }
+ kfree(cmd->t_data_sg);
+ cmd->t_data_sg = NULL;
+diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
+index 5acd24a..086f7fe 100644
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -1407,10 +1407,16 @@ static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser)
+ static int pmz_poll_get_char(struct uart_port *port)
+ {
+ struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
++ int tries = 2;
+
+- while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0)
+- udelay(5);
+- return read_zsdata(uap);
++ while (tries) {
++ if ((read_zsreg(uap, R0) & Rx_CH_AV) != 0)
++ return read_zsdata(uap);
++ if (tries--)
++ udelay(5);
++ }
++
++ return NO_POLL_CHAR;
+ }
+
+ static void pmz_poll_put_char(struct uart_port *port, unsigned char c)
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 1094469..dbf7d20 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1043,7 +1043,8 @@ skip_normal_probe:
+ }
+
+
+- if (data_interface->cur_altsetting->desc.bNumEndpoints < 2)
++ if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
++ control_interface->cur_altsetting->desc.bNumEndpoints == 0)
+ return -EINVAL;
+
+ epctrl = &control_interface->cur_altsetting->endpoint[0].desc;
+diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
+index 4e1f0aa..9a2a1ae 100644
+--- a/drivers/usb/gadget/u_ether.c
++++ b/drivers/usb/gadget/u_ether.c
+@@ -669,6 +669,8 @@ static int eth_stop(struct net_device *net)
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb) {
+ struct gether *link = dev->port_usb;
++ const struct usb_endpoint_descriptor *in;
++ const struct usb_endpoint_descriptor *out;
+
+ if (link->close)
+ link->close(link);
+@@ -682,10 +684,14 @@ static int eth_stop(struct net_device *net)
+ * their own pace; the network stack can handle old packets.
+ * For the moment we leave this here, since it works.
+ */
++ in = link->in_ep->desc;
++ out = link->out_ep->desc;
+ usb_ep_disable(link->in_ep);
+ usb_ep_disable(link->out_ep);
+ if (netif_carrier_ok(net)) {
+ DBG(dev, "host still using in/out endpoints\n");
++ link->in_ep->desc = in;
++ link->out_ep->desc = out;
+ usb_ep_enable(link->in_ep);
+ usb_ep_enable(link->out_ep);
+ }
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index daf5754..07c72a4 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -95,6 +95,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
++ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_VIA)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 05f82e9..f7c0a2a 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -163,7 +163,7 @@ int xhci_reset(struct xhci_hcd *xhci)
+ xhci_writel(xhci, command, &xhci->op_regs->command);
+
+ ret = handshake(xhci, &xhci->op_regs->command,
+- CMD_RESET, 0, 250 * 1000);
++ CMD_RESET, 0, 10 * 1000 * 1000);
+ if (ret)
+ return ret;
+
+@@ -172,7 +172,8 @@ int xhci_reset(struct xhci_hcd *xhci)
+ * xHCI cannot write to any doorbells or operational registers other
+ * than status until the "Controller Not Ready" flag is cleared.
+ */
+- return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
++ return handshake(xhci, &xhci->op_regs->status,
++ STS_CNR, 0, 10 * 1000 * 1000);
+ }
+
+ #ifdef CONFIG_PCI
+diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
+index fc15ad4..723e833 100644
+--- a/drivers/usb/misc/emi62.c
++++ b/drivers/usb/misc/emi62.c
+@@ -259,7 +259,7 @@ wraperr:
+ return err;
+ }
+
+-static const struct usb_device_id id_table[] __devinitconst = {
++static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 4045e39..b3182bb 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -811,6 +811,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
+ { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
+ { USB_DEVICE(PI_VID, PI_E861_PID) },
++ { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
+ { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
+ { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index d27d7d7..54b4258 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -795,6 +795,13 @@
+ #define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */
+
+ /*
++ * Kondo Kagaku Co.Ltd.
++ * http://www.kondo-robot.com/EN
++ */
++#define KONDO_VID 0x165c
++#define KONDO_USB_SERIAL_PID 0x0002
++
++/*
+ * Bayer Ascensia Contour blood glucose meter USB-converter cable.
+ * http://winglucofacts.com/cables/
+ */
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 5c7d654..b150ed9 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1191,9 +1191,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
+ }
+
+ spin_lock_irqsave(&mos7840_port->pool_lock, flags);
+- for (i = 0; i < NUM_URBS; ++i)
+- if (mos7840_port->busy[i])
+- chars += URB_TRANSFER_BUFFER_SIZE;
++ for (i = 0; i < NUM_URBS; ++i) {
++ if (mos7840_port->busy[i]) {
++ struct urb *urb = mos7840_port->write_urb_pool[i];
++ chars += urb->transfer_buffer_length;
++ }
++ }
+ spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
+ dbg("%s - returns %d", __func__, chars);
+ return chars;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index d89aac1..113560d 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -80,84 +80,9 @@ static void option_instat_callback(struct urb *urb);
+ #define OPTION_PRODUCT_GTM380_MODEM 0x7201
+
+ #define HUAWEI_VENDOR_ID 0x12D1
+-#define HUAWEI_PRODUCT_E600 0x1001
+-#define HUAWEI_PRODUCT_E220 0x1003
+-#define HUAWEI_PRODUCT_E220BIS 0x1004
+-#define HUAWEI_PRODUCT_E1401 0x1401
+-#define HUAWEI_PRODUCT_E1402 0x1402
+-#define HUAWEI_PRODUCT_E1403 0x1403
+-#define HUAWEI_PRODUCT_E1404 0x1404
+-#define HUAWEI_PRODUCT_E1405 0x1405
+-#define HUAWEI_PRODUCT_E1406 0x1406
+-#define HUAWEI_PRODUCT_E1407 0x1407
+-#define HUAWEI_PRODUCT_E1408 0x1408
+-#define HUAWEI_PRODUCT_E1409 0x1409
+-#define HUAWEI_PRODUCT_E140A 0x140A
+-#define HUAWEI_PRODUCT_E140B 0x140B
+-#define HUAWEI_PRODUCT_E140C 0x140C
+-#define HUAWEI_PRODUCT_E140D 0x140D
+-#define HUAWEI_PRODUCT_E140E 0x140E
+-#define HUAWEI_PRODUCT_E140F 0x140F
+-#define HUAWEI_PRODUCT_E1410 0x1410
+-#define HUAWEI_PRODUCT_E1411 0x1411
+-#define HUAWEI_PRODUCT_E1412 0x1412
+-#define HUAWEI_PRODUCT_E1413 0x1413
+-#define HUAWEI_PRODUCT_E1414 0x1414
+-#define HUAWEI_PRODUCT_E1415 0x1415
+-#define HUAWEI_PRODUCT_E1416 0x1416
+-#define HUAWEI_PRODUCT_E1417 0x1417
+-#define HUAWEI_PRODUCT_E1418 0x1418
+-#define HUAWEI_PRODUCT_E1419 0x1419
+-#define HUAWEI_PRODUCT_E141A 0x141A
+-#define HUAWEI_PRODUCT_E141B 0x141B
+-#define HUAWEI_PRODUCT_E141C 0x141C
+-#define HUAWEI_PRODUCT_E141D 0x141D
+-#define HUAWEI_PRODUCT_E141E 0x141E
+-#define HUAWEI_PRODUCT_E141F 0x141F
+-#define HUAWEI_PRODUCT_E1420 0x1420
+-#define HUAWEI_PRODUCT_E1421 0x1421
+-#define HUAWEI_PRODUCT_E1422 0x1422
+-#define HUAWEI_PRODUCT_E1423 0x1423
+-#define HUAWEI_PRODUCT_E1424 0x1424
+-#define HUAWEI_PRODUCT_E1425 0x1425
+-#define HUAWEI_PRODUCT_E1426 0x1426
+-#define HUAWEI_PRODUCT_E1427 0x1427
+-#define HUAWEI_PRODUCT_E1428 0x1428
+-#define HUAWEI_PRODUCT_E1429 0x1429
+-#define HUAWEI_PRODUCT_E142A 0x142A
+-#define HUAWEI_PRODUCT_E142B 0x142B
+-#define HUAWEI_PRODUCT_E142C 0x142C
+-#define HUAWEI_PRODUCT_E142D 0x142D
+-#define HUAWEI_PRODUCT_E142E 0x142E
+-#define HUAWEI_PRODUCT_E142F 0x142F
+-#define HUAWEI_PRODUCT_E1430 0x1430
+-#define HUAWEI_PRODUCT_E1431 0x1431
+-#define HUAWEI_PRODUCT_E1432 0x1432
+-#define HUAWEI_PRODUCT_E1433 0x1433
+-#define HUAWEI_PRODUCT_E1434 0x1434
+-#define HUAWEI_PRODUCT_E1435 0x1435
+-#define HUAWEI_PRODUCT_E1436 0x1436
+-#define HUAWEI_PRODUCT_E1437 0x1437
+-#define HUAWEI_PRODUCT_E1438 0x1438
+-#define HUAWEI_PRODUCT_E1439 0x1439
+-#define HUAWEI_PRODUCT_E143A 0x143A
+-#define HUAWEI_PRODUCT_E143B 0x143B
+-#define HUAWEI_PRODUCT_E143C 0x143C
+-#define HUAWEI_PRODUCT_E143D 0x143D
+-#define HUAWEI_PRODUCT_E143E 0x143E
+-#define HUAWEI_PRODUCT_E143F 0x143F
+ #define HUAWEI_PRODUCT_K4505 0x1464
+ #define HUAWEI_PRODUCT_K3765 0x1465
+-#define HUAWEI_PRODUCT_E14AC 0x14AC
+-#define HUAWEI_PRODUCT_K3806 0x14AE
+ #define HUAWEI_PRODUCT_K4605 0x14C6
+-#define HUAWEI_PRODUCT_K3770 0x14C9
+-#define HUAWEI_PRODUCT_K3771 0x14CA
+-#define HUAWEI_PRODUCT_K4510 0x14CB
+-#define HUAWEI_PRODUCT_K4511 0x14CC
+-#define HUAWEI_PRODUCT_ETS1220 0x1803
+-#define HUAWEI_PRODUCT_E353 0x1506
+-#define HUAWEI_PRODUCT_E173S 0x1C05
+
+ #define QUANTA_VENDOR_ID 0x0408
+ #define QUANTA_PRODUCT_Q101 0xEA02
+@@ -614,101 +539,123 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
++
++
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 8745637..bf9a9b7 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -373,8 +373,15 @@ static void fb_flashcursor(struct work_struct *work)
+ struct vc_data *vc = NULL;
+ int c;
+ int mode;
++ int ret;
++
++	/* FIXME: we should sort out the unbind locking instead. */
++	/* For now, just skip flashing the cursor when the lock is
++	 * unavailable, rather than blocking fbcon deinit. */
++ ret = console_trylock();
++ if (ret == 0)
++ return;
+
+- console_lock();
+ if (ops && ops->currcon != -1)
+ vc = vc_cons[ops->currcon].d;
+
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 4115eca..19a4f0b 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -964,7 +964,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
+ /*
+ * Initialise the state of a blockdev page's buffers.
+ */
+-static void
++static sector_t
+ init_page_buffers(struct page *page, struct block_device *bdev,
+ sector_t block, int size)
+ {
+@@ -986,33 +986,41 @@ init_page_buffers(struct page *page, struct block_device *bdev,
+ block++;
+ bh = bh->b_this_page;
+ } while (bh != head);
++
++ /*
++ * Caller needs to validate requested block against end of device.
++ */
++ return end_block;
+ }
+
+ /*
+ * Create the page-cache page that contains the requested block.
+ *
+- * This is user purely for blockdev mappings.
++ * This is used purely for blockdev mappings.
+ */
+-static struct page *
++static int
+ grow_dev_page(struct block_device *bdev, sector_t block,
+- pgoff_t index, int size)
++ pgoff_t index, int size, int sizebits)
+ {
+ struct inode *inode = bdev->bd_inode;
+ struct page *page;
+ struct buffer_head *bh;
++ sector_t end_block;
++ int ret = 0; /* Will call free_more_memory() */
+
+ page = find_or_create_page(inode->i_mapping, index,
+ (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
+ if (!page)
+- return NULL;
++ return ret;
+
+ BUG_ON(!PageLocked(page));
+
+ if (page_has_buffers(page)) {
+ bh = page_buffers(page);
+ if (bh->b_size == size) {
+- init_page_buffers(page, bdev, block, size);
+- return page;
++ end_block = init_page_buffers(page, bdev,
++ index << sizebits, size);
++ goto done;
+ }
+ if (!try_to_free_buffers(page))
+ goto failed;
+@@ -1032,15 +1040,14 @@ grow_dev_page(struct block_device *bdev, sector_t block,
+ */
+ spin_lock(&inode->i_mapping->private_lock);
+ link_dev_buffers(page, bh);
+- init_page_buffers(page, bdev, block, size);
++ end_block = init_page_buffers(page, bdev, index << sizebits, size);
+ spin_unlock(&inode->i_mapping->private_lock);
+- return page;
+-
++done:
++ ret = (block < end_block) ? 1 : -ENXIO;
+ failed:
+- BUG();
+ unlock_page(page);
+ page_cache_release(page);
+- return NULL;
++ return ret;
+ }
+
+ /*
+@@ -1050,7 +1057,6 @@ failed:
+ static int
+ grow_buffers(struct block_device *bdev, sector_t block, int size)
+ {
+- struct page *page;
+ pgoff_t index;
+ int sizebits;
+
+@@ -1074,22 +1080,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
+ bdevname(bdev, b));
+ return -EIO;
+ }
+- block = index << sizebits;
++
+ /* Create a page with the proper size buffers.. */
+- page = grow_dev_page(bdev, block, index, size);
+- if (!page)
+- return 0;
+- unlock_page(page);
+- page_cache_release(page);
+- return 1;
++ return grow_dev_page(bdev, block, index, size, sizebits);
+ }
+
+ static struct buffer_head *
+ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ {
+- int ret;
+- struct buffer_head *bh;
+-
+ /* Size must be multiple of hard sectorsize */
+ if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
+ (size < 512 || size > PAGE_SIZE))) {
+@@ -1102,21 +1100,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ return NULL;
+ }
+
+-retry:
+- bh = __find_get_block(bdev, block, size);
+- if (bh)
+- return bh;
++ for (;;) {
++ struct buffer_head *bh;
++ int ret;
+
+- ret = grow_buffers(bdev, block, size);
+- if (ret == 0) {
+- free_more_memory();
+- goto retry;
+- } else if (ret > 0) {
+ bh = __find_get_block(bdev, block, size);
+ if (bh)
+ return bh;
++
++ ret = grow_buffers(bdev, block, size);
++ if (ret < 0)
++ return NULL;
++ if (ret == 0)
++ free_more_memory();
+ }
+- return NULL;
+ }
+
+ /*
+@@ -1372,10 +1369,6 @@ EXPORT_SYMBOL(__find_get_block);
+ * which corresponds to the passed block_device, block and size. The
+ * returned buffer has its reference count incremented.
+ *
+- * __getblk() cannot fail - it just keeps trying. If you pass it an
+- * illegal block number, __getblk() will happily return a buffer_head
+- * which represents the non-existent block. Very weird.
+- *
+ * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
+ * attempt is failing. FIXME, perhaps?
+ */
+diff --git a/fs/compat.c b/fs/compat.c
+index c987875..e07a3d3 100644
+--- a/fs/compat.c
++++ b/fs/compat.c
+@@ -1174,11 +1174,14 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec,
+ struct file *file;
+ int fput_needed;
+ ssize_t ret;
++ loff_t pos;
+
+ file = fget_light(fd, &fput_needed);
+ if (!file)
+ return -EBADF;
+- ret = compat_readv(file, vec, vlen, &file->f_pos);
++ pos = file->f_pos;
++ ret = compat_readv(file, vec, vlen, &pos);
++ file->f_pos = pos;
+ fput_light(file, fput_needed);
+ return ret;
+ }
+@@ -1233,11 +1236,14 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec,
+ struct file *file;
+ int fput_needed;
+ ssize_t ret;
++ loff_t pos;
+
+ file = fget_light(fd, &fput_needed);
+ if (!file)
+ return -EBADF;
+- ret = compat_writev(file, vec, vlen, &file->f_pos);
++ pos = file->f_pos;
++ ret = compat_writev(file, vec, vlen, &pos);
++ file->f_pos = pos;
+ fput_light(file, fput_needed);
+ return ret;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index a071348..f8d5fce 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -904,6 +904,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
+ ei->i_reserved_meta_blocks = 0;
+ ei->i_allocated_meta_blocks = 0;
+ ei->i_da_metadata_calc_len = 0;
++ ei->i_da_metadata_calc_last_lblock = 0;
+ spin_lock_init(&(ei->i_block_reservation_lock));
+ #ifdef CONFIG_QUOTA
+ ei->i_reserved_quota = 0;
+@@ -3107,6 +3108,10 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
+ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+ int s, j, count = 0;
+
++ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC))
++ return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
++ sbi->s_itb_per_group + 2);
++
+ first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
+ (grp * EXT4_BLOCKS_PER_GROUP(sb));
+ last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 0c84100..5242006 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1687,7 +1687,7 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
+ size_t n;
+ u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
+
+- for (n = 0; n < count; n++) {
++ for (n = 0; n < count; n++, iov++) {
+ if (iov->iov_len > (size_t) max)
+ return -ENOMEM;
+ max -= iov->iov_len;
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index 3db6b82..d774309 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -38,6 +38,8 @@
+ #include <linux/buffer_head.h> /* various write calls */
+ #include <linux/prefetch.h>
+
++#include "../pnfs.h"
++#include "../internal.h"
+ #include "blocklayout.h"
+
+ #define NFSDBG_FACILITY NFSDBG_PNFS_LD
+@@ -814,7 +816,7 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
+ * GETDEVICEINFO's maxcount
+ */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+- max_pages = max_resp_sz >> PAGE_SHIFT;
++ max_pages = nfs_page_array_len(0, max_resp_sz);
+ dprintk("%s max_resp_sz %u max_pages %d\n",
+ __func__, max_resp_sz, max_pages);
+
+diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
+index c69682a..4e2ee99 100644
+--- a/fs/nfs/blocklayout/extents.c
++++ b/fs/nfs/blocklayout/extents.c
+@@ -153,7 +153,7 @@ static int _preload_range(struct pnfs_inval_markings *marks,
+ count = (int)(end - start) / (int)tree->mtt_step_size;
+
+ /* Pre-malloc what memory we might need */
+- storage = kmalloc(sizeof(*storage) * count, GFP_NOFS);
++ storage = kcalloc(count, sizeof(*storage), GFP_NOFS);
+ if (!storage)
+ return -ENOMEM;
+ for (i = 0; i < count; i++) {
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index ac28990..756f4df 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1103,7 +1103,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
+ struct nfs_fattr *fattr = NULL;
+ int error;
+
+- if (nd->flags & LOOKUP_RCU)
++ if (nd && (nd->flags & LOOKUP_RCU))
+ return -ECHILD;
+
+ parent = dget_parent(dentry);
+@@ -1508,7 +1508,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
+ struct nfs_open_context *ctx;
+ int openflags, ret = 0;
+
+- if (nd->flags & LOOKUP_RCU)
++ if (nd && (nd->flags & LOOKUP_RCU))
+ return -ECHILD;
+
+ inode = dentry->d_inode;
+diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
+index d4bc9ed9..5195fd6 100644
+--- a/fs/nfs/nfs3proc.c
++++ b/fs/nfs/nfs3proc.c
+@@ -68,7 +68,7 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
+ nfs_fattr_init(info->fattr);
+ status = rpc_call_sync(client, &msg, 0);
+ dprintk("%s: reply fsinfo: %d\n", __func__, status);
+- if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
++ if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) {
+ msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
+ msg.rpc_resp = info->fattr;
+ status = rpc_call_sync(client, &msg, 0);
+diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
+index ed388aa..bd5d9cf 100644
+--- a/fs/nfs/nfs4filelayoutdev.c
++++ b/fs/nfs/nfs4filelayoutdev.c
+@@ -721,7 +721,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_fla
+ * GETDEVICEINFO's maxcount
+ */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+- max_pages = max_resp_sz >> PAGE_SHIFT;
++ max_pages = nfs_page_array_len(0, max_resp_sz);
+ dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
+ __func__, inode, max_resp_sz, max_pages);
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 8000459..d20221d 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5769,11 +5769,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
+ dprintk("<-- %s\n", __func__);
+ }
+
++static size_t max_response_pages(struct nfs_server *server)
++{
++ u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
++ return nfs_page_array_len(0, max_resp_sz);
++}
++
++static void nfs4_free_pages(struct page **pages, size_t size)
++{
++ int i;
++
++ if (!pages)
++ return;
++
++ for (i = 0; i < size; i++) {
++ if (!pages[i])
++ break;
++ __free_page(pages[i]);
++ }
++ kfree(pages);
++}
++
++static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
++{
++ struct page **pages;
++ int i;
++
++ pages = kcalloc(size, sizeof(struct page *), gfp_flags);
++ if (!pages) {
++ dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
++ return NULL;
++ }
++
++ for (i = 0; i < size; i++) {
++ pages[i] = alloc_page(gfp_flags);
++ if (!pages[i]) {
++ dprintk("%s: failed to allocate page\n", __func__);
++ nfs4_free_pages(pages, size);
++ return NULL;
++ }
++ }
++
++ return pages;
++}
++
+ static void nfs4_layoutget_release(void *calldata)
+ {
+ struct nfs4_layoutget *lgp = calldata;
++ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
++ size_t max_pages = max_response_pages(server);
+
+ dprintk("--> %s\n", __func__);
++ nfs4_free_pages(lgp->args.layout.pages, max_pages);
+ put_nfs_open_context(lgp->args.ctx);
+ kfree(calldata);
+ dprintk("<-- %s\n", __func__);
+@@ -5785,9 +5832,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
+ .rpc_release = nfs4_layoutget_release,
+ };
+
+-int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
++int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
+ {
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
++ size_t max_pages = max_response_pages(server);
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
+@@ -5805,6 +5853,13 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
+
+ dprintk("--> %s\n", __func__);
+
++ lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
++ if (!lgp->args.layout.pages) {
++ nfs4_layoutget_release(lgp);
++ return -ENOMEM;
++ }
++ lgp->args.layout.pglen = max_pages * PAGE_SIZE;
++
+ lgp->res.layoutp = &lgp->args.layout;
+ lgp->res.seq_res.sr_slot = NULL;
+ task = rpc_run_task(&task_setup_data);
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index f881a63..3ad6595 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -575,9 +575,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
+ struct nfs_server *server = NFS_SERVER(ino);
+ struct nfs4_layoutget *lgp;
+ struct pnfs_layout_segment *lseg = NULL;
+- struct page **pages = NULL;
+- int i;
+- u32 max_resp_sz, max_pages;
+
+ dprintk("--> %s\n", __func__);
+
+@@ -586,20 +583,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
+ if (lgp == NULL)
+ return NULL;
+
+- /* allocate pages for xdr post processing */
+- max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+- max_pages = max_resp_sz >> PAGE_SHIFT;
+-
+- pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
+- if (!pages)
+- goto out_err_free;
+-
+- for (i = 0; i < max_pages; i++) {
+- pages[i] = alloc_page(gfp_flags);
+- if (!pages[i])
+- goto out_err_free;
+- }
+-
+ lgp->args.minlength = PAGE_CACHE_SIZE;
+ if (lgp->args.minlength > range->length)
+ lgp->args.minlength = range->length;
+@@ -608,39 +591,19 @@ send_layoutget(struct pnfs_layout_hdr *lo,
+ lgp->args.type = server->pnfs_curr_ld->id;
+ lgp->args.inode = ino;
+ lgp->args.ctx = get_nfs_open_context(ctx);
+- lgp->args.layout.pages = pages;
+- lgp->args.layout.pglen = max_pages * PAGE_SIZE;
+ lgp->lsegpp = &lseg;
+ lgp->gfp_flags = gfp_flags;
+
+ /* Synchronously retrieve layout information from server and
+ * store in lseg.
+ */
+- nfs4_proc_layoutget(lgp);
++ nfs4_proc_layoutget(lgp, gfp_flags);
+ if (!lseg) {
+ /* remember that LAYOUTGET failed and suspend trying */
+ set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
+ }
+
+- /* free xdr pages */
+- for (i = 0; i < max_pages; i++)
+- __free_page(pages[i]);
+- kfree(pages);
+-
+ return lseg;
+-
+-out_err_free:
+- /* free any allocated xdr pages, lgp as it's not used */
+- if (pages) {
+- for (i = 0; i < max_pages; i++) {
+- if (!pages[i])
+- break;
+- __free_page(pages[i]);
+- }
+- kfree(pages);
+- }
+- kfree(lgp);
+- return NULL;
+ }
+
+ /* Initiates a LAYOUTRETURN(FILE) */
+diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
+index 53d593a..c946b1b 100644
+--- a/fs/nfs/pnfs.h
++++ b/fs/nfs/pnfs.h
+@@ -162,7 +162,7 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server,
+ struct pnfs_devicelist *devlist);
+ extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
+ struct pnfs_device *dev);
+-extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
++extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
+ extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
+
+ /* pnfs.c */
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 376cd65..6e85ec6 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -3087,4 +3087,6 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
+ return res;
+ }
+
++MODULE_ALIAS("nfs4");
++
+ #endif /* CONFIG_NFS_V4 */
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index c6e523a..301391a 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1742,12 +1742,12 @@ int __init nfs_init_writepagecache(void)
+ nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
+ nfs_wdata_cachep);
+ if (nfs_wdata_mempool == NULL)
+- return -ENOMEM;
++ goto out_destroy_write_cache;
+
+ nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
+ nfs_wdata_cachep);
+ if (nfs_commit_mempool == NULL)
+- return -ENOMEM;
++ goto out_destroy_write_mempool;
+
+ /*
+ * NFS congestion size, scale with available memory.
+@@ -1770,6 +1770,12 @@ int __init nfs_init_writepagecache(void)
+ nfs_congestion_kb = 256*1024;
+
+ return 0;
++
++out_destroy_write_mempool:
++ mempool_destroy(nfs_wdata_mempool);
++out_destroy_write_cache:
++ kmem_cache_destroy(nfs_wdata_cachep);
++ return -ENOMEM;
+ }
+
+ void nfs_destroy_writepagecache(void)
+diff --git a/fs/open.c b/fs/open.c
+index e2b5d51..b8485d3 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -882,9 +882,10 @@ static inline int build_open_flags(int flags, int mode, struct open_flags *op)
+ int lookup_flags = 0;
+ int acc_mode;
+
+- if (!(flags & O_CREAT))
+- mode = 0;
+- op->mode = mode;
++ if (flags & O_CREAT)
++ op->mode = (mode & S_IALLUGO) | S_IFREG;
++ else
++ op->mode = 0;
+
+ /* Must never be set by userspace */
+ flags &= ~FMODE_NONOTIFY;
+diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
+index 2da1715..4619247 100644
+--- a/fs/squashfs/super.c
++++ b/fs/squashfs/super.c
+@@ -290,7 +290,7 @@ handle_fragments:
+
+ check_directory_table:
+ /* Sanity check directory_table */
+- if (msblk->directory_table >= next_table) {
++ if (msblk->directory_table > next_table) {
+ err = -EINVAL;
+ goto failed_mount;
+ }
+diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
+index 580a6d3..c04e0db 100644
+--- a/include/asm-generic/mutex-xchg.h
++++ b/include/asm-generic/mutex-xchg.h
+@@ -26,7 +26,13 @@ static inline void
+ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+ {
+ if (unlikely(atomic_xchg(count, 0) != 1))
+- fail_fn(count);
++ /*
++ * We failed to acquire the lock, so mark it contended
++ * to ensure that any waiting tasks are woken up by the
++ * unlock slow path.
++ */
++ if (likely(atomic_xchg(count, -1) != 1))
++ fail_fn(count);
+ }
+
+ /**
+@@ -43,7 +49,8 @@ static inline int
+ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+ {
+ if (unlikely(atomic_xchg(count, 0) != 1))
+- return fail_fn(count);
++ if (likely(atomic_xchg(count, -1) != 1))
++ return fail_fn(count);
+ return 0;
+ }
+
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 4269c3f..93629fc 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -775,6 +775,27 @@ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size)
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr)
+
++/**
++ * USB_VENDOR_AND_INTERFACE_INFO - describe a specific usb vendor with a class of usb interfaces
++ * @vend: the 16 bit USB Vendor ID
++ * @cl: bInterfaceClass value
++ * @sc: bInterfaceSubClass value
++ * @pr: bInterfaceProtocol value
++ *
++ * This macro is used to create a struct usb_device_id that matches a
++ * specific vendor with a specific class of interfaces.
++ *
++ * This is especially useful when explicitly matching devices that have
++ * vendor specific bDeviceClass values, but standards-compliant interfaces.
++ */
++#define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
++ | USB_DEVICE_ID_MATCH_VENDOR, \
++ .idVendor = (vend), \
++ .bInterfaceClass = (cl), \
++ .bInterfaceSubClass = (sc), \
++ .bInterfaceProtocol = (pr)
++
+ /* ----------------------------------------------------------------------- */
+
+ /* Stuff for dynamic usb ids */
+diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
+index 5bf0790..31fdc48 100644
+--- a/kernel/audit_tree.c
++++ b/kernel/audit_tree.c
+@@ -250,7 +250,6 @@ static void untag_chunk(struct node *p)
+ spin_unlock(&hash_lock);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark(entry);
+- fsnotify_put_mark(entry);
+ goto out;
+ }
+
+@@ -259,7 +258,7 @@ static void untag_chunk(struct node *p)
+
+ fsnotify_duplicate_mark(&new->mark, entry);
+ if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
+- free_chunk(new);
++ fsnotify_put_mark(&new->mark);
+ goto Fallback;
+ }
+
+@@ -293,7 +292,6 @@ static void untag_chunk(struct node *p)
+ spin_unlock(&hash_lock);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark(entry);
+- fsnotify_put_mark(entry);
+ goto out;
+
+ Fallback:
+@@ -322,7 +320,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
+
+ entry = &chunk->mark;
+ if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
+- free_chunk(chunk);
++ fsnotify_put_mark(entry);
+ return -ENOSPC;
+ }
+
+@@ -332,6 +330,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
+ spin_unlock(&hash_lock);
+ chunk->dead = 1;
+ spin_unlock(&entry->lock);
++ fsnotify_get_mark(entry);
+ fsnotify_destroy_mark(entry);
+ fsnotify_put_mark(entry);
+ return 0;
+@@ -396,7 +395,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
+ fsnotify_duplicate_mark(chunk_entry, old_entry);
+ if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
+ spin_unlock(&old_entry->lock);
+- free_chunk(chunk);
++ fsnotify_put_mark(chunk_entry);
+ fsnotify_put_mark(old_entry);
+ return -ENOSPC;
+ }
+@@ -412,6 +411,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
+ spin_unlock(&chunk_entry->lock);
+ spin_unlock(&old_entry->lock);
+
++ fsnotify_get_mark(chunk_entry);
+ fsnotify_destroy_mark(chunk_entry);
+
+ fsnotify_put_mark(chunk_entry);
+@@ -445,7 +445,6 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
+ spin_unlock(&old_entry->lock);
+ fsnotify_destroy_mark(old_entry);
+ fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
+- fsnotify_put_mark(old_entry); /* and kill it */
+ return 0;
+ }
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index e0431c4..910db7d 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4355,6 +4355,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
+ #endif
+
++static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
++{
++ u64 temp = (__force u64) rtime;
++
++ temp *= (__force u64) utime;
++
++ if (sizeof(cputime_t) == 4)
++ temp = div_u64(temp, (__force u32) total);
++ else
++ temp = div64_u64(temp, (__force u64) total);
++
++ return (__force cputime_t) temp;
++}
++
+ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ {
+ cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
+@@ -4364,13 +4378,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ */
+ rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
+
+- if (total) {
+- u64 temp = rtime;
+-
+- temp *= utime;
+- do_div(temp, total);
+- utime = (cputime_t)temp;
+- } else
++ if (total)
++ utime = scale_utime(utime, rtime, total);
++ else
+ utime = rtime;
+
+ /*
+@@ -4397,13 +4407,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ total = cputime_add(cputime.utime, cputime.stime);
+ rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
+
+- if (total) {
+- u64 temp = rtime;
+-
+- temp *= cputime.utime;
+- do_div(temp, total);
+- utime = (cputime_t)temp;
+- } else
++ if (total)
++ utime = scale_utime(cputime.utime, rtime, total);
++ else
+ utime = rtime;
+
+ sig->prev_utime = max(sig->prev_utime, utime);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 48febd7..86eb848 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1977,10 +1977,10 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
+ * proportional to the fraction of recently scanned pages on
+ * each list that were recently referenced and in active use.
+ */
+- ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
++ ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
+ ap /= reclaim_stat->recent_rotated[0] + 1;
+
+- fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
++ fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
+ fp /= reclaim_stat->recent_rotated[1] + 1;
+ spin_unlock_irq(&zone->lru_lock);
+
+@@ -1993,7 +1993,7 @@ out:
+ unsigned long scan;
+
+ scan = zone_nr_lru_pages(zone, sc, l);
+- if (priority || noswap) {
++ if (priority || noswap || !vmscan_swappiness(sc)) {
+ scan >>= priority;
+ if (!scan && force_scan)
+ scan = SWAP_CLUSTER_MAX;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 643a41b..6033f02 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -1411,7 +1411,13 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
+ if (conn->type == ACL_LINK) {
+ conn->state = BT_CONFIG;
+ hci_conn_hold(conn);
+- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
++
++ if (!conn->out &&
++ !(conn->ssp_mode && conn->hdev->ssp_mode) &&
++ !hci_find_link_key(hdev, &ev->bdaddr))
++ conn->disc_timeout = HCI_PAIRING_TIMEOUT;
++ else
++ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+ } else
+ conn->state = BT_CONNECTED;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 17b5b1c..dd76177 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -862,6 +862,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
+ write_lock_bh(&conn->chan_lock);
+
+ hci_conn_hold(conn->hcon);
++ conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
+
+ bacpy(&bt_sk(sk)->src, conn->src);
+ bacpy(&bt_sk(sk)->dst, conn->dst);
+@@ -2263,12 +2264,14 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+
+- switch (type) {
+- case L2CAP_CONF_RFC:
+- if (olen == sizeof(rfc))
+- memcpy(&rfc, (void *)val, olen);
+- goto done;
+- }
++ if (type != L2CAP_CONF_RFC)
++ continue;
++
++ if (olen != sizeof(rfc))
++ break;
++
++ memcpy(&rfc, (void *)val, olen);
++ goto done;
+ }
+
+ /* Use sane default values in case a misbehaving remote device
+diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
+index 75c3582..fb85d37 100644
+--- a/net/dccp/ccid.h
++++ b/net/dccp/ccid.h
+@@ -246,7 +246,7 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk,
+ u32 __user *optval, int __user *optlen)
+ {
+ int rc = -ENOPROTOOPT;
+- if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
++ if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
+ rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len,
+ optval, optlen);
+ return rc;
+@@ -257,7 +257,7 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk,
+ u32 __user *optval, int __user *optlen)
+ {
+ int rc = -ENOPROTOOPT;
+- if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
++ if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
+ rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len,
+ optval, optlen);
+ return rc;
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 9ed2cd0..3282453 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -315,7 +315,6 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+ */
+ void svc_xprt_enqueue(struct svc_xprt *xprt)
+ {
+- struct svc_serv *serv = xprt->xpt_server;
+ struct svc_pool *pool;
+ struct svc_rqst *rqstp;
+ int cpu;
+@@ -361,8 +360,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
+ rqstp, rqstp->rq_xprt);
+ rqstp->rq_xprt = xprt;
+ svc_xprt_get(xprt);
+- rqstp->rq_reserved = serv->sv_max_mesg;
+- atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+ pool->sp_stats.threads_woken++;
+ wake_up(&rqstp->rq_wait);
+ } else {
+@@ -642,8 +639,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
+ if (xprt) {
+ rqstp->rq_xprt = xprt;
+ svc_xprt_get(xprt);
+- rqstp->rq_reserved = serv->sv_max_mesg;
+- atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+
+ /* As there is a shortage of threads and this request
+ * had to be queued, don't allow the thread to wait so
+@@ -740,6 +735,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
+ else
+ len = xprt->xpt_ops->xpo_recvfrom(rqstp);
+ dprintk("svc: got len=%d\n", len);
++ rqstp->rq_reserved = serv->sv_max_mesg;
++ atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+ }
+ svc_xprt_received(xprt);
+
+@@ -796,7 +793,8 @@ int svc_send(struct svc_rqst *rqstp)
+
+ /* Grab mutex to serialize outgoing data. */
+ mutex_lock(&xprt->xpt_mutex);
+- if (test_bit(XPT_DEAD, &xprt->xpt_flags))
++ if (test_bit(XPT_DEAD, &xprt->xpt_flags)
++ || test_bit(XPT_CLOSE, &xprt->xpt_flags))
+ len = -ENOTCONN;
+ else
+ len = xprt->xpt_ops->xpo_sendto(rqstp);
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 71bed1c..296192c 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1136,9 +1136,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+ if (len >= 0)
+ svsk->sk_tcplen += len;
+ if (len != want) {
++ svc_tcp_save_pages(svsk, rqstp);
+ if (len < 0 && len != -EAGAIN)
+ goto err_other;
+- svc_tcp_save_pages(svsk, rqstp);
+ dprintk("svc: incomplete TCP record (%d of %d)\n",
+ svsk->sk_tcplen, svsk->sk_reclen);
+ goto err_noclose;
+diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
+index 254ab52..2210b83 100644
+--- a/sound/pci/hda/hda_proc.c
++++ b/sound/pci/hda/hda_proc.c
+@@ -412,7 +412,7 @@ static void print_digital_conv(struct snd_info_buffer *buffer,
+ if (digi1 & AC_DIG1_EMPHASIS)
+ snd_iprintf(buffer, " Preemphasis");
+ if (digi1 & AC_DIG1_COPYRIGHT)
+- snd_iprintf(buffer, " Copyright");
++ snd_iprintf(buffer, " Non-Copyright");
+ if (digi1 & AC_DIG1_NONAUDIO)
+ snd_iprintf(buffer, " Non-Audio");
+ if (digi1 & AC_DIG1_PROFESSIONAL)
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 35abe3c..b22989e 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -276,6 +276,10 @@ static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
+ int type = dir ? HDA_INPUT : HDA_OUTPUT;
+ struct snd_kcontrol_new knew =
+ HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type);
++ if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_MUTE) == 0) {
++ snd_printdd("Skipping '%s %s Switch' (no mute on node 0x%x)\n", pfx, dirstr[dir], nid);
++ return 0;
++ }
+ sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]);
+ return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
+ }
+@@ -287,6 +291,10 @@ static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
+ int type = dir ? HDA_INPUT : HDA_OUTPUT;
+ struct snd_kcontrol_new knew =
+ HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type);
++ if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_NUM_STEPS) == 0) {
++ snd_printdd("Skipping '%s %s Volume' (no amp on node 0x%x)\n", pfx, dirstr[dir], nid);
++ return 0;
++ }
+ sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]);
+ return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
+ }
+diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
+index 90117f8..90e5005 100644
+--- a/sound/soc/codecs/wm9712.c
++++ b/sound/soc/codecs/wm9712.c
+@@ -270,7 +270,7 @@ SOC_DAPM_ENUM("Route", wm9712_enum[9]);
+
+ /* Mic select */
+ static const struct snd_kcontrol_new wm9712_mic_src_controls =
+-SOC_DAPM_ENUM("Route", wm9712_enum[7]);
++SOC_DAPM_ENUM("Mic Source Select", wm9712_enum[7]);
+
+ /* diff select */
+ static const struct snd_kcontrol_new wm9712_diff_sel_controls =
+@@ -289,7 +289,9 @@ SND_SOC_DAPM_MUX("Left Capture Select", SND_SOC_NOPM, 0, 0,
+ &wm9712_capture_selectl_controls),
+ SND_SOC_DAPM_MUX("Right Capture Select", SND_SOC_NOPM, 0, 0,
+ &wm9712_capture_selectr_controls),
+-SND_SOC_DAPM_MUX("Mic Select Source", SND_SOC_NOPM, 0, 0,
++SND_SOC_DAPM_MUX("Left Mic Select Source", SND_SOC_NOPM, 0, 0,
++ &wm9712_mic_src_controls),
++SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0,
+ &wm9712_mic_src_controls),
+ SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0,
+ &wm9712_diff_sel_controls),
+@@ -317,6 +319,7 @@ SND_SOC_DAPM_PGA("Out 3 PGA", AC97_INT_PAGING, 5, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Line PGA", AC97_INT_PAGING, 2, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Phone PGA", AC97_INT_PAGING, 1, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Mic PGA", AC97_INT_PAGING, 0, 1, NULL, 0),
++SND_SOC_DAPM_PGA("Differential Mic", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_INT_PAGING, 10, 1),
+ SND_SOC_DAPM_OUTPUT("MONOOUT"),
+ SND_SOC_DAPM_OUTPUT("HPOUTL"),
+@@ -377,6 +380,18 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = {
+ {"Mic PGA", NULL, "MIC1"},
+ {"Mic PGA", NULL, "MIC2"},
+
++ /* microphones */
++ {"Differential Mic", NULL, "MIC1"},
++ {"Differential Mic", NULL, "MIC2"},
++ {"Left Mic Select Source", "Mic 1", "MIC1"},
++ {"Left Mic Select Source", "Mic 2", "MIC2"},
++ {"Left Mic Select Source", "Stereo", "MIC1"},
++ {"Left Mic Select Source", "Differential", "Differential Mic"},
++ {"Right Mic Select Source", "Mic 1", "MIC1"},
++ {"Right Mic Select Source", "Mic 2", "MIC2"},
++ {"Right Mic Select Source", "Stereo", "MIC2"},
++ {"Right Mic Select Source", "Differential", "Differential Mic"},
++
+ /* left capture selector */
+ {"Left Capture Select", "Mic", "MIC1"},
+ {"Left Capture Select", "Speaker Mixer", "Speaker Mixer"},
diff --git a/3.2.28/4420_grsecurity-2.9.1-3.2.29-201209122157.patch b/3.2.29/4420_grsecurity-2.9.1-3.2.29-201209171824.patch
index 228fc5d..4eebb32 100644
--- a/3.2.28/4420_grsecurity-2.9.1-3.2.29-201209122157.patch
+++ b/3.2.29/4420_grsecurity-2.9.1-3.2.29-201209171824.patch
@@ -20653,7 +20653,7 @@ index 94a4672..5c6b853 100644
local_irq_disable();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index 407789b..942f6a6 100644
+index 407789b..5570a86 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1305,7 +1305,11 @@ static void reload_tss(void)
@@ -20668,7 +20668,18 @@ index 407789b..942f6a6 100644
load_TR_desc();
}
-@@ -2634,8 +2638,11 @@ static __init int hardware_setup(void)
+@@ -1504,6 +1508,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
+ vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
++#endif
++
+ rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
+ vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+ vmx->loaded_vmcs->cpu = cpu;
+@@ -2634,8 +2642,11 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
@@ -20682,7 +20693,18 @@ index 407789b..942f6a6 100644
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
-@@ -3649,7 +3656,7 @@ static void vmx_set_constant_host_state(void)
+@@ -3637,7 +3648,10 @@ static void vmx_set_constant_host_state(void)
+
+ vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
+ vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
++#endif
+
+ vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
+ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+@@ -3649,7 +3663,7 @@ static void vmx_set_constant_host_state(void)
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
@@ -20691,7 +20713,7 @@ index 407789b..942f6a6 100644
rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-@@ -6172,6 +6179,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6172,6 +6186,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"jmp .Lkvm_vmx_return \n\t"
".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
".Lkvm_vmx_return: "
@@ -20704,7 +20726,7 @@ index 407789b..942f6a6 100644
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%"R"sp) \n\t"
"pop %0 \n\t"
-@@ -6220,6 +6233,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6220,6 +6240,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
[wordsize]"i"(sizeof(ulong))
@@ -20716,7 +20738,7 @@ index 407789b..942f6a6 100644
: "cc", "memory"
, R"ax", R"bx", R"di", R"si"
#ifdef CONFIG_X86_64
-@@ -6248,7 +6266,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6248,7 +6273,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
}
}
@@ -44609,10 +44631,36 @@ index f3a257d..715ac0f 100644
}
EXPORT_SYMBOL_GPL(debugfs_create_dir);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
-index af11098..81e3bbe 100644
+index af11098..4e6f039 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
-@@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
+@@ -640,6 +640,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct dentry *lower_old_dir_dentry;
+ struct dentry *lower_new_dir_dentry;
+ struct dentry *trap = NULL;
++ struct inode *target_inode;
+
+ lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
+ lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
+@@ -647,6 +648,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ dget(lower_new_dentry);
+ lower_old_dir_dentry = dget_parent(lower_old_dentry);
+ lower_new_dir_dentry = dget_parent(lower_new_dentry);
++ target_inode = new_dentry->d_inode;
+ trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ /* source should not be ancestor of target */
+ if (trap == lower_old_dentry) {
+@@ -662,6 +664,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ lower_new_dir_dentry->d_inode, lower_new_dentry);
+ if (rc)
+ goto out_lock;
++ if (target_inode)
++ fsstack_copy_attr_all(target_inode,
++ ecryptfs_inode_to_lower(target_inode));
+ fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
+ if (new_dir != old_dir)
+ fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
+@@ -691,7 +696,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
old_fs = get_fs();
set_fs(get_ds());
rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
@@ -44621,7 +44669,7 @@ index af11098..81e3bbe 100644
lower_bufsiz);
set_fs(old_fs);
if (rc < 0)
-@@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+@@ -737,7 +742,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
}
old_fs = get_fs();
set_fs(get_ds());
@@ -44630,7 +44678,7 @@ index af11098..81e3bbe 100644
set_fs(old_fs);
if (rc < 0) {
kfree(buf);
-@@ -752,7 +752,7 @@ out:
+@@ -752,7 +757,7 @@ out:
static void
ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
{
@@ -63491,6 +63539,19 @@ index b16f653..eb908f4 100644
#define request_module(mod...) __request_module(true, mod)
#define request_module_nowait(mod...) __request_module(false, mod)
#define try_then_request_module(x, mod...) \
+diff --git a/include/linux/kobject.h b/include/linux/kobject.h
+index ad81e1c..445f978 100644
+--- a/include/linux/kobject.h
++++ b/include/linux/kobject.h
+@@ -226,7 +226,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
+
+ static inline __printf(2, 3)
+ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
+-{ return 0; }
++{ return -ENOMEM; }
+
+ static inline int kobject_action_type(const char *buf, size_t count,
+ enum kobject_action *type)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6136821..b3ba6ba 100644
--- a/include/linux/kvm_host.h
@@ -64149,7 +64210,7 @@ index 4633b2f..988bc08 100644
atomic_t refcnt;
unsigned int max_seq_nr;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
-index b1f8912..c955bff 100644
+index b1f8912..8335421 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -748,8 +748,8 @@ struct perf_event {
@@ -64163,7 +64224,14 @@ index b1f8912..c955bff 100644
/*
* These are the total time in nanoseconds that the event
-@@ -800,8 +800,8 @@ struct perf_event {
+@@ -794,14 +794,14 @@ struct perf_event {
+ struct hw_perf_event hw;
+
+ struct perf_event_context *ctx;
+- struct file *filp;
++ atomic_long_t refcount;
+
+ /*
* These accumulate total time (in nanoseconds) that children
* events have been enabled and running, respectively.
*/
@@ -67520,7 +67588,7 @@ index 63786e7..0780cac 100644
#ifdef CONFIG_MODULE_UNLOAD
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 58690af..d903d75 100644
+index 58690af..66d5a8e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
@@ -67541,7 +67609,38 @@ index 58690af..d903d75 100644
}
static u64 perf_event_read(struct perf_event *event)
-@@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+@@ -3011,12 +3011,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
+ /*
+ * Called when the last reference to the file is gone.
+ */
+-static int perf_release(struct inode *inode, struct file *file)
++static void put_event(struct perf_event *event)
+ {
+- struct perf_event *event = file->private_data;
+ struct task_struct *owner;
+
+- file->private_data = NULL;
++ if (!atomic_long_dec_and_test(&event->refcount))
++ return;
+
+ rcu_read_lock();
+ owner = ACCESS_ONCE(event->owner);
+@@ -3051,7 +3051,13 @@ static int perf_release(struct inode *inode, struct file *file)
+ put_task_struct(owner);
+ }
+
+- return perf_event_release_kernel(event);
++ perf_event_release_kernel(event);
++}
++
++static int perf_release(struct inode *inode, struct file *file)
++{
++ put_event(file->private_data);
++ return 0;
+ }
+
+ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+@@ -3065,9 +3071,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
mutex_lock(&event->child_mutex);
total += perf_event_read(event);
*enabled += event->total_time_enabled +
@@ -67553,7 +67652,51 @@ index 58690af..d903d75 100644
list_for_each_entry(child, &event->child_list, child_list) {
total += perf_event_read(child);
-@@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
+@@ -3304,7 +3310,7 @@ unlock:
+
+ static const struct file_operations perf_fops;
+
+-static struct perf_event *perf_fget_light(int fd, int *fput_needed)
++static struct file *perf_fget_light(int fd, int *fput_needed)
+ {
+ struct file *file;
+
+@@ -3318,7 +3324,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
+ return ERR_PTR(-EBADF);
+ }
+
+- return file->private_data;
++ return file;
+ }
+
+ static int perf_event_set_output(struct perf_event *event,
+@@ -3350,19 +3356,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+
+ case PERF_EVENT_IOC_SET_OUTPUT:
+ {
++ struct file *output_file = NULL;
+ struct perf_event *output_event = NULL;
+ int fput_needed = 0;
+ int ret;
+
+ if (arg != -1) {
+- output_event = perf_fget_light(arg, &fput_needed);
+- if (IS_ERR(output_event))
+- return PTR_ERR(output_event);
++ output_file = perf_fget_light(arg, &fput_needed);
++ if (IS_ERR(output_file))
++ return PTR_ERR(output_file);
++ output_event = output_file->private_data;
+ }
+
+ ret = perf_event_set_output(event, output_event);
+ if (output_event)
+- fput_light(output_event->filp, fput_needed);
++ fput_light(output_file, fput_needed);
+
+ return ret;
+ }
+@@ -3474,10 +3482,10 @@ void perf_event_update_userpage(struct perf_event *event)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
@@ -67566,7 +67709,7 @@ index 58690af..d903d75 100644
barrier();
++userpg->lock;
-@@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
+@@ -3906,11 +3914,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
values[n++] = perf_event_count(event);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] = enabled +
@@ -67580,7 +67723,7 @@ index 58690af..d903d75 100644
}
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
-@@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+@@ -4561,12 +4569,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
* need to add enough zero bytes after the string to handle
* the 64bit alignment we do later.
*/
@@ -67595,7 +67738,15 @@ index 58690af..d903d75 100644
if (IS_ERR(name)) {
name = strncpy(tmp, "//toolong", sizeof(tmp));
goto got_name;
-@@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+@@ -5912,6 +5920,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+
+ mutex_init(&event->mmap_mutex);
+
++ atomic_long_set(&event->refcount, 1);
+ event->cpu = cpu;
+ event->attr = *attr;
+ event->group_leader = group_leader;
+@@ -5921,7 +5930,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
event->parent = parent_event;
event->ns = get_pid_ns(current->nsproxy->pid_ns);
@@ -67604,7 +67755,40 @@ index 58690af..d903d75 100644
event->state = PERF_EVENT_STATE_INACTIVE;
-@@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
+@@ -6182,12 +6191,12 @@ SYSCALL_DEFINE5(perf_event_open,
+ return event_fd;
+
+ if (group_fd != -1) {
+- group_leader = perf_fget_light(group_fd, &fput_needed);
+- if (IS_ERR(group_leader)) {
+- err = PTR_ERR(group_leader);
++ group_file = perf_fget_light(group_fd, &fput_needed);
++ if (IS_ERR(group_file)) {
++ err = PTR_ERR(group_file);
+ goto err_fd;
+ }
+- group_file = group_leader->filp;
++ group_leader = group_file->private_data;
+ if (flags & PERF_FLAG_FD_OUTPUT)
+ output_event = group_leader;
+ if (flags & PERF_FLAG_FD_NO_GROUP)
+@@ -6322,7 +6331,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ put_ctx(gctx);
+ }
+
+- event->filp = event_file;
+ WARN_ON_ONCE(ctx->parent_ctx);
+ mutex_lock(&ctx->mutex);
+
+@@ -6412,7 +6420,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+ goto err_free;
+ }
+
+- event->filp = NULL;
+ WARN_ON_ONCE(ctx->parent_ctx);
+ mutex_lock(&ctx->mutex);
+ perf_install_in_context(ctx, event, cpu);
+@@ -6443,10 +6450,10 @@ static void sync_child_event(struct perf_event *child_event,
/*
* Add back the child's count to the parent's count:
*/
@@ -67618,6 +67802,64 @@ index 58690af..d903d75 100644
&parent_event->child_total_time_running);
/*
+@@ -6461,7 +6468,7 @@ static void sync_child_event(struct perf_event *child_event,
+ * Release the parent event, if this was the last
+ * reference to it.
+ */
+- fput(parent_event->filp);
++ put_event(parent_event);
+ }
+
+ static void
+@@ -6537,9 +6544,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
+ *
+ * __perf_event_exit_task()
+ * sync_child_event()
+- * fput(parent_event->filp)
+- * perf_release()
+- * mutex_lock(&ctx->mutex)
++ * put_event()
++ * mutex_lock(&ctx->mutex)
+ *
+ * But since its the parent context it won't be the same instance.
+ */
+@@ -6607,7 +6613,7 @@ static void perf_free_event(struct perf_event *event,
+ list_del_init(&event->child_list);
+ mutex_unlock(&parent->child_mutex);
+
+- fput(parent->filp);
++ put_event(parent);
+
+ perf_group_detach(event);
+ list_del_event(event, ctx);
+@@ -6687,6 +6693,12 @@ inherit_event(struct perf_event *parent_event,
+ NULL, NULL);
+ if (IS_ERR(child_event))
+ return child_event;
++
++ if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
++ free_event(child_event);
++ return NULL;
++ }
++
+ get_ctx(child_ctx);
+
+ /*
+@@ -6728,14 +6740,6 @@ inherit_event(struct perf_event *parent_event,
+ raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+ /*
+- * Get a reference to the parent filp - we will fput it
+- * when the child event exits. This is safe to do because
+- * we are in the parent and we know that the filp still
+- * exists and has a nonzero count:
+- */
+- atomic_long_inc(&parent_event->filp->f_count);
+-
+- /*
+ * Link this into the parent event's child list
+ */
+ WARN_ON_ONCE(parent_event->ctx->parent_ctx);
diff --git a/kernel/exit.c b/kernel/exit.c
index 5a8a66e..ded4680 100644
--- a/kernel/exit.c
@@ -75544,7 +75786,7 @@ index cd3ab93..3ff7a99 100644
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
diff --git a/mm/slob.c b/mm/slob.c
-index 8105be4..3e3e9cd 100644
+index 8105be4..33e52d7 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -29,7 +29,7 @@
@@ -75564,17 +75806,18 @@ index 8105be4..3e3e9cd 100644
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
-@@ -102,7 +103,8 @@ struct slob_page {
+@@ -100,9 +101,8 @@ struct slob_page {
+ union {
+ struct {
unsigned long flags; /* mandatory */
- atomic_t _count; /* mandatory */
+- atomic_t _count; /* mandatory */
slobidx_t units; /* free units left in page */
- unsigned long pad[2];
-+ unsigned long pad[1];
+ unsigned long size; /* size when >=PAGE_SIZE */
slob_t *free; /* first free slob_t in page */
struct list_head list; /* linked list of free pages */
};
-@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
+@@ -135,7 +135,7 @@ static LIST_HEAD(free_slob_large);
*/
static inline int is_slob_page(struct slob_page *sp)
{
@@ -75583,7 +75826,7 @@ index 8105be4..3e3e9cd 100644
}
static inline void set_slob_page(struct slob_page *sp)
-@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
+@@ -150,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
static inline struct slob_page *slob_page(const void *addr)
{
@@ -75592,7 +75835,7 @@ index 8105be4..3e3e9cd 100644
}
/*
-@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
+@@ -210,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
/*
* Return the size of a slob block.
*/
@@ -75601,7 +75844,7 @@ index 8105be4..3e3e9cd 100644
{
if (s->units > 0)
return s->units;
-@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
+@@ -220,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
/*
* Return the next free slob block pointer after this one.
*/
@@ -75610,7 +75853,7 @@ index 8105be4..3e3e9cd 100644
{
slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
slobidx_t next;
-@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
+@@ -235,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
/*
* Returns true if s is the last free block in its page.
*/
@@ -75619,7 +75862,7 @@ index 8105be4..3e3e9cd 100644
{
return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
-@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
+@@ -254,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
if (!page)
return NULL;
@@ -75627,7 +75870,7 @@ index 8105be4..3e3e9cd 100644
return page_address(page);
}
-@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+@@ -370,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
if (!b)
return NULL;
sp = slob_page(b);
@@ -75640,7 +75883,7 @@ index 8105be4..3e3e9cd 100644
INIT_LIST_HEAD(&sp->list);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
set_slob_page_free(sp, slob_list);
-@@ -476,10 +479,9 @@ out:
+@@ -476,10 +477,9 @@ out:
* End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
*/
@@ -75653,7 +75896,7 @@ index 8105be4..3e3e9cd 100644
void *ret;
gfp &= gfp_allowed_mask;
-@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+@@ -494,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
if (!m)
return NULL;
@@ -75665,7 +75908,7 @@ index 8105be4..3e3e9cd 100644
ret = (void *)m + align;
trace_kmalloc_node(_RET_IP_, ret,
-@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+@@ -506,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
gfp |= __GFP_COMP;
ret = slob_new_pages(gfp, order, node);
if (ret) {
@@ -75695,7 +75938,7 @@ index 8105be4..3e3e9cd 100644
return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
-@@ -533,13 +547,88 @@ void kfree(const void *block)
+@@ -533,13 +545,88 @@ void kfree(const void *block)
sp = slob_page(block);
if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -75753,7 +75996,7 @@ index 8105be4..3e3e9cd 100644
+ base = (void *)((unsigned long)ptr & PAGE_MASK);
+ free = sp->free;
+
-+ while (!slob_last(free) && (void *)free <= ptr) {
++ while ((void *)free <= ptr) {
+ base = free + slob_units(free);
+ free = slob_next(free);
+ }
@@ -75787,7 +76030,7 @@ index 8105be4..3e3e9cd 100644
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
-@@ -552,10 +641,10 @@ size_t ksize(const void *block)
+@@ -552,10 +639,10 @@ size_t ksize(const void *block)
sp = slob_page(block);
if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -75801,7 +76044,7 @@ index 8105be4..3e3e9cd 100644
}
EXPORT_SYMBOL(ksize);
-@@ -571,8 +660,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+@@ -571,8 +658,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
{
struct kmem_cache *c;
@@ -75815,7 +76058,7 @@ index 8105be4..3e3e9cd 100644
if (c) {
c->name = name;
-@@ -614,17 +708,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+@@ -614,17 +706,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
lockdep_trace_alloc(flags);
@@ -75841,7 +76084,7 @@ index 8105be4..3e3e9cd 100644
if (c->ctor)
c->ctor(b);
-@@ -636,10 +738,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
+@@ -636,10 +736,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
static void __kmem_cache_free(void *b, int size)
{
@@ -75860,7 +76103,7 @@ index 8105be4..3e3e9cd 100644
}
static void kmem_rcu_free(struct rcu_head *head)
-@@ -652,17 +760,31 @@ static void kmem_rcu_free(struct rcu_head *head)
+@@ -652,17 +758,31 @@ static void kmem_rcu_free(struct rcu_head *head)
void kmem_cache_free(struct kmem_cache *c, void *b)
{
@@ -80767,10 +81010,31 @@ index 9414b9c..d14ecb6 100644
if (init_replay) {
err = xfrm_init_replay(x);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
-index 7cae73e..fe80421 100644
+index 7cae73e..ca5c26f 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
-@@ -1401,6 +1401,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
+@@ -862,6 +862,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
+ {
+ struct xfrm_dump_info info;
+ struct sk_buff *skb;
++ int err;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+@@ -872,9 +873,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
+ info.nlmsg_seq = seq;
+ info.nlmsg_flags = 0;
+
+- if (dump_one_state(x, 0, &info)) {
++ err = dump_one_state(x, 0, &info);
++ if (err) {
+ kfree_skb(skb);
+- return NULL;
++ return ERR_PTR(err);
+ }
+
+ return skb;
+@@ -1401,6 +1403,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
struct xfrm_user_tmpl *up = &vec[i];
struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
diff --git a/3.2.28/4430_grsec-remove-localversion-grsec.patch b/3.2.29/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.2.28/4430_grsec-remove-localversion-grsec.patch
+++ b/3.2.29/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.2.28/4435_grsec-mute-warnings.patch b/3.2.29/4435_grsec-mute-warnings.patch
index e85abd6..e85abd6 100644
--- a/3.2.28/4435_grsec-mute-warnings.patch
+++ b/3.2.29/4435_grsec-mute-warnings.patch
diff --git a/3.2.28/4440_grsec-remove-protected-paths.patch b/3.2.29/4440_grsec-remove-protected-paths.patch
index 637934a..637934a 100644
--- a/3.2.28/4440_grsec-remove-protected-paths.patch
+++ b/3.2.29/4440_grsec-remove-protected-paths.patch
diff --git a/3.2.28/4450_grsec-kconfig-default-gids.patch b/3.2.29/4450_grsec-kconfig-default-gids.patch
index 0ab1250..0ab1250 100644
--- a/3.2.28/4450_grsec-kconfig-default-gids.patch
+++ b/3.2.29/4450_grsec-kconfig-default-gids.patch
diff --git a/3.2.28/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.29/4465_selinux-avc_audit-log-curr_ip.patch
index 48acad7..48acad7 100644
--- a/3.2.28/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.2.29/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.2.28/4470_disable-compat_vdso.patch b/3.2.29/4470_disable-compat_vdso.patch
index 4742d01..4742d01 100644
--- a/3.2.28/4470_disable-compat_vdso.patch
+++ b/3.2.29/4470_disable-compat_vdso.patch
diff --git a/3.5.3/0000_README b/3.5.4/0000_README
index c901840..2253a6b 100644
--- a/3.5.3/0000_README
+++ b/3.5.4/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-2.9.1-3.5.3-201209131726.patch
+Patch: 4420_grsecurity-2.9.1-3.5.4-201209171824.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.5.3/4420_grsecurity-2.9.1-3.5.3-201209131726.patch b/3.5.4/4420_grsecurity-2.9.1-3.5.4-201209171824.patch
index fa219c0..ca159d7 100644
--- a/3.5.3/4420_grsecurity-2.9.1-3.5.3-201209131726.patch
+++ b/3.5.4/4420_grsecurity-2.9.1-3.5.4-201209171824.patch
@@ -275,7 +275,7 @@ index 13d6166..8c235b6 100644
==============================================================
diff --git a/Makefile b/Makefile
-index c901aae..0f96503 100644
+index 6453ead..f5148e2 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -3244,7 +3244,7 @@ index 4ce7a01..449202a 100644
#endif /* __ASM_OPENRISC_CACHE_H */
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
-index 6c6defc..d30653d 100644
+index af9cf30..2aae9b2 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
@@ -4214,7 +4214,7 @@ index 2e3200c..72095ce 100644
/* Find this entry, or if that fails, the next avail. entry */
while (entry->jump[0]) {
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
-index 710f400..d00ebe6 100644
+index 1a1f2dd..f4d1bb4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
@@ -4228,7 +4228,7 @@ index 710f400..d00ebe6 100644
#endif
show_stack(current, (unsigned long *) regs->gpr[1]);
if (!user_mode(regs))
-@@ -1189,10 +1189,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+@@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
if (!firstframe || ip != lr) {
@@ -4241,7 +4241,7 @@ index 710f400..d00ebe6 100644
(void *)current->ret_stack[curr_frame].ret);
curr_frame--;
}
-@@ -1212,7 +1212,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+@@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
struct pt_regs *regs = (struct pt_regs *)
(sp + STACK_FRAME_OVERHEAD);
lr = regs->link;
@@ -4250,7 +4250,7 @@ index 710f400..d00ebe6 100644
regs->trap, (void *)regs->nip, (void *)lr);
firstframe = 1;
}
-@@ -1254,58 +1254,3 @@ void __ppc64_runlatch_off(void)
+@@ -1246,58 +1246,3 @@ void __ppc64_runlatch_off(void)
mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
@@ -4395,7 +4395,7 @@ index f2496f2..4e3cc47 100644
}
#endif
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
-index 1589723..cefe690 100644
+index ae0843f..f16372c 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
@@ -9067,7 +9067,7 @@ index 49331be..9706065 100644
".previous"
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
-index eaff479..3025a63 100644
+index eaff479..1eff9b5 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
@@ -9079,6 +9079,15 @@ index eaff479..3025a63 100644
extern int local_apic_timer_c2_ok;
extern int disable_apic;
+@@ -390,7 +390,7 @@ struct apic {
+ */
+ int (*x86_32_numa_cpu_node)(int cpu);
+ #endif
+-};
++} __do_const;
+
+ /*
+ * Pointer to the local APIC driver in use on this system (there's
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
index 20370c6..a2eb9b0 100644
--- a/arch/x86/include/asm/apm.h
@@ -13940,6 +13949,19 @@ index 5f0ff59..f9e01bc 100644
eoi_ioapic_irq(irq, cfg);
}
+diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
+index 3fe9866..6abf259 100644
+--- a/arch/x86/kernel/apic/probe_64.c
++++ b/arch/x86/kernel/apic/probe_64.c
+@@ -50,7 +50,7 @@ void __init default_setup_apic_routing(void)
+
+ if (is_vsmp_box()) {
+ /* need to update phys_pkg_id */
+- apic->phys_pkg_id = apicid_phys_pkg_id;
++ *(void **)&apic->phys_pkg_id = apicid_phys_pkg_id;
+ }
+ }
+
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 07b0c0d..1df6f42 100644
--- a/arch/x86/kernel/apm_32.c
@@ -14469,6 +14491,24 @@ index c4706cf..264b0f7 100644
}
}
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 187c294..28a069c 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -1811,10 +1811,10 @@ __init int intel_pmu_init(void)
+ * v2 and above have a perf capabilities MSR
+ */
+ if (version > 1) {
+- u64 capabilities;
++ u64 capabilities = x86_pmu.intel_cap.capabilities;
+
+- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
+- x86_pmu.intel_cap.capabilities = capabilities;
++ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
++ x86_pmu.intel_cap.capabilities = capabilities;
+ }
+
+ intel_ds_init();
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 13ad899..f642b9a 100644
--- a/arch/x86/kernel/crash.c
@@ -20412,7 +20452,7 @@ index f75af40..285b18f 100644
local_irq_disable();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index 86c8704..e8ee2ac 100644
+index 86c8704..d9277bb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1317,7 +1317,11 @@ static void reload_tss(void)
@@ -20427,7 +20467,18 @@ index 86c8704..e8ee2ac 100644
load_TR_desc();
}
-@@ -2650,8 +2654,11 @@ static __init int hardware_setup(void)
+@@ -1527,6 +1531,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
+ vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
++#endif
++
+ rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
+ vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+ vmx->loaded_vmcs->cpu = cpu;
+@@ -2650,8 +2658,11 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
@@ -20441,7 +20492,18 @@ index 86c8704..e8ee2ac 100644
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
-@@ -3719,7 +3726,7 @@ static void vmx_set_constant_host_state(void)
+@@ -3697,7 +3708,10 @@ static void vmx_set_constant_host_state(void)
+
+ vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
+ vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
++#endif
+
+ vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
+ #ifdef CONFIG_X86_64
+@@ -3719,7 +3733,7 @@ static void vmx_set_constant_host_state(void)
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
@@ -20450,7 +20512,7 @@ index 86c8704..e8ee2ac 100644
rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-@@ -6257,6 +6264,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6257,6 +6271,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"jmp .Lkvm_vmx_return \n\t"
".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
".Lkvm_vmx_return: "
@@ -20463,7 +20525,7 @@ index 86c8704..e8ee2ac 100644
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%"R"sp) \n\t"
"pop %0 \n\t"
-@@ -6305,6 +6318,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6305,6 +6325,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
[wordsize]"i"(sizeof(ulong))
@@ -20475,7 +20537,7 @@ index 86c8704..e8ee2ac 100644
: "cc", "memory"
, R"ax", R"bx", R"di", R"si"
#ifdef CONFIG_X86_64
-@@ -6312,7 +6330,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6312,7 +6337,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
@@ -20484,7 +20546,7 @@ index 86c8704..e8ee2ac 100644
/*
* The sysexit path does not restore ds/es, so we must set them to
* a reasonable value ourselves.
-@@ -6321,8 +6339,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6321,8 +6346,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* may be executed in interrupt context, which saves and restore segments
* around it, nullifying its effect.
*/
@@ -24260,10 +24322,10 @@ index 6f31ee5..8ee4164 100644
return (void *)vaddr;
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
-index f6679a7..8f795a3 100644
+index b91e485..d00e7c9 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
-@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+@@ -277,13 +277,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -24288,7 +24350,7 @@ index f6679a7..8f795a3 100644
}
full_search:
-@@ -280,26 +287,27 @@ full_search:
+@@ -291,26 +298,27 @@ full_search:
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
@@ -24323,7 +24385,7 @@ index f6679a7..8f795a3 100644
}
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
-@@ -310,9 +318,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -321,9 +329,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long base = mm->mmap_base;
@@ -24334,7 +24396,7 @@ index f6679a7..8f795a3 100644
/* don't allow allocations above current base */
if (mm->free_area_cache > base)
-@@ -322,16 +329,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -333,16 +340,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
largest_hole = 0;
mm->free_area_cache = base;
}
@@ -24353,7 +24415,7 @@ index f6679a7..8f795a3 100644
/*
* Lookup failure means no vma is above this address,
* i.e. return with success:
-@@ -340,10 +346,10 @@ try_again:
+@@ -351,10 +357,10 @@ try_again:
if (!vma)
return addr;
@@ -24367,7 +24429,7 @@ index f6679a7..8f795a3 100644
} else if (mm->free_area_cache == vma->vm_end) {
/* pull free_area_cache down to the first hole */
mm->free_area_cache = vma->vm_start;
-@@ -352,29 +358,34 @@ try_again:
+@@ -363,29 +369,34 @@ try_again:
/* remember the largest hole we saw so far */
if (addr + largest_hole < vma->vm_start)
@@ -24415,7 +24477,7 @@ index f6679a7..8f795a3 100644
mm->cached_hole_size = ~0UL;
addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
len, pgoff, flags);
-@@ -382,6 +393,7 @@ fail:
+@@ -393,6 +404,7 @@ fail:
/*
* Restore the topdown base:
*/
@@ -24423,7 +24485,7 @@ index f6679a7..8f795a3 100644
mm->free_area_cache = base;
mm->cached_hole_size = ~0UL;
-@@ -395,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -406,10 +418,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -24444,7 +24506,7 @@ index f6679a7..8f795a3 100644
return -ENOMEM;
if (flags & MAP_FIXED) {
-@@ -410,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -421,8 +442,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
@@ -34795,7 +34857,7 @@ index 9d71c9c..0e4a0ac 100644
{ "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
{ "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
-index 6e6fffb..588f361 100644
+index fa47b85..246edeb 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -181,7 +181,7 @@ struct e1000_info;
@@ -37052,10 +37114,10 @@ index bbbc9c9..ce22f77 100644
/* check if the device is still usable */
if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
-index 495db80..fb37d59 100644
+index dae3873..bb4bee6 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
-@@ -1422,7 +1422,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
+@@ -1425,7 +1425,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
shost = sdev->host;
scsi_init_cmd_errh(cmd);
cmd->result = DID_NO_CONNECT << 16;
@@ -37064,7 +37126,7 @@ index 495db80..fb37d59 100644
/*
* SCSI request completion path will do scsi_device_unbusy(),
-@@ -1448,9 +1448,9 @@ static void scsi_softirq_done(struct request *rq)
+@@ -1451,9 +1451,9 @@ static void scsi_softirq_done(struct request *rq)
INIT_LIST_HEAD(&cmd->eh_entry);
@@ -37511,7 +37573,7 @@ index 0d4aa82..f7832d4 100644
/* core tmem accessor functions */
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index c6c385f..b23c65d 100644
+index df9824c..f699b8a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1233,7 +1233,7 @@ struct se_device *transport_add_device_to_core_hba(
@@ -44023,7 +44085,7 @@ index 6901578..d402eb5 100644
return hit;
diff --git a/fs/compat.c b/fs/compat.c
-index 6161255..9f28287 100644
+index 1bdb350..9f28287 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
@@ -44133,38 +44195,6 @@ index 6161255..9f28287 100644
if (__put_user_unaligned(d_off, &lastdirent->d_off))
error = -EFAULT;
else
-@@ -1155,11 +1173,14 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec,
- struct file *file;
- int fput_needed;
- ssize_t ret;
-+ loff_t pos;
-
- file = fget_light(fd, &fput_needed);
- if (!file)
- return -EBADF;
-- ret = compat_readv(file, vec, vlen, &file->f_pos);
-+ pos = file->f_pos;
-+ ret = compat_readv(file, vec, vlen, &pos);
-+ file->f_pos = pos;
- fput_light(file, fput_needed);
- return ret;
- }
-@@ -1221,11 +1242,14 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec,
- struct file *file;
- int fput_needed;
- ssize_t ret;
-+ loff_t pos;
-
- file = fget_light(fd, &fput_needed);
- if (!file)
- return -EBADF;
-- ret = compat_writev(file, vec, vlen, &file->f_pos);
-+ pos = file->f_pos;
-+ ret = compat_writev(file, vec, vlen, &pos);
-+ file->f_pos = pos;
- fput_light(file, fput_needed);
- return ret;
- }
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index 112e45a..b59845b 100644
--- a/fs/compat_binfmt_elf.c
@@ -44283,10 +44313,36 @@ index b80bc84..0d46d1a 100644
}
EXPORT_SYMBOL_GPL(debugfs_create_dir);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
-index a07441a..046fc0d 100644
+index a07441a..5c47fa2 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
-@@ -671,7 +671,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
+@@ -621,6 +621,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct dentry *lower_old_dir_dentry;
+ struct dentry *lower_new_dir_dentry;
+ struct dentry *trap = NULL;
++ struct inode *target_inode;
+
+ lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
+ lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
+@@ -628,6 +629,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ dget(lower_new_dentry);
+ lower_old_dir_dentry = dget_parent(lower_old_dentry);
+ lower_new_dir_dentry = dget_parent(lower_new_dentry);
++ target_inode = new_dentry->d_inode;
+ trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ /* source should not be ancestor of target */
+ if (trap == lower_old_dentry) {
+@@ -643,6 +645,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ lower_new_dir_dentry->d_inode, lower_new_dentry);
+ if (rc)
+ goto out_lock;
++ if (target_inode)
++ fsstack_copy_attr_all(target_inode,
++ ecryptfs_inode_to_lower(target_inode));
+ fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
+ if (new_dir != old_dir)
+ fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
+@@ -671,7 +676,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
old_fs = get_fs();
set_fs(get_ds());
rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
@@ -44295,7 +44351,7 @@ index a07441a..046fc0d 100644
PATH_MAX);
set_fs(old_fs);
if (rc < 0)
-@@ -703,7 +703,7 @@ out:
+@@ -703,7 +708,7 @@ out:
static void
ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
{
@@ -47005,7 +47061,7 @@ index 3426521..3b75162 100644
cuse_class = class_create(THIS_MODULE, "cuse");
if (IS_ERR(cuse_class))
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
-index 7df2b5e..5804aa7 100644
+index f4246cf..b4aed1d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
@@ -48096,7 +48152,7 @@ index 68f4541..89cfe6a 100644
/* Copy the blockcheck stats from the superblock probe */
osb->osb_ecc_stats = *stats;
diff --git a/fs/open.c b/fs/open.c
-index 1540632..79d7242 100644
+index 5d9c71b..adb5b19 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -31,6 +31,8 @@
@@ -48193,7 +48249,7 @@ index 1540632..79d7242 100644
newattrs.ia_valid = ATTR_CTIME;
if (user != (uid_t) -1) {
if (!uid_valid(uid))
-@@ -1035,6 +1072,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
+@@ -1036,6 +1073,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
} else {
fsnotify_open(f);
fd_install(fd, f);
@@ -62811,6 +62867,19 @@ index 5398d58..5883a34 100644
#define request_module(mod...) __request_module(true, mod)
#define request_module_nowait(mod...) __request_module(false, mod)
#define try_then_request_module(x, mod...) \
+diff --git a/include/linux/kobject.h b/include/linux/kobject.h
+index fc615a9..1e57449 100644
+--- a/include/linux/kobject.h
++++ b/include/linux/kobject.h
+@@ -224,7 +224,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
+
+ static inline __printf(2, 3)
+ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
+-{ return 0; }
++{ return -ENOMEM; }
+
+ static inline int kobject_action_type(const char *buf, size_t count,
+ enum kobject_action *type)
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 9c07dce..a92fa71 100644
--- a/include/linux/kref.h
@@ -63424,7 +63493,7 @@ index a4c5624..79d6d88 100644
/** create a directory */
struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
-index 45db49f..386788e 100644
+index 45db49f..8795db3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -879,8 +879,8 @@ struct perf_event {
@@ -63438,7 +63507,14 @@ index 45db49f..386788e 100644
/*
* These are the total time in nanoseconds that the event
-@@ -931,8 +931,8 @@ struct perf_event {
+@@ -925,14 +925,14 @@ struct perf_event {
+ struct hw_perf_event hw;
+
+ struct perf_event_context *ctx;
+- struct file *filp;
++ atomic_long_t refcount;
+
+ /*
* These accumulate total time (in nanoseconds) that children
* events have been enabled and running, respectively.
*/
@@ -66664,7 +66740,7 @@ index 1f91413..362a0a1 100644
#ifdef CONFIG_MODULE_UNLOAD
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index d7d71d6..f54b76f 100644
+index d7d71d6..b6ec863 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
@@ -66685,7 +66761,38 @@ index d7d71d6..f54b76f 100644
}
static u64 perf_event_read(struct perf_event *event)
-@@ -2987,9 +2987,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+@@ -2933,12 +2933,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
+ /*
+ * Called when the last reference to the file is gone.
+ */
+-static int perf_release(struct inode *inode, struct file *file)
++static void put_event(struct perf_event *event)
+ {
+- struct perf_event *event = file->private_data;
+ struct task_struct *owner;
+
+- file->private_data = NULL;
++ if (!atomic_long_dec_and_test(&event->refcount))
++ return;
+
+ rcu_read_lock();
+ owner = ACCESS_ONCE(event->owner);
+@@ -2973,7 +2973,13 @@ static int perf_release(struct inode *inode, struct file *file)
+ put_task_struct(owner);
+ }
+
+- return perf_event_release_kernel(event);
++ perf_event_release_kernel(event);
++}
++
++static int perf_release(struct inode *inode, struct file *file)
++{
++ put_event(file->private_data);
++ return 0;
+ }
+
+ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+@@ -2987,9 +2993,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
mutex_lock(&event->child_mutex);
total += perf_event_read(event);
*enabled += event->total_time_enabled +
@@ -66697,7 +66804,51 @@ index d7d71d6..f54b76f 100644
list_for_each_entry(child, &event->child_list, child_list) {
total += perf_event_read(child);
-@@ -3396,10 +3396,10 @@ void perf_event_update_userpage(struct perf_event *event)
+@@ -3225,7 +3231,7 @@ unlock:
+
+ static const struct file_operations perf_fops;
+
+-static struct perf_event *perf_fget_light(int fd, int *fput_needed)
++static struct file *perf_fget_light(int fd, int *fput_needed)
+ {
+ struct file *file;
+
+@@ -3239,7 +3245,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
+ return ERR_PTR(-EBADF);
+ }
+
+- return file->private_data;
++ return file;
+ }
+
+ static int perf_event_set_output(struct perf_event *event,
+@@ -3271,19 +3277,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+
+ case PERF_EVENT_IOC_SET_OUTPUT:
+ {
++ struct file *output_file = NULL;
+ struct perf_event *output_event = NULL;
+ int fput_needed = 0;
+ int ret;
+
+ if (arg != -1) {
+- output_event = perf_fget_light(arg, &fput_needed);
+- if (IS_ERR(output_event))
+- return PTR_ERR(output_event);
++ output_file = perf_fget_light(arg, &fput_needed);
++ if (IS_ERR(output_file))
++ return PTR_ERR(output_file);
++ output_event = output_file->private_data;
+ }
+
+ ret = perf_event_set_output(event, output_event);
+ if (output_event)
+- fput_light(output_event->filp, fput_needed);
++ fput_light(output_file, fput_needed);
+
+ return ret;
+ }
+@@ -3396,10 +3404,10 @@ void perf_event_update_userpage(struct perf_event *event)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
@@ -66710,7 +66861,7 @@ index d7d71d6..f54b76f 100644
arch_perf_update_userpage(userpg, now);
-@@ -3832,11 +3832,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
+@@ -3832,11 +3840,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
values[n++] = perf_event_count(event);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] = enabled +
@@ -66724,7 +66875,7 @@ index d7d71d6..f54b76f 100644
}
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
-@@ -4514,12 +4514,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+@@ -4514,12 +4522,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
* need to add enough zero bytes after the string to handle
* the 64bit alignment we do later.
*/
@@ -66739,7 +66890,15 @@ index d7d71d6..f54b76f 100644
if (IS_ERR(name)) {
name = strncpy(tmp, "//toolong", sizeof(tmp));
goto got_name;
-@@ -5931,7 +5931,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+@@ -5922,6 +5930,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+
+ mutex_init(&event->mmap_mutex);
+
++ atomic_long_set(&event->refcount, 1);
+ event->cpu = cpu;
+ event->attr = *attr;
+ event->group_leader = group_leader;
+@@ -5931,7 +5940,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
event->parent = parent_event;
event->ns = get_pid_ns(current->nsproxy->pid_ns);
@@ -66748,7 +66907,40 @@ index d7d71d6..f54b76f 100644
event->state = PERF_EVENT_STATE_INACTIVE;
-@@ -6493,10 +6493,10 @@ static void sync_child_event(struct perf_event *child_event,
+@@ -6232,12 +6241,12 @@ SYSCALL_DEFINE5(perf_event_open,
+ return event_fd;
+
+ if (group_fd != -1) {
+- group_leader = perf_fget_light(group_fd, &fput_needed);
+- if (IS_ERR(group_leader)) {
+- err = PTR_ERR(group_leader);
++ group_file = perf_fget_light(group_fd, &fput_needed);
++ if (IS_ERR(group_file)) {
++ err = PTR_ERR(group_file);
+ goto err_fd;
+ }
+- group_file = group_leader->filp;
++ group_leader = group_file->private_data;
+ if (flags & PERF_FLAG_FD_OUTPUT)
+ output_event = group_leader;
+ if (flags & PERF_FLAG_FD_NO_GROUP)
+@@ -6372,7 +6381,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ put_ctx(gctx);
+ }
+
+- event->filp = event_file;
+ WARN_ON_ONCE(ctx->parent_ctx);
+ mutex_lock(&ctx->mutex);
+
+@@ -6462,7 +6470,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+ goto err_free;
+ }
+
+- event->filp = NULL;
+ WARN_ON_ONCE(ctx->parent_ctx);
+ mutex_lock(&ctx->mutex);
+ perf_install_in_context(ctx, event, cpu);
+@@ -6493,10 +6500,10 @@ static void sync_child_event(struct perf_event *child_event,
/*
* Add back the child's count to the parent's count:
*/
@@ -66762,6 +66954,64 @@ index d7d71d6..f54b76f 100644
&parent_event->child_total_time_running);
/*
+@@ -6511,7 +6518,7 @@ static void sync_child_event(struct perf_event *child_event,
+ * Release the parent event, if this was the last
+ * reference to it.
+ */
+- fput(parent_event->filp);
++ put_event(parent_event);
+ }
+
+ static void
+@@ -6587,9 +6594,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
+ *
+ * __perf_event_exit_task()
+ * sync_child_event()
+- * fput(parent_event->filp)
+- * perf_release()
+- * mutex_lock(&ctx->mutex)
++ * put_event()
++ * mutex_lock(&ctx->mutex)
+ *
+ * But since its the parent context it won't be the same instance.
+ */
+@@ -6657,7 +6663,7 @@ static void perf_free_event(struct perf_event *event,
+ list_del_init(&event->child_list);
+ mutex_unlock(&parent->child_mutex);
+
+- fput(parent->filp);
++ put_event(parent);
+
+ perf_group_detach(event);
+ list_del_event(event, ctx);
+@@ -6737,6 +6743,12 @@ inherit_event(struct perf_event *parent_event,
+ NULL, NULL);
+ if (IS_ERR(child_event))
+ return child_event;
++
++ if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
++ free_event(child_event);
++ return NULL;
++ }
++
+ get_ctx(child_ctx);
+
+ /*
+@@ -6778,14 +6790,6 @@ inherit_event(struct perf_event *parent_event,
+ raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+ /*
+- * Get a reference to the parent filp - we will fput it
+- * when the child event exits. This is safe to do because
+- * we are in the parent and we know that the filp still
+- * exists and has a nonzero count:
+- */
+- atomic_long_inc(&parent_event->filp->f_count);
+-
+- /*
+ * Link this into the parent event's child list
+ */
+ WARN_ON_ONCE(parent_event->ctx->parent_ctx);
diff --git a/kernel/exit.c b/kernel/exit.c
index 46ce8da..c648f3a 100644
--- a/kernel/exit.c
@@ -66852,7 +67102,7 @@ index 46ce8da..c648f3a 100644
{
struct signal_struct *sig = current->signal;
diff --git a/kernel/fork.c b/kernel/fork.c
-index f00e319..c212fbc 100644
+index f9d0499..e4f8f44 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -321,7 +321,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
@@ -67028,9 +67278,9 @@ index f00e319..c212fbc 100644
if (retval)
goto out;
-- if (file && uprobe_mmap(tmp))
-+ if (tmp->vm_file && uprobe_mmap(tmp))
- goto out;
+- if (file)
++ if (tmp->vm_file)
+ uprobe_mmap(tmp);
}
+
+#ifdef CONFIG_PAX_SEGMEXEC
@@ -69507,10 +69757,10 @@ index 0984a21..939f183 100644
#ifdef CONFIG_RT_GROUP_SCHED
/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 468bdd4..b941572 100644
+index 39c44fa..70edb8b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4097,6 +4097,8 @@ int can_nice(const struct task_struct *p, const int nice)
+@@ -4103,6 +4103,8 @@ int can_nice(const struct task_struct *p, const int nice)
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = 20 - nice;
@@ -69519,7 +69769,7 @@ index 468bdd4..b941572 100644
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
-@@ -4130,7 +4132,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+@@ -4136,7 +4138,8 @@ SYSCALL_DEFINE1(nice, int, increment)
if (nice > 19)
nice = 19;
@@ -69529,7 +69779,7 @@ index 468bdd4..b941572 100644
return -EPERM;
retval = security_task_setnice(current, nice);
-@@ -4284,6 +4287,7 @@ recheck:
+@@ -4290,6 +4293,7 @@ recheck:
unsigned long rlim_rtprio =
task_rlimit(p, RLIMIT_RTPRIO);
@@ -70818,6 +71068,24 @@ index 80b9c76..9e32279 100644
}
EXPORT_SYMBOL(devm_ioport_unmap);
+diff --git a/lib/digsig.c b/lib/digsig.c
+index 286d558..8c0e629 100644
+--- a/lib/digsig.c
++++ b/lib/digsig.c
+@@ -163,9 +163,11 @@ static int digsig_verify_rsa(struct key *key,
+ memcpy(out1 + head, p, l);
+
+ err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
++ if (err)
++ goto err;
+
+- if (!err && len == hlen)
+- err = memcmp(out2, h, hlen);
++ if (len != hlen || memcmp(out2, h, hlen))
++ err = -EINVAL;
+
+ err:
+ mpi_free(in);
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 66ce414..6f0a0dd 100644
--- a/lib/dma-debug.c
@@ -71440,15 +71708,19 @@ index 6de0d61..da836cf 100644
/* keep elevated page count for bad page */
return ret;
diff --git a/mm/memory.c b/mm/memory.c
-index 2466d12..08be4f6 100644
+index 2466d12..595ed79 100644
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -434,8 +434,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
- return;
+@@ -422,6 +422,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+ free_pte_range(tlb, pmd, addr);
+ } while (pmd++, addr = next, addr != end);
- pmd = pmd_offset(pud, start);
-+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
+ start &= PUD_MASK;
+ if (start < floor)
+ return;
+@@ -436,6 +437,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+ pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
+#endif
@@ -71456,11 +71728,15 @@ index 2466d12..08be4f6 100644
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-@@ -466,9 +470,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
- if (end - 1 > ceiling - 1)
- return;
+@@ -455,6 +458,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+ free_pmd_range(tlb, pud, addr, next, floor, ceiling);
+ } while (pud++, addr = next, addr != end);
+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
+ start &= PGDIR_MASK;
+ if (start < floor)
+ return;
+@@ -469,6 +473,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud, start);
@@ -71469,7 +71745,7 @@ index 2466d12..08be4f6 100644
}
/*
-@@ -1602,12 +1609,6 @@ no_page_table:
+@@ -1602,12 +1608,6 @@ no_page_table:
return page;
}
@@ -71482,7 +71758,7 @@ index 2466d12..08be4f6 100644
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
-@@ -1680,10 +1681,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1680,10 +1680,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
i = 0;
@@ -71495,7 +71771,7 @@ index 2466d12..08be4f6 100644
if (!vma && in_gate_area(mm, start)) {
unsigned long pg = start & PAGE_MASK;
pgd_t *pgd;
-@@ -1731,7 +1732,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1731,7 +1731,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto next_page;
}
@@ -71504,7 +71780,7 @@ index 2466d12..08be4f6 100644
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-@@ -1758,11 +1759,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1758,11 +1758,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ret;
unsigned int fault_flags = 0;
@@ -71516,7 +71792,7 @@ index 2466d12..08be4f6 100644
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
-@@ -1836,7 +1832,7 @@ next_page:
+@@ -1836,7 +1831,7 @@ next_page:
start += PAGE_SIZE;
nr_pages--;
} while (nr_pages && start < vma->vm_end);
@@ -71525,7 +71801,7 @@ index 2466d12..08be4f6 100644
return i;
}
EXPORT_SYMBOL(__get_user_pages);
-@@ -2043,6 +2039,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2043,6 +2038,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -71536,7 +71812,7 @@ index 2466d12..08be4f6 100644
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
-@@ -2077,10 +2077,22 @@ out:
+@@ -2077,10 +2076,22 @@ out:
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page)
{
@@ -71559,7 +71835,7 @@ index 2466d12..08be4f6 100644
vma->vm_flags |= VM_INSERTPAGE;
return insert_page(vma, addr, page, vma->vm_page_prot);
}
-@@ -2166,6 +2178,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -2166,6 +2177,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -71567,7 +71843,7 @@ index 2466d12..08be4f6 100644
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
-@@ -2373,7 +2386,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -2373,7 +2385,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
@@ -71578,7 +71854,7 @@ index 2466d12..08be4f6 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -2393,7 +2408,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -2393,7 +2407,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
@@ -71589,7 +71865,7 @@ index 2466d12..08be4f6 100644
if (!pud)
return -ENOMEM;
do {
-@@ -2481,6 +2498,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+@@ -2481,6 +2497,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
copy_user_highpage(dst, src, va, vma);
}
@@ -71776,7 +72052,7 @@ index 2466d12..08be4f6 100644
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2692,6 +2889,12 @@ gotten:
+@@ -2692,6 +2888,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -71789,7 +72065,7 @@ index 2466d12..08be4f6 100644
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2743,6 +2946,10 @@ gotten:
+@@ -2743,6 +2945,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -71800,7 +72076,7 @@ index 2466d12..08be4f6 100644
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -3022,6 +3229,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3022,6 +3228,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -71812,7 +72088,7 @@ index 2466d12..08be4f6 100644
unlock_page(page);
if (swapcache) {
/*
-@@ -3045,6 +3257,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3045,6 +3256,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -71824,7 +72100,7 @@ index 2466d12..08be4f6 100644
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -3064,40 +3281,6 @@ out_release:
+@@ -3064,40 +3280,6 @@ out_release:
}
/*
@@ -71865,7 +72141,7 @@ index 2466d12..08be4f6 100644
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3106,27 +3289,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3106,27 +3288,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -71898,7 +72174,7 @@ index 2466d12..08be4f6 100644
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3145,6 +3324,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3145,6 +3323,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
@@ -71910,7 +72186,7 @@ index 2466d12..08be4f6 100644
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -3152,6 +3336,12 @@ setpte:
+@@ -3152,6 +3335,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -71923,7 +72199,7 @@ index 2466d12..08be4f6 100644
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -3295,6 +3485,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3295,6 +3484,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*/
/* Only go through if we didn't race with anybody else... */
if (likely(pte_same(*page_table, orig_pte))) {
@@ -71936,7 +72212,7 @@ index 2466d12..08be4f6 100644
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
-@@ -3314,6 +3510,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3314,6 +3509,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, page_table);
@@ -71951,7 +72227,7 @@ index 2466d12..08be4f6 100644
} else {
if (cow_page)
mem_cgroup_uncharge_page(cow_page);
-@@ -3467,6 +3671,12 @@ int handle_pte_fault(struct mm_struct *mm,
+@@ -3467,6 +3670,12 @@ int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
@@ -71964,7 +72240,7 @@ index 2466d12..08be4f6 100644
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3483,6 +3693,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3483,6 +3692,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;
@@ -71975,7 +72251,7 @@ index 2466d12..08be4f6 100644
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
-@@ -3494,6 +3708,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3494,6 +3707,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
@@ -72010,7 +72286,7 @@ index 2466d12..08be4f6 100644
retry:
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
-@@ -3535,7 +3777,7 @@ retry:
+@@ -3535,7 +3776,7 @@ retry:
* run pte_offset_map on the pmd, if an huge pmd could
* materialize from under us from a different thread.
*/
@@ -72019,7 +72295,7 @@ index 2466d12..08be4f6 100644
return VM_FAULT_OOM;
/* if an huge pmd materialized from under us just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
-@@ -3572,6 +3814,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3572,6 +3813,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -72043,7 +72319,7 @@ index 2466d12..08be4f6 100644
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3602,6 +3861,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3602,6 +3860,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -72074,7 +72350,7 @@ index 2466d12..08be4f6 100644
#endif /* __PAGETABLE_PMD_FOLDED */
int make_pages_present(unsigned long addr, unsigned long end)
-@@ -3639,7 +3922,7 @@ static int __init gate_vma_init(void)
+@@ -3639,7 +3921,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -72084,7 +72360,7 @@ index 2466d12..08be4f6 100644
return 0;
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
-index 1d771e4..64b57d0 100644
+index b12b28a..64b57d0 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
@@ -72159,15 +72435,6 @@ index 1d771e4..64b57d0 100644
err = do_migrate_pages(mm, old, new,
capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
-@@ -2556,7 +2589,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
- break;
-
- default:
-- BUG();
-+ return -EINVAL;
- }
-
- l = strlen(policy_modes[mode]);
diff --git a/mm/mlock.c b/mm/mlock.c
index ef726e8..cd7f1ec 100644
--- a/mm/mlock.c
@@ -72255,7 +72522,7 @@ index ef726e8..cd7f1ec 100644
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff --git a/mm/mmap.c b/mm/mmap.c
-index 3edfcdf..4a27ae9 100644
+index fa1f274..86de476 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -47,6 +47,16 @@
@@ -72629,7 +72896,7 @@ index 3edfcdf..4a27ae9 100644
if (vm_flags & VM_LOCKED) {
if (!mlock_vma_pages_range(vma, addr, addr + len))
mm->locked_vm += (len >> PAGE_SHIFT);
-@@ -1371,6 +1521,12 @@ unmap_and_free_vma:
+@@ -1370,6 +1520,12 @@ unmap_and_free_vma:
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
charged = 0;
free_vma:
@@ -72642,7 +72909,7 @@ index 3edfcdf..4a27ae9 100644
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
-@@ -1378,6 +1534,44 @@ unacct_error:
+@@ -1377,6 +1533,44 @@ unacct_error:
return error;
}
@@ -72687,7 +72954,7 @@ index 3edfcdf..4a27ae9 100644
/* Get an address range which is currently unmapped.
* For shmat() with addr=0.
*
-@@ -1404,18 +1598,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1403,18 +1597,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (flags & MAP_FIXED)
return addr;
@@ -72718,7 +72985,7 @@ index 3edfcdf..4a27ae9 100644
}
full_search:
-@@ -1426,34 +1625,40 @@ full_search:
+@@ -1425,34 +1624,40 @@ full_search:
* Start a new search - just in case we missed
* some holes.
*/
@@ -72770,7 +73037,7 @@ index 3edfcdf..4a27ae9 100644
mm->free_area_cache = addr;
}
-@@ -1469,7 +1674,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1468,7 +1673,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
@@ -72779,7 +73046,7 @@ index 3edfcdf..4a27ae9 100644
/* requested length too big for entire address space */
if (len > TASK_SIZE)
-@@ -1478,13 +1683,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1477,13 +1682,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (flags & MAP_FIXED)
return addr;
@@ -72802,7 +73069,7 @@ index 3edfcdf..4a27ae9 100644
}
/* check if free_area_cache is useful for us */
-@@ -1508,7 +1718,7 @@ try_again:
+@@ -1507,7 +1717,7 @@ try_again:
* return with success:
*/
vma = find_vma(mm, addr);
@@ -72811,7 +73078,7 @@ index 3edfcdf..4a27ae9 100644
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr);
-@@ -1517,8 +1727,8 @@ try_again:
+@@ -1516,8 +1726,8 @@ try_again:
mm->cached_hole_size = vma->vm_start - addr;
/* try just below the current vma->vm_start */
@@ -72822,7 +73089,7 @@ index 3edfcdf..4a27ae9 100644
fail:
/*
-@@ -1541,13 +1751,21 @@ fail:
+@@ -1540,13 +1750,21 @@ fail:
* can happen with large stack limits and large mmap()
* allocations.
*/
@@ -72846,7 +73113,7 @@ index 3edfcdf..4a27ae9 100644
mm->cached_hole_size = ~0UL;
return addr;
-@@ -1556,6 +1774,12 @@ fail:
+@@ -1555,6 +1773,12 @@ fail:
void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
{
@@ -72859,7 +73126,7 @@ index 3edfcdf..4a27ae9 100644
/*
* Is this a new hole at the highest possible address?
*/
-@@ -1563,8 +1787,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+@@ -1562,8 +1786,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
mm->free_area_cache = addr;
/* dont allow allocations above current base */
@@ -72871,7 +73138,7 @@ index 3edfcdf..4a27ae9 100644
}
unsigned long
-@@ -1663,6 +1889,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
+@@ -1662,6 +1888,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
return vma;
}
@@ -72900,7 +73167,7 @@ index 3edfcdf..4a27ae9 100644
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
-@@ -1679,6 +1927,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1678,6 +1926,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
return -ENOMEM;
/* Stack limit test */
@@ -72908,7 +73175,7 @@ index 3edfcdf..4a27ae9 100644
if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
-@@ -1689,6 +1938,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1688,6 +1937,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
locked = mm->locked_vm + grow;
limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
limit >>= PAGE_SHIFT;
@@ -72916,7 +73183,7 @@ index 3edfcdf..4a27ae9 100644
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
-@@ -1719,37 +1969,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1718,37 +1968,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
@@ -72974,7 +73241,7 @@ index 3edfcdf..4a27ae9 100644
unsigned long size, grow;
size = address - vma->vm_start;
-@@ -1764,6 +2025,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -1763,6 +2024,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
}
}
}
@@ -72983,7 +73250,7 @@ index 3edfcdf..4a27ae9 100644
vma_unlock_anon_vma(vma);
khugepaged_enter_vma_merge(vma);
return error;
-@@ -1777,6 +2040,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1776,6 +2039,8 @@ int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
int error;
@@ -72992,7 +73259,7 @@ index 3edfcdf..4a27ae9 100644
/*
* We must make sure the anon_vma is allocated
-@@ -1790,6 +2055,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1789,6 +2054,15 @@ int expand_downwards(struct vm_area_struct *vma,
if (error)
return error;
@@ -73008,7 +73275,7 @@ index 3edfcdf..4a27ae9 100644
vma_lock_anon_vma(vma);
/*
-@@ -1799,9 +2073,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1798,9 +2072,17 @@ int expand_downwards(struct vm_area_struct *vma,
*/
/* Somebody else might have raced and expanded it already */
@@ -73027,7 +73294,7 @@ index 3edfcdf..4a27ae9 100644
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
-@@ -1811,11 +2093,22 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1810,11 +2092,22 @@ int expand_downwards(struct vm_area_struct *vma,
if (!error) {
vma->vm_start = address;
vma->vm_pgoff -= grow;
@@ -73050,7 +73317,7 @@ index 3edfcdf..4a27ae9 100644
khugepaged_enter_vma_merge(vma);
return error;
}
-@@ -1887,6 +2180,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -1886,6 +2179,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
do {
long nrpages = vma_pages(vma);
@@ -73064,7 +73331,7 @@ index 3edfcdf..4a27ae9 100644
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
mm->total_vm -= nrpages;
-@@ -1933,6 +2233,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -1932,6 +2232,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
@@ -73081,7 +73348,7 @@ index 3edfcdf..4a27ae9 100644
rb_erase(&vma->vm_rb, &mm->mm_rb);
mm->map_count--;
tail_vma = vma;
-@@ -1961,14 +2271,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1960,14 +2270,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
struct vm_area_struct *new;
int err = -ENOMEM;
@@ -73115,7 +73382,7 @@ index 3edfcdf..4a27ae9 100644
/* most fields are the same, copy all, and then fixup */
*new = *vma;
-@@ -1981,6 +2310,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1980,6 +2309,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -73138,7 +73405,7 @@ index 3edfcdf..4a27ae9 100644
pol = mpol_dup(vma_policy(vma));
if (IS_ERR(pol)) {
err = PTR_ERR(pol);
-@@ -2006,6 +2351,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2005,6 +2350,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -73181,7 +73448,7 @@ index 3edfcdf..4a27ae9 100644
/* Success. */
if (!err)
return 0;
-@@ -2018,10 +2399,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2017,10 +2398,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
removed_exe_file_vma(mm);
fput(new->vm_file);
}
@@ -73201,7 +73468,7 @@ index 3edfcdf..4a27ae9 100644
kmem_cache_free(vm_area_cachep, new);
out_err:
return err;
-@@ -2034,6 +2423,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2033,6 +2422,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
@@ -73217,7 +73484,7 @@ index 3edfcdf..4a27ae9 100644
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
-@@ -2045,11 +2443,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2044,11 +2442,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
@@ -73248,7 +73515,7 @@ index 3edfcdf..4a27ae9 100644
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
-@@ -2124,6 +2541,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2123,6 +2540,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Fix up all other VM information */
remove_vma_list(mm, vma);
@@ -73257,7 +73524,7 @@ index 3edfcdf..4a27ae9 100644
return 0;
}
-@@ -2132,6 +2551,13 @@ int vm_munmap(unsigned long start, size_t len)
+@@ -2131,6 +2550,13 @@ int vm_munmap(unsigned long start, size_t len)
int ret;
struct mm_struct *mm = current->mm;
@@ -73271,7 +73538,7 @@ index 3edfcdf..4a27ae9 100644
down_write(&mm->mmap_sem);
ret = do_munmap(mm, start, len);
up_write(&mm->mmap_sem);
-@@ -2145,16 +2571,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2144,16 +2570,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
return vm_munmap(addr, len);
}
@@ -73288,7 +73555,7 @@ index 3edfcdf..4a27ae9 100644
/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
-@@ -2168,6 +2584,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2167,6 +2583,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
@@ -73296,7 +73563,7 @@ index 3edfcdf..4a27ae9 100644
len = PAGE_ALIGN(len);
if (!len)
-@@ -2175,16 +2592,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2174,16 +2591,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -73328,7 +73595,7 @@ index 3edfcdf..4a27ae9 100644
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
-@@ -2201,22 +2632,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2200,22 +2631,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
/*
* Clear old maps. this also does some error checking for us
*/
@@ -73356,7 +73623,7 @@ index 3edfcdf..4a27ae9 100644
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
-@@ -2230,7 +2661,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2229,7 +2660,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
@@ -73365,7 +73632,7 @@ index 3edfcdf..4a27ae9 100644
return -ENOMEM;
}
-@@ -2244,11 +2675,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2243,11 +2674,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
perf_event_mmap(vma);
@@ -73380,7 +73647,7 @@ index 3edfcdf..4a27ae9 100644
return addr;
}
-@@ -2306,6 +2738,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2305,6 +2737,7 @@ void exit_mmap(struct mm_struct *mm)
while (vma) {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
@@ -73388,7 +73655,7 @@ index 3edfcdf..4a27ae9 100644
vma = remove_vma(vma);
}
vm_unacct_memory(nr_accounted);
-@@ -2322,6 +2755,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+@@ -2321,6 +2754,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
struct vm_area_struct * __vma, * prev;
struct rb_node ** rb_link, * rb_parent;
@@ -73402,7 +73669,7 @@ index 3edfcdf..4a27ae9 100644
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
-@@ -2348,7 +2788,21 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+@@ -2347,7 +2787,21 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
if (vma->vm_file && uprobe_mmap(vma))
return -EINVAL;
@@ -73424,7 +73691,7 @@ index 3edfcdf..4a27ae9 100644
return 0;
}
-@@ -2367,6 +2821,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2366,6 +2820,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
struct mempolicy *pol;
bool faulted_in_anon_vma = true;
@@ -73433,7 +73700,7 @@ index 3edfcdf..4a27ae9 100644
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
-@@ -2438,6 +2894,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2437,6 +2893,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return NULL;
}
@@ -73473,7 +73740,7 @@ index 3edfcdf..4a27ae9 100644
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
-@@ -2449,6 +2938,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2448,6 +2937,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
@@ -73486,7 +73753,7 @@ index 3edfcdf..4a27ae9 100644
if (cur + npages > lim)
return 0;
return 1;
-@@ -2519,6 +3014,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2518,6 +3013,22 @@ int install_special_mapping(struct mm_struct *mm,
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -74348,7 +74615,7 @@ index e901a36..9ff3f90 100644
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
diff --git a/mm/slob.c b/mm/slob.c
-index 8105be4..3e3e9cd 100644
+index 8105be4..33e52d7 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -29,7 +29,7 @@
@@ -74368,17 +74635,18 @@ index 8105be4..3e3e9cd 100644
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
-@@ -102,7 +103,8 @@ struct slob_page {
+@@ -100,9 +101,8 @@ struct slob_page {
+ union {
+ struct {
unsigned long flags; /* mandatory */
- atomic_t _count; /* mandatory */
+- atomic_t _count; /* mandatory */
slobidx_t units; /* free units left in page */
- unsigned long pad[2];
-+ unsigned long pad[1];
+ unsigned long size; /* size when >=PAGE_SIZE */
slob_t *free; /* first free slob_t in page */
struct list_head list; /* linked list of free pages */
};
-@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
+@@ -135,7 +135,7 @@ static LIST_HEAD(free_slob_large);
*/
static inline int is_slob_page(struct slob_page *sp)
{
@@ -74387,7 +74655,7 @@ index 8105be4..3e3e9cd 100644
}
static inline void set_slob_page(struct slob_page *sp)
-@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
+@@ -150,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
static inline struct slob_page *slob_page(const void *addr)
{
@@ -74396,7 +74664,7 @@ index 8105be4..3e3e9cd 100644
}
/*
-@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
+@@ -210,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
/*
* Return the size of a slob block.
*/
@@ -74405,7 +74673,7 @@ index 8105be4..3e3e9cd 100644
{
if (s->units > 0)
return s->units;
-@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
+@@ -220,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
/*
* Return the next free slob block pointer after this one.
*/
@@ -74414,7 +74682,7 @@ index 8105be4..3e3e9cd 100644
{
slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
slobidx_t next;
-@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
+@@ -235,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
/*
* Returns true if s is the last free block in its page.
*/
@@ -74423,7 +74691,7 @@ index 8105be4..3e3e9cd 100644
{
return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
-@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
+@@ -254,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
if (!page)
return NULL;
@@ -74431,7 +74699,7 @@ index 8105be4..3e3e9cd 100644
return page_address(page);
}
-@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+@@ -370,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
if (!b)
return NULL;
sp = slob_page(b);
@@ -74444,7 +74712,7 @@ index 8105be4..3e3e9cd 100644
INIT_LIST_HEAD(&sp->list);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
set_slob_page_free(sp, slob_list);
-@@ -476,10 +479,9 @@ out:
+@@ -476,10 +477,9 @@ out:
* End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
*/
@@ -74457,7 +74725,7 @@ index 8105be4..3e3e9cd 100644
void *ret;
gfp &= gfp_allowed_mask;
-@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+@@ -494,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
if (!m)
return NULL;
@@ -74469,7 +74737,7 @@ index 8105be4..3e3e9cd 100644
ret = (void *)m + align;
trace_kmalloc_node(_RET_IP_, ret,
-@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+@@ -506,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
gfp |= __GFP_COMP;
ret = slob_new_pages(gfp, order, node);
if (ret) {
@@ -74499,7 +74767,7 @@ index 8105be4..3e3e9cd 100644
return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
-@@ -533,13 +547,88 @@ void kfree(const void *block)
+@@ -533,13 +545,88 @@ void kfree(const void *block)
sp = slob_page(block);
if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -74557,7 +74825,7 @@ index 8105be4..3e3e9cd 100644
+ base = (void *)((unsigned long)ptr & PAGE_MASK);
+ free = sp->free;
+
-+ while (!slob_last(free) && (void *)free <= ptr) {
++ while ((void *)free <= ptr) {
+ base = free + slob_units(free);
+ free = slob_next(free);
+ }
@@ -74591,7 +74859,7 @@ index 8105be4..3e3e9cd 100644
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
-@@ -552,10 +641,10 @@ size_t ksize(const void *block)
+@@ -552,10 +639,10 @@ size_t ksize(const void *block)
sp = slob_page(block);
if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -74605,7 +74873,7 @@ index 8105be4..3e3e9cd 100644
}
EXPORT_SYMBOL(ksize);
-@@ -571,8 +660,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+@@ -571,8 +658,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
{
struct kmem_cache *c;
@@ -74619,7 +74887,7 @@ index 8105be4..3e3e9cd 100644
if (c) {
c->name = name;
-@@ -614,17 +708,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+@@ -614,17 +706,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
lockdep_trace_alloc(flags);
@@ -74645,7 +74913,7 @@ index 8105be4..3e3e9cd 100644
if (c->ctor)
c->ctor(b);
-@@ -636,10 +738,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
+@@ -636,10 +736,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
static void __kmem_cache_free(void *b, int size)
{
@@ -74664,7 +74932,7 @@ index 8105be4..3e3e9cd 100644
}
static void kmem_rcu_free(struct rcu_head *head)
-@@ -652,17 +760,31 @@ static void kmem_rcu_free(struct rcu_head *head)
+@@ -652,17 +758,31 @@ static void kmem_rcu_free(struct rcu_head *head)
void kmem_cache_free(struct kmem_cache *c, void *b)
{
@@ -75651,10 +75919,10 @@ index 5914623..bedc768 100644
uf.opcode = f->opcode;
uf.event_mask[0] = *((u32 *) f->event_mask + 0);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
-index 4554e80..b778671 100644
+index 7568a6f..ea3097b 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
-@@ -2798,8 +2798,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
+@@ -2799,8 +2799,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
break;
case L2CAP_CONF_RFC:
@@ -76311,28 +76579,6 @@ index 5fd1467..8b70900 100644
}
EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
-diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
-index 75c3582..fb85d37 100644
---- a/net/dccp/ccid.h
-+++ b/net/dccp/ccid.h
-@@ -246,7 +246,7 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk,
- u32 __user *optval, int __user *optlen)
- {
- int rc = -ENOPROTOOPT;
-- if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
-+ if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
- rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len,
- optval, optlen);
- return rc;
-@@ -257,7 +257,7 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk,
- u32 __user *optval, int __user *optlen)
- {
- int rc = -ENOPROTOOPT;
-- if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
-+ if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
- rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len,
- optval, optlen);
- return rc;
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 8c67bed..ce0d140 100644
--- a/net/dccp/ccids/ccid3.c
@@ -78608,7 +78854,7 @@ index 31c7bfc..bc380ae 100644
to += addrlen;
cnt++;
diff --git a/net/socket.c b/net/socket.c
-index 0452dca..5af9802 100644
+index a990aa9..5af9802 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -88,6 +88,7 @@
@@ -78779,24 +79025,6 @@ index 0452dca..5af9802 100644
uaddr_len = COMPAT_NAMELEN(msg);
if (MSG_CMSG_COMPAT & flags) {
err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
-@@ -2605,7 +2665,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
- set_fs(old_fs);
- if (!err)
-- err = compat_put_timeval(up, &ktv);
-+ err = compat_put_timeval(&ktv, up);
-
- return err;
- }
-@@ -2621,7 +2681,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
- set_fs(old_fs);
- if (!err)
-- err = compat_put_timespec(up, &kts);
-+ err = compat_put_timespec(&kts, up);
-
- return err;
- }
@@ -2658,6 +2718,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
return -EFAULT;
@@ -79396,10 +79624,31 @@ index 5b228f9..6aca4e3 100644
if (init_replay) {
err = xfrm_init_replay(x);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
-index 44293b3..be1d3e58 100644
+index 44293b3..2c00fd0 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
-@@ -1413,6 +1413,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
+@@ -872,6 +872,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
+ {
+ struct xfrm_dump_info info;
+ struct sk_buff *skb;
++ int err;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+@@ -882,9 +883,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
+ info.nlmsg_seq = seq;
+ info.nlmsg_flags = 0;
+
+- if (dump_one_state(x, 0, &info)) {
++ err = dump_one_state(x, 0, &info);
++ if (err) {
+ kfree_skb(skb);
+- return NULL;
++ return ERR_PTR(err);
+ }
+
+ return skb;
+@@ -1413,6 +1415,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
struct xfrm_user_tmpl *up = &vec[i];
struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
diff --git a/3.5.3/4430_grsec-remove-localversion-grsec.patch b/3.5.4/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.5.3/4430_grsec-remove-localversion-grsec.patch
+++ b/3.5.4/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.5.3/4435_grsec-mute-warnings.patch b/3.5.4/4435_grsec-mute-warnings.patch
index e1a7a3c..e1a7a3c 100644
--- a/3.5.3/4435_grsec-mute-warnings.patch
+++ b/3.5.4/4435_grsec-mute-warnings.patch
diff --git a/3.5.3/4440_grsec-remove-protected-paths.patch b/3.5.4/4440_grsec-remove-protected-paths.patch
index 637934a..637934a 100644
--- a/3.5.3/4440_grsec-remove-protected-paths.patch
+++ b/3.5.4/4440_grsec-remove-protected-paths.patch
diff --git a/3.5.3/4450_grsec-kconfig-default-gids.patch b/3.5.4/4450_grsec-kconfig-default-gids.patch
index eb5154a..eb5154a 100644
--- a/3.5.3/4450_grsec-kconfig-default-gids.patch
+++ b/3.5.4/4450_grsec-kconfig-default-gids.patch
diff --git a/3.5.3/4465_selinux-avc_audit-log-curr_ip.patch b/3.5.4/4465_selinux-avc_audit-log-curr_ip.patch
index fd7ab67..fd7ab67 100644
--- a/3.5.3/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.5.4/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.5.3/4470_disable-compat_vdso.patch b/3.5.4/4470_disable-compat_vdso.patch
index 68af025..68af025 100644
--- a/3.5.3/4470_disable-compat_vdso.patch
+++ b/3.5.4/4470_disable-compat_vdso.patch