author     Mike Pagano <mpagano@gentoo.org>  2014-08-14 08:22:23 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2014-08-14 08:22:23 -0400
commit     e9024bd195267a655d045df3d853c21cf51924d3 (patch)
tree       da254be28ba2f7e227ff335375381a4209445f6b
parent     Linux patch 3.15.9 (diff)
download   linux-patches-e9024bd195267a655d045df3d853c21cf51924d3.tar.gz
           linux-patches-e9024bd195267a655d045df3d853c21cf51924d3.tar.bz2
           linux-patches-e9024bd195267a655d045df3d853c21cf51924d3.zip
Linux patch 3.15.10 (3.15-12)
-rw-r--r--  0000_README              |    4
-rw-r--r--  1009_linux-3.15.10.patch | 1226
2 files changed, 1230 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 3189f7cb..6fdc22a9 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch: 1008_linux-3.15.9.patch
From: http://www.kernel.org
Desc: Linux 3.15.9
+Patch: 1009_linux-3.15.10.patch
+From: http://www.kernel.org
+Desc: Linux 3.15.10
+
Patch: 1700_enable-thinkpad-micled.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=449248
Desc: Enable mic mute led in thinkpads
diff --git a/1009_linux-3.15.10.patch b/1009_linux-3.15.10.patch
new file mode 100644
index 00000000..d664aac6
--- /dev/null
+++ b/1009_linux-3.15.10.patch
@@ -0,0 +1,1226 @@
+diff --git a/Makefile b/Makefile
+index 25b85aba1e2e..76b75f7b8485 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 15
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Double Funky Skunk
+
+diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
+index 3c3c89f52643..7f9bab26a499 100644
+--- a/arch/sparc/include/asm/tlbflush_64.h
++++ b/arch/sparc/include/asm/tlbflush_64.h
+@@ -34,6 +34,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
+ {
+ }
+
++void flush_tlb_kernel_range(unsigned long start, unsigned long end);
++
+ #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+ extern void flush_tlb_pending(void);
+@@ -48,11 +50,6 @@ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+ #ifndef CONFIG_SMP
+
+-#define flush_tlb_kernel_range(start,end) \
+-do { flush_tsb_kernel_range(start,end); \
+- __flush_tlb_kernel_range(start,end); \
+-} while (0)
+-
+ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+ {
+ __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
+@@ -63,11 +60,6 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
+ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+ extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
+
+-#define flush_tlb_kernel_range(start, end) \
+-do { flush_tsb_kernel_range(start,end); \
+- smp_flush_tlb_kernel_range(start, end); \
+-} while (0)
+-
+ #define global_flush_tlb_page(mm, vaddr) \
+ smp_flush_tlb_page(mm, vaddr)
+
+diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
+index b73274fb961a..42f2bca1d338 100644
+--- a/arch/sparc/include/uapi/asm/unistd.h
++++ b/arch/sparc/include/uapi/asm/unistd.h
+@@ -410,8 +410,9 @@
+ #define __NR_finit_module 342
+ #define __NR_sched_setattr 343
+ #define __NR_sched_getattr 344
++#define __NR_renameat2 345
+
+-#define NR_syscalls 345
++#define NR_syscalls 346
+
+ /* Bitmask values returned from kern_features system call. */
+ #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
+diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
+index e01d75d40329..66dacd56bb10 100644
+--- a/arch/sparc/kernel/ldc.c
++++ b/arch/sparc/kernel/ldc.c
+@@ -1336,7 +1336,7 @@ int ldc_connect(struct ldc_channel *lp)
+ if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
+ !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
+ lp->hs_state != LDC_HS_OPEN)
+- err = -EINVAL;
++ err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
+ else
+ err = start_handshake(lp);
+
+diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
+index d066eb18650c..f834224208ed 100644
+--- a/arch/sparc/kernel/sys32.S
++++ b/arch/sparc/kernel/sys32.S
+@@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
+ SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
+ SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
+ SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
++SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
+
+ .globl sys32_mmap2
+ sys32_mmap2:
+diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
+index 151ace8766cc..85fe9b1087cd 100644
+--- a/arch/sparc/kernel/systbls_32.S
++++ b/arch/sparc/kernel/systbls_32.S
+@@ -86,3 +86,4 @@ sys_call_table:
+ /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
+ /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
+ /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
++/*345*/ .long sys_renameat2
+diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
+index 4bd4e2bb26cf..33ecba2826ea 100644
+--- a/arch/sparc/kernel/systbls_64.S
++++ b/arch/sparc/kernel/systbls_64.S
+@@ -87,6 +87,7 @@ sys_call_table32:
+ /*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
+ .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
+ /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
++ .word sys32_renameat2
+
+ #endif /* CONFIG_COMPAT */
+
+@@ -165,3 +166,4 @@ sys_call_table:
+ /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
+ .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
+ /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
++ .word sys_renameat2
+diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
+index aa4d55b0bdf0..5ce8f2f64604 100644
+--- a/arch/sparc/math-emu/math_32.c
++++ b/arch/sparc/math-emu/math_32.c
+@@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
+ case 0: fsr = *pfsr;
+ if (IR == -1) IR = 2;
+ /* fcc is always fcc0 */
+- fsr &= ~0xc00; fsr |= (IR << 10); break;
++ fsr &= ~0xc00; fsr |= (IR << 10);
+ *pfsr = fsr;
+ break;
+ case 1: rd->s = IR; break;
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index ed3c969a5f4c..96862241b342 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -350,6 +350,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
+
+ mm = vma->vm_mm;
+
++ /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
++ if (!pte_accessible(mm, pte))
++ return;
++
+ spin_lock_irqsave(&mm->context.lock, flags);
+
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+@@ -2614,6 +2618,10 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+
+ pte = pmd_val(entry);
+
++ /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
++ if (!(pte & _PAGE_VALID))
++ return;
++
+ /* We are fabricating 8MB pages using 4MB real hw pages. */
+ pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
+
+@@ -2694,3 +2702,26 @@ void hugetlb_setup(struct pt_regs *regs)
+ }
+ }
+ #endif
++
++#ifdef CONFIG_SMP
++#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
++#else
++#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
++#endif
++
++void flush_tlb_kernel_range(unsigned long start, unsigned long end)
++{
++ if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
++ if (start < LOW_OBP_ADDRESS) {
++ flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
++ do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
++ }
++ if (end > HI_OBP_ADDRESS) {
++ flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
++ do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
++ }
++ } else {
++ flush_tsb_kernel_range(start, end);
++ do_flush_tlb_kernel_range(start, end);
++ }
++}
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index 4d8f8aba0ea5..b87434c99f4d 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -346,6 +346,7 @@ struct sw_tx_bd {
+ u8 flags;
+ /* Set on the first BD descriptor when there is a split BD */
+ #define BNX2X_TSO_SPLIT_BD (1<<0)
++#define BNX2X_HAS_SECOND_PBD (1<<1)
+ };
+
+ struct sw_rx_page {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 0979967577a1..b2b0d2e684ef 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -227,6 +227,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
++ if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
++ /* Skip second parse bd... */
++ --nbd;
++ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
++ }
++
+ /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
+ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+@@ -3877,6 +3883,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ /* set encapsulation flag in start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_TUNNEL_EXIST, 1);
++
++ tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
++
+ nbd++;
+ } else if (xmit_type & XMIT_CSUM) {
+ /* Set PBD in checksum offload case w/o encapsulation */
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 0966bd04375f..837224639148 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1149,6 +1149,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+ goto out;
+ }
+
++ if (skb_padto(skb, ETH_ZLEN)) {
++ ret = NETDEV_TX_OK;
++ goto out;
++ }
++
+ /* set the SKB transmit checksum */
+ if (priv->desc_64b_en) {
+ ret = bcmgenet_put_tx_csum(dev, skb);
+diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
+index 675550fe8ee9..ac1ebe0374be 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad.c
++++ b/drivers/net/ethernet/brocade/bna/bnad.c
+@@ -600,9 +600,9 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
+ prefetch(bnad->netdev);
+
+ cq = ccb->sw_q;
+- cmpl = &cq[ccb->producer_index];
+
+ while (packets < budget) {
++ cmpl = &cq[ccb->producer_index];
+ if (!cmpl->valid)
+ break;
+ /* The 'valid' field is set by the adapter, only after writing
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 7eec598c5cb6..d650b911dae4 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -547,6 +547,7 @@ static int macvlan_init(struct net_device *dev)
+ (lowerdev->state & MACVLAN_STATE_MASK);
+ dev->features = lowerdev->features & MACVLAN_FEATURES;
+ dev->features |= ALWAYS_ON_FEATURES;
++ dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
+ dev->gso_max_size = lowerdev->gso_max_size;
+ dev->iflink = lowerdev->ifindex;
+ dev->hard_header_len = lowerdev->hard_header_len;
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 4987a1c6dc52..b96c4a226624 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -354,7 +354,7 @@ int phy_device_register(struct phy_device *phydev)
+ phydev->bus->phy_map[phydev->addr] = phydev;
+
+ /* Run all of the fixups for this PHY */
+- err = phy_init_hw(phydev);
++ err = phy_scan_fixups(phydev);
+ if (err) {
+ pr_err("PHY %d failed to initialize\n", phydev->addr);
+ goto out;
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 01805319e1e0..1aff970be33e 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ nf_reset(skb);
+
+ skb->ip_summed = CHECKSUM_NONE;
+- ip_select_ident(skb, &rt->dst, NULL);
++ ip_select_ident(skb, NULL);
+ ip_send_check(iph);
+
+ ip_local_out(skb);
+diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
+index 160e7510aca6..0787b9756165 100644
+--- a/drivers/sbus/char/bbc_envctrl.c
++++ b/drivers/sbus/char/bbc_envctrl.c
+@@ -452,6 +452,9 @@ static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device *op,
+ if (!tp)
+ return;
+
++ INIT_LIST_HEAD(&tp->bp_list);
++ INIT_LIST_HEAD(&tp->glob_list);
++
+ tp->client = bbc_i2c_attach(bp, op);
+ if (!tp->client) {
+ kfree(tp);
+@@ -497,6 +500,9 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op,
+ if (!fp)
+ return;
+
++ INIT_LIST_HEAD(&fp->bp_list);
++ INIT_LIST_HEAD(&fp->glob_list);
++
+ fp->client = bbc_i2c_attach(bp, op);
+ if (!fp->client) {
+ kfree(fp);
+diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
+index c7763e482eb2..812b5f0361b6 100644
+--- a/drivers/sbus/char/bbc_i2c.c
++++ b/drivers/sbus/char/bbc_i2c.c
+@@ -300,13 +300,18 @@ static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index
+ if (!bp)
+ return NULL;
+
++ INIT_LIST_HEAD(&bp->temps);
++ INIT_LIST_HEAD(&bp->fans);
++
+ bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs");
+ if (!bp->i2c_control_regs)
+ goto fail;
+
+- bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
+- if (!bp->i2c_bussel_reg)
+- goto fail;
++ if (op->num_resources == 2) {
++ bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
++ if (!bp->i2c_bussel_reg)
++ goto fail;
++ }
+
+ bp->waiting = 0;
+ init_waitqueue_head(&bp->wq);
+diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
+index 2f57df9a71d9..a1e09c0d46f2 100644
+--- a/drivers/tty/serial/sunsab.c
++++ b/drivers/tty/serial/sunsab.c
+@@ -157,6 +157,15 @@ receive_chars(struct uart_sunsab_port *up,
+ (up->port.line == up->port.cons->index))
+ saw_console_brk = 1;
+
++ if (count == 0) {
++ if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) {
++ stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR |
++ SAB82532_ISR0_FERR);
++ up->port.icount.brk++;
++ uart_handle_break(&up->port);
++ }
++ }
++
+ for (i = 0; i < count; i++) {
+ unsigned char ch = buf[i], flag;
+
+diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
+index 2c4004475e71..84e0deb95abd 100644
+--- a/fs/xfs/xfs_log.h
++++ b/fs/xfs/xfs_log.h
+@@ -24,7 +24,8 @@ struct xfs_log_vec {
+ struct xfs_log_iovec *lv_iovecp; /* iovec array */
+ struct xfs_log_item *lv_item; /* owner */
+ char *lv_buf; /* formatted buffer */
+- int lv_buf_len; /* size of formatted buffer */
++ int lv_bytes; /* accounted space in buffer */
++ int lv_buf_len; /* aligned size of buffer */
+ int lv_size; /* size of allocated lv */
+ };
+
+@@ -52,15 +53,21 @@ xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
+ return vec->i_addr;
+ }
+
++/*
++ * We need to make sure the next buffer is naturally aligned for the biggest
++ * basic data type we put into it. We already accounted for this padding when
++ * sizing the buffer.
++ *
++ * However, this padding does not get written into the log, and hence we have to
++ * track the space used by the log vectors separately to prevent log space hangs
++ * due to inaccurate accounting (i.e. a leak) of the used log space through the
++ * CIL context ticket.
++ */
+ static inline void
+ xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
+ {
+- /*
+- * We need to make sure the next buffer is naturally aligned for the
+- * biggest basic data type we put into it. We already accounted for
+- * this when sizing the buffer.
+- */
+ lv->lv_buf_len += round_up(len, sizeof(uint64_t));
++ lv->lv_bytes += len;
+ vec->i_len = len;
+ }
+
+diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
+index 7e5455391176..de835da6764d 100644
+--- a/fs/xfs/xfs_log_cil.c
++++ b/fs/xfs/xfs_log_cil.c
+@@ -97,7 +97,7 @@ xfs_cil_prepare_item(
+ {
+ /* Account for the new LV being passed in */
+ if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
+- *diff_len += lv->lv_buf_len;
++ *diff_len += lv->lv_bytes;
+ *diff_iovecs += lv->lv_niovecs;
+ }
+
+@@ -111,7 +111,7 @@ xfs_cil_prepare_item(
+ else if (old_lv != lv) {
+ ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);
+
+- *diff_len -= old_lv->lv_buf_len;
++ *diff_len -= old_lv->lv_bytes;
+ *diff_iovecs -= old_lv->lv_niovecs;
+ kmem_free(old_lv);
+ }
+@@ -239,7 +239,7 @@ xlog_cil_insert_format_items(
+ * that the space reservation accounting is correct.
+ */
+ *diff_iovecs -= lv->lv_niovecs;
+- *diff_len -= lv->lv_buf_len;
++ *diff_len -= lv->lv_bytes;
+ } else {
+ /* allocate new data chunk */
+ lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
+@@ -259,6 +259,7 @@ xlog_cil_insert_format_items(
+
+ /* The allocated data region lies beyond the iovec region */
+ lv->lv_buf_len = 0;
++ lv->lv_bytes = 0;
+ lv->lv_buf = (char *)lv + buf_size - nbytes;
+ ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
+
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index 058271bde27a..823ec7bb9c67 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -41,14 +41,13 @@ struct inet_peer {
+ struct rcu_head gc_rcu;
+ };
+ /*
+- * Once inet_peer is queued for deletion (refcnt == -1), following fields
+- * are not available: rid, ip_id_count
++ * Once inet_peer is queued for deletion (refcnt == -1), following field
++ * is not available: rid
+ * We can share memory with rcu_head to help keep inet_peer small.
+ */
+ union {
+ struct {
+ atomic_t rid; /* Frag reception counter */
+- atomic_t ip_id_count; /* IP ID for the next packet */
+ };
+ struct rcu_head rcu;
+ struct inet_peer *gc_next;
+@@ -165,7 +164,7 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+ void inetpeer_invalidate_tree(struct inet_peer_base *);
+
+ /*
+- * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
++ * temporary check to make sure we dont access rid, tcp_ts,
+ * tcp_ts_stamp if no refcount is taken on inet_peer
+ */
+ static inline void inet_peer_refcheck(const struct inet_peer *p)
+@@ -173,13 +172,4 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
+ WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
+ }
+
+-
+-/* can be called with or without local BH being disabled */
+-static inline int inet_getid(struct inet_peer *p, int more)
+-{
+- more++;
+- inet_peer_refcheck(p);
+- return atomic_add_return(more, &p->ip_id_count) - more;
+-}
+-
+ #endif /* _NET_INETPEER_H */
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 3ec2b0fb9d83..54de0292ac53 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -310,9 +310,10 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
+ }
+ }
+
+-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
++u32 ip_idents_reserve(u32 hash, int segs);
++void __ip_select_ident(struct iphdr *iph, int segs);
+
+-static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
++static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
+ {
+ struct iphdr *iph = ip_hdr(skb);
+
+@@ -322,24 +323,20 @@ static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, s
+ * does not change, they drop every other packet in
+ * a TCP stream using header compression.
+ */
+- iph->id = (sk && inet_sk(sk)->inet_daddr) ?
+- htons(inet_sk(sk)->inet_id++) : 0;
+- } else
+- __ip_select_ident(iph, dst, 0);
+-}
+-
+-static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
+-{
+- struct iphdr *iph = ip_hdr(skb);
+-
+- if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
+ if (sk && inet_sk(sk)->inet_daddr) {
+ iph->id = htons(inet_sk(sk)->inet_id);
+- inet_sk(sk)->inet_id += 1 + more;
+- } else
++ inet_sk(sk)->inet_id += segs;
++ } else {
+ iph->id = 0;
+- } else
+- __ip_select_ident(iph, dst, more);
++ }
++ } else {
++ __ip_select_ident(iph, segs);
++ }
++}
++
++static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
++{
++ ip_select_ident_segs(skb, sk, 1);
+ }
+
+ /*
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index a4daf9eb8562..8dd8cab88b87 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -40,6 +40,7 @@ struct ip_tunnel_prl_entry {
+
+ struct ip_tunnel_dst {
+ struct dst_entry __rcu *dst;
++ __be32 saddr;
+ };
+
+ struct ip_tunnel {
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index d640925bc454..d6815688ad9e 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -660,8 +660,6 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
+ return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
+ }
+
+-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
+-
+ int ip6_dst_hoplimit(struct dst_entry *dst);
+
+ /*
+diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
+index f257486f17be..3f36d45b714a 100644
+--- a/include/net/secure_seq.h
++++ b/include/net/secure_seq.h
+@@ -3,8 +3,6 @@
+
+ #include <linux/types.h>
+
+-__u32 secure_ip_id(__be32 daddr);
+-__u32 secure_ipv6_id(const __be32 daddr[4]);
+ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport);
+diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
+index f14e54a05691..022d18ab27a6 100644
+--- a/net/batman-adv/fragmentation.c
++++ b/net/batman-adv/fragmentation.c
+@@ -128,6 +128,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
+ {
+ struct batadv_frag_table_entry *chain;
+ struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
++ struct batadv_frag_list_entry *frag_entry_last = NULL;
+ struct batadv_frag_packet *frag_packet;
+ uint8_t bucket;
+ uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
+@@ -180,11 +181,14 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
+ ret = true;
+ goto out;
+ }
++
++ /* store current entry because it could be the last in list */
++ frag_entry_last = frag_entry_curr;
+ }
+
+- /* Reached the end of the list, so insert after 'frag_entry_curr'. */
+- if (likely(frag_entry_curr)) {
+- hlist_add_after(&frag_entry_curr->list, &frag_entry_new->list);
++ /* Reached the end of the list, so insert after 'frag_entry_last'. */
++ if (likely(frag_entry_last)) {
++ hlist_add_after(&frag_entry_last->list, &frag_entry_new->list);
+ chain->size += skb->len - hdr_size;
+ chain->timestamp = jiffies;
+ ret = true;
+diff --git a/net/compat.c b/net/compat.c
+index 9a76eaf63184..bc8aeefddf3f 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+ {
+ int tot_len;
+
+- if (kern_msg->msg_namelen) {
++ if (kern_msg->msg_name && kern_msg->msg_namelen) {
+ if (mode == VERIFY_READ) {
+ int err = move_addr_to_kernel(kern_msg->msg_name,
+ kern_msg->msg_namelen,
+@@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+ if (err < 0)
+ return err;
+ }
+- if (kern_msg->msg_name)
+- kern_msg->msg_name = kern_address;
+- } else
++ kern_msg->msg_name = kern_address;
++ } else {
+ kern_msg->msg_name = NULL;
++ kern_msg->msg_namelen = 0;
++ }
+
+ tot_len = iov_from_user_compat_to_kern(kern_iov,
+ (struct compat_iovec __user *)kern_msg->msg_iov,
+diff --git a/net/core/iovec.c b/net/core/iovec.c
+index b61869429f4c..26dc0062652f 100644
+--- a/net/core/iovec.c
++++ b/net/core/iovec.c
+@@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
+ {
+ int size, ct, err;
+
+- if (m->msg_namelen) {
++ if (m->msg_name && m->msg_namelen) {
+ if (mode == VERIFY_READ) {
+ void __user *namep;
+ namep = (void __user __force *) m->msg_name;
+@@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
+ if (err < 0)
+ return err;
+ }
+- if (m->msg_name)
+- m->msg_name = address;
++ m->msg_name = address;
+ } else {
+ m->msg_name = NULL;
++ m->msg_namelen = 0;
+ }
+
+ size = m->msg_iovlen * sizeof(struct iovec);
+@@ -107,6 +107,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);
+ int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+ int offset, int len)
+ {
++ /* No data? Done! */
++ if (len == 0)
++ return 0;
++
+ /* Skip over the finished iovecs */
+ while (offset >= iov->iov_len) {
+ offset -= iov->iov_len;
+diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
+index 897da56f3aff..ba71212f0251 100644
+--- a/net/core/secure_seq.c
++++ b/net/core/secure_seq.c
+@@ -85,31 +85,6 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
+ #endif
+
+ #ifdef CONFIG_INET
+-__u32 secure_ip_id(__be32 daddr)
+-{
+- u32 hash[MD5_DIGEST_WORDS];
+-
+- net_secret_init();
+- hash[0] = (__force __u32) daddr;
+- hash[1] = net_secret[13];
+- hash[2] = net_secret[14];
+- hash[3] = net_secret[15];
+-
+- md5_transform(hash, net_secret);
+-
+- return hash[0];
+-}
+-
+-__u32 secure_ipv6_id(const __be32 daddr[4])
+-{
+- __u32 hash[4];
+-
+- net_secret_init();
+- memcpy(hash, daddr, 16);
+- md5_transform(hash, net_secret);
+-
+- return hash[0];
+-}
+
+ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 9433047b2453..6ab5f7721cdb 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2968,9 +2968,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ tail = nskb;
+
+ __copy_skb_header(nskb, head_skb);
+- nskb->mac_len = head_skb->mac_len;
+
+ skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
++ skb_reset_mac_len(nskb);
+
+ skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
+ nskb->data - tnl_hlen,
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 9db3b877fcaf..0ffcd4d64e0a 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -369,7 +369,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
+ pip->saddr = fl4.saddr;
+ pip->protocol = IPPROTO_IGMP;
+ pip->tot_len = 0; /* filled in later */
+- ip_select_ident(skb, &rt->dst, NULL);
++ ip_select_ident(skb, NULL);
+ ((u8 *)&pip[1])[0] = IPOPT_RA;
+ ((u8 *)&pip[1])[1] = 4;
+ ((u8 *)&pip[1])[2] = 0;
+@@ -714,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
+ iph->daddr = dst;
+ iph->saddr = fl4.saddr;
+ iph->protocol = IPPROTO_IGMP;
+- ip_select_ident(skb, &rt->dst, NULL);
++ ip_select_ident(skb, NULL);
+ ((u8 *)&iph[1])[0] = IPOPT_RA;
+ ((u8 *)&iph[1])[1] = 4;
+ ((u8 *)&iph[1])[2] = 0;
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index 48f424465112..bf2cb4a4714b 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -26,20 +26,7 @@
+ * Theory of operations.
+ * We keep one entry for each peer IP address. The nodes contains long-living
+ * information about the peer which doesn't depend on routes.
+- * At this moment this information consists only of ID field for the next
+- * outgoing IP packet. This field is incremented with each packet as encoded
+- * in inet_getid() function (include/net/inetpeer.h).
+- * At the moment of writing this notes identifier of IP packets is generated
+- * to be unpredictable using this code only for packets subjected
+- * (actually or potentially) to defragmentation. I.e. DF packets less than
+- * PMTU in size when local fragmentation is disabled use a constant ID and do
+- * not use this code (see ip_select_ident() in include/net/ip.h).
+ *
+- * Route cache entries hold references to our nodes.
+- * New cache entries get references via lookup by destination IP address in
+- * the avl tree. The reference is grabbed only when it's needed i.e. only
+- * when we try to output IP packet which needs an unpredictable ID (see
+- * __ip_select_ident() in net/ipv4/route.c).
+ * Nodes are removed only when reference counter goes to 0.
+ * When it's happened the node may be removed when a sufficient amount of
+ * time has been passed since its last use. The less-recently-used entry can
+@@ -62,7 +49,6 @@
+ * refcnt: atomically against modifications on other CPU;
+ * usually under some other lock to prevent node disappearing
+ * daddr: unchangeable
+- * ip_id_count: atomic value (no lock needed)
+ */
+
+ static struct kmem_cache *peer_cachep __read_mostly;
+@@ -497,10 +483,6 @@ relookup:
+ p->daddr = *daddr;
+ atomic_set(&p->refcnt, 1);
+ atomic_set(&p->rid, 0);
+- atomic_set(&p->ip_id_count,
+- (daddr->family == AF_INET) ?
+- secure_ip_id(daddr->addr.a4) :
+- secure_ipv6_id(daddr->addr.a6));
+ p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+ p->rate_tokens = 0;
+ /* 60*HZ is arbitrary, but chosen enough high so that the first
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index a52f50187b54..4ecc1600f84d 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+ iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
+ iph->saddr = saddr;
+ iph->protocol = sk->sk_protocol;
+- ip_select_ident(skb, &rt->dst, sk);
++ ip_select_ident(skb, sk);
+
+ if (opt && opt->opt.optlen) {
+ iph->ihl += opt->opt.optlen>>2;
+@@ -430,8 +430,7 @@ packet_routed:
+ ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
+ }
+
+- ip_select_ident_more(skb, &rt->dst, sk,
+- (skb_shinfo(skb)->gso_segs ?: 1) - 1);
++ ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);
+
+ /* TODO : should we use skb->sk here instead of sk ? */
+ skb->priority = sk->sk_priority;
+@@ -1379,7 +1378,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
+ iph->ttl = ttl;
+ iph->protocol = sk->sk_protocol;
+ ip_copy_addrs(iph, fl4);
+- ip_select_ident(skb, &rt->dst, sk);
++ ip_select_ident(skb, sk);
+
+ if (opt) {
+ iph->ihl += opt->optlen>>2;
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index b77b6a55b05e..e3e3a91f249e 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -69,23 +69,25 @@ static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
+ }
+
+ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
+- struct dst_entry *dst)
++ struct dst_entry *dst, __be32 saddr)
+ {
+ struct dst_entry *old_dst;
+
+ dst_clone(dst);
+ old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
+ dst_release(old_dst);
++ idst->saddr = saddr;
+ }
+
+-static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
++static void tunnel_dst_set(struct ip_tunnel *t,
++ struct dst_entry *dst, __be32 saddr)
+ {
+- __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
++ __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);
+ }
+
+ static void tunnel_dst_reset(struct ip_tunnel *t)
+ {
+- tunnel_dst_set(t, NULL);
++ tunnel_dst_set(t, NULL, 0);
+ }
+
+ void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
+@@ -93,20 +95,25 @@ void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
+ int i;
+
+ for_each_possible_cpu(i)
+- __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
++ __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0);
+ }
+ EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
+
+-static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
++static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
++ u32 cookie, __be32 *saddr)
+ {
++ struct ip_tunnel_dst *idst;
+ struct dst_entry *dst;
+
+ rcu_read_lock();
+- dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
++ idst = this_cpu_ptr(t->dst_cache);
++ dst = rcu_dereference(idst->dst);
+ if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+ dst = NULL;
+ if (dst) {
+- if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
++ if (!dst->obsolete || dst->ops->check(dst, cookie)) {
++ *saddr = idst->saddr;
++ } else {
+ tunnel_dst_reset(t);
+ dst_release(dst);
+ dst = NULL;
+@@ -366,7 +373,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
+
+ if (!IS_ERR(rt)) {
+ tdev = rt->dst.dev;
+- tunnel_dst_set(tunnel, &rt->dst);
++ tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
+ ip_rt_put(rt);
+ }
+ if (dev->type != ARPHRD_ETHER)
+@@ -610,7 +617,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
+ tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
+
+- rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
++ rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL;
+
+ if (!rt) {
+ rt = ip_route_output_key(tunnel->net, &fl4);
+@@ -620,7 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ goto tx_error;
+ }
+ if (connected)
+- tunnel_dst_set(tunnel, &rt->dst);
++ tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
+ }
+
+ if (rt->dst.dev == dev) {
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index bcf206c79005..847e69cbff7e 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -74,7 +74,7 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
+ iph->daddr = dst;
+ iph->saddr = src;
+ iph->ttl = ttl;
+- __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
++ __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
+
+ err = ip_local_out_sk(sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index d84dc8d4c916..d11a50d24295 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1663,7 +1663,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
+ iph->protocol = IPPROTO_IPIP;
+ iph->ihl = 5;
+ iph->tot_len = htons(skb->len);
+- ip_select_ident(skb, skb_dst(skb), NULL);
++ ip_select_ident(skb, NULL);
+ ip_send_check(iph);
+
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index a9dbe58bdfe7..2c65160565e1 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -389,7 +389,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
+ iph->check = 0;
+ iph->tot_len = htons(length);
+ if (!iph->id)
+- ip_select_ident(skb, &rt->dst, NULL);
++ ip_select_ident(skb, NULL);
+
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index be9f2b1ac3ab..fd618d48f4ce 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -89,6 +89,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/times.h>
+ #include <linux/slab.h>
++#include <linux/jhash.h>
+ #include <net/dst.h>
+ #include <net/net_namespace.h>
+ #include <net/protocol.h>
+@@ -456,39 +457,45 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+ return neigh_create(&arp_tbl, pkey, dev);
+ }
+
+-/*
+- * Peer allocation may fail only in serious out-of-memory conditions. However
+- * we still can generate some output.
+- * Random ID selection looks a bit dangerous because we have no chances to
+- * select ID being unique in a reasonable period of time.
+- * But broken packet identifier may be better than no packet at all.
++#define IP_IDENTS_SZ 2048u
++struct ip_ident_bucket {
++ atomic_t id;
++ u32 stamp32;
++};
++
++static struct ip_ident_bucket *ip_idents __read_mostly;
++
++/* In order to protect privacy, we add a perturbation to identifiers
++ * if one generator is seldom used. This makes hard for an attacker
++ * to infer how many packets were sent between two points in time.
+ */
+-static void ip_select_fb_ident(struct iphdr *iph)
++u32 ip_idents_reserve(u32 hash, int segs)
+ {
+- static DEFINE_SPINLOCK(ip_fb_id_lock);
+- static u32 ip_fallback_id;
+- u32 salt;
++ struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
++ u32 old = ACCESS_ONCE(bucket->stamp32);
++ u32 now = (u32)jiffies;
++ u32 delta = 0;
++
++ if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
++ delta = prandom_u32_max(now - old);
+
+- spin_lock_bh(&ip_fb_id_lock);
+- salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
+- iph->id = htons(salt & 0xFFFF);
+- ip_fallback_id = salt;
+- spin_unlock_bh(&ip_fb_id_lock);
++ return atomic_add_return(segs + delta, &bucket->id) - segs;
+ }
++EXPORT_SYMBOL(ip_idents_reserve);
+
+-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
++void __ip_select_ident(struct iphdr *iph, int segs)
+ {
+- struct net *net = dev_net(dst->dev);
+- struct inet_peer *peer;
++ static u32 ip_idents_hashrnd __read_mostly;
++ u32 hash, id;
+
+- peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
+- if (peer) {
+- iph->id = htons(inet_getid(peer, more));
+- inet_putpeer(peer);
+- return;
+- }
++ net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
+
+- ip_select_fb_ident(iph);
++ hash = jhash_3words((__force u32)iph->daddr,
++ (__force u32)iph->saddr,
++ iph->protocol,
++ ip_idents_hashrnd);
++ id = ip_idents_reserve(hash, segs);
++ iph->id = htons(id);
+ }
+ EXPORT_SYMBOL(__ip_select_ident);
+
+@@ -2705,6 +2712,12 @@ int __init ip_rt_init(void)
+ {
+ int rc = 0;
+
++ ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
++ if (!ip_idents)
++ panic("IP: failed to allocate ip_idents\n");
++
++ prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
++
+ #ifdef CONFIG_IP_ROUTE_CLASSID
+ ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
+ if (!ip_rt_acct)
+diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
+index 48539fff6357..08c8ab490fe5 100644
+--- a/net/ipv4/tcp_vegas.c
++++ b/net/ipv4/tcp_vegas.c
+@@ -219,7 +219,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+ * This is:
+ * (actual rate in segments) * baseRTT
+ */
+- target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
++ target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
++ do_div(target_cwnd, rtt);
+
+ /* Calculate the difference between the window we had,
+ * and the window we would like to have. This quantity
+diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
+index 1b8e28fcd7e1..4cd4e1be3a71 100644
+--- a/net/ipv4/tcp_veno.c
++++ b/net/ipv4/tcp_veno.c
+@@ -145,7 +145,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+
+ rtt = veno->minrtt;
+
+- target_cwnd = (tp->snd_cwnd * veno->basertt);
++ target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
+ target_cwnd <<= V_PARAM_SHIFT;
+ do_div(target_cwnd, rtt);
+
+diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
+index 05f2b484954f..91771a7c802f 100644
+--- a/net/ipv4/xfrm4_mode_tunnel.c
++++ b/net/ipv4/xfrm4_mode_tunnel.c
+@@ -58,12 +58,12 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
+
+ top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
+ 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
+- ip_select_ident(skb, dst->child, NULL);
+
+ top_iph->ttl = ip4_dst_hoplimit(dst->child);
+
+ top_iph->saddr = x->props.saddr.a4;
+ top_iph->daddr = x->id.daddr.a4;
++ ip_select_ident(skb, NULL);
+
+ return 0;
+ }
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index fbf11562b54c..1362d3a7b26f 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -537,6 +537,20 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
+ skb_copy_secmark(to, from);
+ }
+
++static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
++{
++ static u32 ip6_idents_hashrnd __read_mostly;
++ u32 hash, id;
++
++ net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
++
++ hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
++ hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
++
++ id = ip_idents_reserve(hash, 1);
++ fhdr->identification = htonl(id);
++}
++
+ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+ {
+ struct sk_buff *frag;
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 56596ce390a1..6179ac186ab9 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -8,31 +8,6 @@
+ #include <net/addrconf.h>
+ #include <net/secure_seq.h>
+
+-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+-{
+- static atomic_t ipv6_fragmentation_id;
+- struct in6_addr addr;
+- int ident;
+-
+-#if IS_ENABLED(CONFIG_IPV6)
+- struct inet_peer *peer;
+- struct net *net;
+-
+- net = dev_net(rt->dst.dev);
+- peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+- if (peer) {
+- fhdr->identification = htonl(inet_getid(peer, 0));
+- inet_putpeer(peer);
+- return;
+- }
+-#endif
+- ident = atomic_inc_return(&ipv6_fragmentation_id);
+-
+- addr = rt->rt6i_dst.addr;
+- addr.s6_addr32[0] ^= (__force __be32)ident;
+- fhdr->identification = htonl(secure_ipv6_id(addr.s6_addr32));
+-}
+-EXPORT_SYMBOL(ipv6_select_ident);
+
+ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ {
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index c47444e4cf8c..7f0e1cf2d7e8 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -883,7 +883,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ iph->daddr = cp->daddr.ip;
+ iph->saddr = saddr;
+ iph->ttl = old_iph->ttl;
+- ip_select_ident(skb, &rt->dst, NULL);
++ ip_select_ident(skb, NULL);
+
+ /* Another hack: avoid icmp_send in ip_fragment */
+ skb->local_df = 1;
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index 0b999987b658..a6953b0436a5 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1151,6 +1151,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
+ asoc->c = new->c;
+ asoc->peer.rwnd = new->peer.rwnd;
+ asoc->peer.sack_needed = new->peer.sack_needed;
++ asoc->peer.auth_capable = new->peer.auth_capable;
+ asoc->peer.i = new->peer.i;
+ sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
+ asoc->peer.i.initial_tsn, GFP_ATOMIC);
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 0f4d15fc2627..8267b06c3646 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -599,7 +599,7 @@ out:
+ return err;
+ no_route:
+ kfree_skb(nskb);
+- IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+
+ /* FIXME: Returning the 'err' will effect all the associations
+ * associated with a socket, although only one of the paths of the
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index c08fbd11ceff..ed608432e4f9 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -2107,6 +2107,8 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+ goto no_transform;
+ }
+
++ dst_hold(&xdst->u.dst);
++ xdst->u.dst.flags |= DST_NOCACHE;
+ route = xdst->route;
+ }
+ }
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 51398ae6cda8..d4c0fbe568ff 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -177,9 +177,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
+ attrs[XFRMA_ALG_AEAD] ||
+ attrs[XFRMA_ALG_CRYPT] ||
+ attrs[XFRMA_ALG_COMP] ||
+- attrs[XFRMA_TFCPAD] ||
+- (ntohl(p->id.spi) >= 0x10000))
+-
++ attrs[XFRMA_TFCPAD])
+ goto out;
+ break;
+
+@@ -207,7 +205,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
+ attrs[XFRMA_ALG_AUTH] ||
+ attrs[XFRMA_ALG_AUTH_TRUNC] ||
+ attrs[XFRMA_ALG_CRYPT] ||
+- attrs[XFRMA_TFCPAD])
++ attrs[XFRMA_TFCPAD] ||
++ (ntohl(p->id.spi) >= 0x10000))
+ goto out;
+ break;
+