Mirror of https://github.com/Fishwaldo/build.git (synced 2025-03-25 08:11:45 +00:00)
330 lines · 11 KiB · Diff
diff --git a/Makefile b/Makefile
index d049e53a6960..52f2dd8dcebd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 81
+SUBLEVEL = 82
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 1f1ff7e7b9cf..ba079e279b58 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1629,12 +1629,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_age_hva(start, end);
 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_test_age_hva(hva);
 	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
 }
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0e2919dd8df3..1395eeb6005f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1250,7 +1250,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
 		insn_count = bpf_jit_insn(jit, fp, i);
 		if (insn_count < 0)
 			return -1;
-		jit->addrs[i + 1] = jit->prg; /* Next instruction address */
+		/* Next instruction address */
+		jit->addrs[i + insn_count] = jit->prg;
 	}
 	bpf_jit_epilogue(jit);
 
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 349dd23e2876..0cdeb2b483a0 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -25,9 +25,11 @@ void destroy_context(struct mm_struct *mm);
 void __tsb_context_switch(unsigned long pgd_pa,
 			  struct tsb_config *tsb_base,
 			  struct tsb_config *tsb_huge,
-			  unsigned long tsb_descr_pa);
+			  unsigned long tsb_descr_pa,
+			  unsigned long secondary_ctx);
 
-static inline void tsb_context_switch(struct mm_struct *mm)
+static inline void tsb_context_switch_ctx(struct mm_struct *mm,
+					  unsigned long ctx)
 {
 	__tsb_context_switch(__pa(mm->pgd),
 			     &mm->context.tsb_block[0],
@@ -38,9 +40,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[0]));
+			     , __pa(&mm->context.tsb_descr[0]),
+			     ctx);
 }
 
+#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
+
 void tsb_grow(struct mm_struct *mm,
 	      unsigned long tsb_index,
 	      unsigned long mm_rss);
@@ -110,8 +115,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * cpu0 to update it's TSB because at that point the cpu_vm_mask
 	 * only had cpu1 set in it.
 	 */
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 
 	/* Any time a processor runs a context on an address space
 	 * for the first time, we must flush that context out of the
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 395ec1800530..7d961f6e3907 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -375,6 +375,7 @@ tsb_flush:
  * %o1: TSB base config pointer
  * %o2: TSB huge config pointer, or NULL if none
  * %o3: Hypervisor TSB descriptor physical address
+ * %o4: Secondary context to load, if non-zero
  *
  * We have to run this whole thing with interrupts
  * disabled so that the current cpu doesn't change
@@ -387,6 +388,17 @@ __tsb_context_switch:
 	rdpr	%pstate, %g1
 	wrpr	%g1, PSTATE_IE, %pstate
 
+	brz,pn	%o4, 1f
+	 mov	SECONDARY_CONTEXT, %o5
+
+661:	stxa	%o4, [%o5] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%o4, [%o5] ASI_MMU
+	.previous
+	flush	%g6
+
+1:
 	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
 
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
index 17bd2e167e07..df707a8ad311 100644
--- a/arch/sparc/power/hibernate.c
+++ b/arch/sparc/power/hibernate.c
@@ -35,6 +35,5 @@ void restore_processor_state(void)
 {
 	struct mm_struct *mm = current->active_mm;
 
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 }
diff --git a/mm/mempool.c b/mm/mempool.c
index 004d42b1dfaf..7924f4f58a6d 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -135,8 +135,8 @@ static void *remove_element(mempool_t *pool)
 	void *element = pool->elements[--pool->curr_nr];
 
 	BUG_ON(pool->curr_nr < 0);
-	check_element(pool, element);
 	kasan_unpoison_element(pool, element);
+	check_element(pool, element);
 	return element;
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 4b0853194a03..24d243084aab 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2551,7 +2551,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 {
 	if (tx_path)
 		return skb->ip_summed != CHECKSUM_PARTIAL &&
-		       skb->ip_summed != CHECKSUM_NONE;
+		       skb->ip_summed != CHECKSUM_UNNECESSARY;
 
 	return skb->ip_summed == CHECKSUM_NONE;
 }
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 5d58a6703a43..09c73dd541c5 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -922,11 +922,12 @@ static int __ip_append_data(struct sock *sk,
 		csummode = CHECKSUM_PARTIAL;
 
 	cork->length += length;
-	if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
-	    (skb && skb_is_gso(skb))) &&
+	if ((skb && skb_is_gso(skb)) ||
+	    (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
+	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
-	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
 					 maxfraglen, flags);
@@ -1242,6 +1243,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 		return -EINVAL;
 
 	if ((size + skb->len > mtu) &&
+	    (skb_queue_len(&sk->sk_write_queue) == 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO)) {
 		if (skb->ip_summed != CHECKSUM_PARTIAL)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8f13b2eaabf8..f0dabd125c43 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2503,8 +2503,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
-	    (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
 		tp->snd_cwnd = tp->snd_ssthresh;
 		tp->snd_cwnd_stamp = tcp_time_stamp;
 	}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3fdcdc730f71..850d1b5bfd81 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3256,6 +3256,9 @@ int tcp_connect(struct sock *sk)
 	struct sk_buff *buff;
 	int err;
 
+	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+		return -EHOSTUNREACH; /* Routing failure or similar. */
+
 	tcp_connect_init(sk);
 
 	if (unlikely(tp->repair)) {
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index ebb34d0c5e80..1ec12a4f327e 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -606,7 +606,8 @@ static void tcp_keepalive_timer (unsigned long data)
 		goto death;
 	}
 
-	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
+	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
 		goto out;
 
 	elapsed = keepalive_time_when(tp);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e9513e397c4f..301e60829c7e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -819,7 +819,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
 	if (is_udplite)				 /*     UDP-Lite      */
 		csum = udplite_csum(skb);
 
-	else if (sk->sk_no_check_tx) {   /* UDP csum disabled */
+	else if (sk->sk_no_check_tx && !skb_is_gso(skb)) {   /* UDP csum off */
 
 		skb->ip_summed = CHECKSUM_NONE;
 		goto send;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6396f1c80ae9..6dfc3daf7c21 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -231,7 +231,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 	if (uh->check == 0)
 		uh->check = CSUM_MANGLED_0;
 
-	skb->ip_summed = CHECKSUM_NONE;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 	/* Fragment the skb. IP headers of the fragments are updated in
 	 * inet_gso_segment()
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 0de3245ea42f..e22339fad10b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1357,11 +1357,12 @@ emsgsize:
 	 */
 
 	cork->length += length;
-	if ((((length + (skb ? skb->len : headersize)) > mtu) ||
-	    (skb && skb_is_gso(skb))) &&
+	if ((skb && skb_is_gso(skb)) ||
+	    (((length + (skb ? skb->len : headersize)) > mtu) &&
+	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) &&
-	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen, exthdrlen,
 					  transhdrlen, mtu, flags, fl6);
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 01582966ffa0..2e3c12eeca07 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -86,7 +86,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 		if (uh->check == 0)
 			uh->check = CSUM_MANGLED_0;
 
-		skb->ip_summed = CHECKSUM_NONE;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		/* Check if there is enough headroom to insert fragment header. */
 		tnl_hlen = skb_tnl_header_len(skb);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 061771ca2582..148ec130d99d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3622,14 +3622,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 		if (val > INT_MAX)
 			return -EINVAL;
-		po->tp_reserve = val;
-		return 0;
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_reserve = val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_LOSS:
 	{
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index d05869646515..0915d448ba23 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -42,8 +42,8 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int
 		return PTR_ERR(target);
 
 	t->u.kernel.target = target;
+	memset(&par, 0, sizeof(par));
 	par.table     = table;
-	par.entryinfo = NULL;
 	par.target    = target;
 	par.targinfo  = t->data;
 	par.hook_mask = hook;