tcp: fix recv with flags MSG_WAITALL | MSG_PEEK
Currently, tcp_recvmsg enters a busy loop in sk_wait_data if called
with flags = MSG_WAITALL | MSG_PEEK.

sk_wait_data waits for the receive queue to become non-empty, but in
this case the queue is already non-empty; it just does not contain any
skb that we can use.

Add a "last skb seen on receive queue" argument to sk_wait_data, so
that it sleeps until the receive queue has new skbs.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=99461
Link: https://sourceware.org/bugzilla/show_bug.cgi?id=18493
Link: https://bugzilla.redhat.com/show_bug.cgi?id=1205258
Reported-by: Enrico Scholz <rh-bugzilla@ensc.de>
Reported-by: Dan Searle <dan@censornet.com>
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit dfbafc9953 (parent 3d3af88592)
5 changed files with 14 additions and 10 deletions
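For reference, a minimal userspace sketch of the scenario described above (this program is not part of the commit; error handling is omitted for brevity). It queues 5 bytes on a loopback TCP connection, then peeks for 10 with MSG_PEEK | MSG_WAITALL: on an unpatched kernel the recv() spins in sk_wait_data() at 100% CPU until the remaining bytes arrive, while on a patched kernel it sleeps.

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in addr = { .sin_family = AF_INET };
        socklen_t alen = sizeof(addr);
        char buf[16];
        int srv, cli, conn;
        ssize_t n;

        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

        /* Loopback TCP connection on an ephemeral port. */
        srv = socket(AF_INET, SOCK_STREAM, 0);
        bind(srv, (struct sockaddr *)&addr, sizeof(addr));
        listen(srv, 1);
        getsockname(srv, (struct sockaddr *)&addr, &alen);

        cli = socket(AF_INET, SOCK_STREAM, 0);
        connect(cli, (struct sockaddr *)&addr, sizeof(addr));
        conn = accept(srv, NULL, NULL);

        /* 5 bytes sit on the receive queue: non-empty, but not enough
         * to satisfy a 10-byte MSG_WAITALL request.
         */
        send(cli, "hello", 5, 0);

        if (fork() == 0) {
                /* Child: deliver the rest after a delay, so the parent
                 * has something to wait for in the meantime.
                 */
                sleep(3);
                send(cli, "world", 5, 0);
                _exit(0);
        }

        /* Buggy kernel: busy loop here until "world" arrives.
         * Fixed kernel: sleeps, then returns 10.
         */
        n = recv(conn, buf, 10, MSG_PEEK | MSG_WAITALL);
        printf("peeked %zd bytes\n", n);

        close(conn);
        close(cli);
        close(srv);
        return 0;
}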
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -902,7 +902,7 @@ void sk_stream_kill_queues(struct sock *sk);
 void sk_set_memalloc(struct sock *sk);
 void sk_clear_memalloc(struct sock *sk);
 
-int sk_wait_data(struct sock *sk, long *timeo);
+int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
 
 struct request_sock_ops;
 struct timewait_sock_ops;
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1967,20 +1967,21 @@ static void __release_sock(struct sock *sk)
  * sk_wait_data - wait for data to arrive at sk_receive_queue
  * @sk:    sock to wait on
  * @timeo: for how long
+ * @skb:   last skb seen on sk_receive_queue
  *
  * Now socket state including sk->sk_err is changed only under lock,
  * hence we may omit checks after joining wait queue.
  * We check receive queue before schedule() only as optimization;
  * it is very likely that release_sock() added new data.
  */
-int sk_wait_data(struct sock *sk, long *timeo)
+int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
 {
         int rc;
         DEFINE_WAIT(wait);
 
         prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
         set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-        rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
+        rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
         clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
         finish_wait(sk_sleep(sk), &wait);
         return rc;
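To make the predicate change above easier to follow, here is a toy model in plain C (not kernel code; the struct and function names are illustrative) of the old and new wake-up conditions passed to sk_wait_event():

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct skb { struct skb *next; };
struct queue { struct skb *head, *tail; };

/* Old condition: wake as soon as the queue is non-empty.  With MSG_PEEK
 * the peeked skbs stay queued, so this is immediately true again and the
 * waiter loops without ever sleeping.
 */
static bool old_cond(const struct queue *q)
{
        return q->head != NULL;
}

/* New condition: wake only when the tail differs from the last skb the
 * caller already inspected (NULL when the queue was empty at that point),
 * i.e. when something new has actually been appended.
 */
static bool new_cond(const struct queue *q, const struct skb *last)
{
        return q->tail != last;
}

int main(void)
{
        struct skb peeked = { NULL }, fresh = { NULL };
        struct queue q = { &peeked, &peeked };

        /* MSG_PEEK left "peeked" on the queue: the old condition is
         * already true, so the waiter would wake immediately and spin.
         */
        assert(old_cond(&q));

        /* The new condition stays false until something new arrives. */
        assert(!new_cond(&q, &peeked));
        peeked.next = &fresh;
        q.tail = &fresh;
        assert(new_cond(&q, &peeked));

        return 0;
}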
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -886,7 +886,7 @@ verify_sock_status:
                         break;
                 }
 
-                sk_wait_data(sk, &timeo);
+                sk_wait_data(sk, &timeo, NULL);
                 continue;
         found_ok_skb:
                 if (len > skb->len)
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -780,7 +780,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                                 ret = -EAGAIN;
                                 break;
                         }
-                        sk_wait_data(sk, &timeo);
+                        sk_wait_data(sk, &timeo, NULL);
                         if (signal_pending(current)) {
                                 ret = sock_intr_errno(timeo);
                                 break;
@@ -1575,7 +1575,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
         int target;             /* Read at least this many bytes */
         long timeo;
         struct task_struct *user_recv = NULL;
-        struct sk_buff *skb;
+        struct sk_buff *skb, *last;
         u32 urg_hole = 0;
 
         if (unlikely(flags & MSG_ERRQUEUE))
@@ -1635,7 +1635,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 
                 /* Next get a buffer. */
 
+                last = skb_peek_tail(&sk->sk_receive_queue);
                 skb_queue_walk(&sk->sk_receive_queue, skb) {
+                        last = skb;
                         /* Now that we have two receive queues this
                          * shouldn't happen.
                          */
@@ -1754,8 +1756,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                         /* Do not sleep, just process backlog. */
                         release_sock(sk);
                         lock_sock(sk);
-                } else
-                        sk_wait_data(sk, &timeo);
+                } else {
+                        sk_wait_data(sk, &timeo, last);
+                }
 
                 if (user_recv) {
                         int chunk;
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -613,7 +613,7 @@ static int llc_wait_data(struct sock *sk, long timeo)
                 if (signal_pending(current))
                         break;
                 rc = 0;
-                if (sk_wait_data(sk, &timeo))
+                if (sk_wait_data(sk, &timeo, NULL))
                         break;
         }
         return rc;
@@ -802,7 +802,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                         release_sock(sk);
                         lock_sock(sk);
                 } else
-                        sk_wait_data(sk, &timeo);
+                        sk_wait_data(sk, &timeo, NULL);
 
                 if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
                         net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",