tcp: add a spinlock to protect struct request_sock_queue
struct request_sock_queue fields are currently protected by the listener 'lock' (not a real spinlock).

We need to add a private spinlock instead, so that softirq handlers creating children do not have to worry about the backlog notion that the listener 'lock' carries.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
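A minimal userspace analogue of the pattern this patch introduces may help before reading the diff. It is illustrative only: the struct, function, and field names below (node, accept_fifo, fifo_add, fifo_remove) are invented for this sketch, and pthread spinlocks stand in for the kernel's spinlock_t. The point it demonstrates is the same: the accept queue owns a private lock, so the enqueue side (the softirq handler creating a child) and the dequeue side (accept()) synchronize on the queue itself rather than on the listener socket's lock.

/* Illustrative userspace sketch, not kernel code: a FIFO protected by
 * its own spinlock, mirroring rskq_lock / reqsk_queue_add() /
 * reqsk_queue_remove(). Build with -lpthread.
 */
#include <pthread.h>
#include <stddef.h>

struct node {
	struct node *next;
};

struct accept_fifo {
	pthread_spinlock_t lock;	/* plays the role of rskq_lock */
	struct node *head;
	struct node *tail;
};

static void fifo_init(struct accept_fifo *q)
{
	pthread_spin_init(&q->lock, PTHREAD_PROCESS_PRIVATE);
	q->head = NULL;
	q->tail = NULL;
}

/* producer side: what reqsk_queue_add() does under rskq_lock */
static void fifo_add(struct accept_fifo *q, struct node *n)
{
	pthread_spin_lock(&q->lock);
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;
	else
		q->head = n;
	q->tail = n;
	pthread_spin_unlock(&q->lock);
}

/* consumer side: what reqsk_queue_remove() does; returns NULL when
 * empty instead of assuming a non-empty queue, just like the patched
 * kernel helper.
 */
static struct node *fifo_remove(struct accept_fifo *q)
{
	struct node *n;

	pthread_spin_lock(&q->lock);
	n = q->head;
	if (n) {
		q->head = n->next;
		if (!q->head)
			q->tail = NULL;
	}
	pthread_spin_unlock(&q->lock);
	return n;
}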
commit fff1f3001c
parent f6d3125fa3

3 changed files with 26 additions and 33 deletions
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -176,9 +176,11 @@ struct fastopen_queue {
  *
  */
 struct request_sock_queue {
+	spinlock_t		rskq_lock;
+	u8			rskq_defer_accept;
+
 	struct request_sock	*rskq_accept_head;
 	struct request_sock	*rskq_accept_tail;
-	u8			rskq_defer_accept;
 	struct listen_sock	*listen_opt;
 	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
 					     * if TFO is enabled.
@@ -196,16 +198,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue);
 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 			   bool reset);
 
-static inline struct request_sock *
-reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
-{
-	struct request_sock *req = queue->rskq_accept_head;
-
-	queue->rskq_accept_head = NULL;
-	return req;
-}
-
-static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
 {
 	return queue->rskq_accept_head == NULL;
 }
@@ -215,6 +208,7 @@ static inline void reqsk_queue_add(struct request_sock_queue *queue,
 				   struct sock *parent,
 				   struct sock *child)
 {
+	spin_lock(&queue->rskq_lock);
 	req->sk = child;
 	sk_acceptq_added(parent);
 
@@ -225,18 +219,23 @@ static inline void reqsk_queue_add(struct request_sock_queue *queue,
 		queue->rskq_accept_tail = req;
 	req->dl_next = NULL;
+	spin_unlock(&queue->rskq_lock);
 }
 
-static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
+						      struct sock *parent)
 {
-	struct request_sock *req = queue->rskq_accept_head;
-
-	WARN_ON(req == NULL);
-
-	queue->rskq_accept_head = req->dl_next;
-	if (queue->rskq_accept_head == NULL)
-		queue->rskq_accept_tail = NULL;
+	struct request_sock *req;
+
+	spin_lock_bh(&queue->rskq_lock);
+	req = queue->rskq_accept_head;
+	if (req) {
+		sk_acceptq_removed(parent);
+		queue->rskq_accept_head = req->dl_next;
+		if (queue->rskq_accept_head == NULL)
+			queue->rskq_accept_tail = NULL;
+	}
+	spin_unlock_bh(&queue->rskq_lock);
 	return req;
 }
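A note on the two lock variants above (an inference from standard kernel locking rules, not spelled out in the commit message): reqsk_queue_add() runs in softirq context, where bottom halves are already disabled, so a plain spin_lock() suffices; reqsk_queue_remove() is called from process context via accept(), so it takes spin_lock_bh() to keep a softirq arriving on the same CPU from interrupting the lock holder and spinning forever on rskq_lock.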
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -58,6 +58,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 		return -ENOMEM;
 
 	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
+	spin_lock_init(&queue->rskq_lock);
 	spin_lock_init(&queue->syn_wait_lock);
 
 	spin_lock_init(&queue->fastopenq.lock);
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -330,10 +330,9 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 		if (error)
 			goto out_err;
 	}
-	req = reqsk_queue_remove(queue);
+	req = reqsk_queue_remove(queue, sk);
 	newsk = req->sk;
 
-	sk_acceptq_removed(sk);
 	if (sk->sk_protocol == IPPROTO_TCP &&
 	    tcp_rsk(req)->tfo_listener) {
 		spin_lock_bh(&queue->fastopenq.lock);
@@ -832,11 +831,7 @@ void inet_csk_listen_stop(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
-	struct request_sock *acc_req;
-	struct request_sock *req;
-
-	/* make all the listen_opt local to us */
-	acc_req = reqsk_queue_yank_acceptq(queue);
+	struct request_sock *next, *req;
 
 	/* Following specs, it would be better either to send FIN
 	 * (and enter FIN-WAIT-1, it is normal close)
@@ -848,11 +843,9 @@ void inet_csk_listen_stop(struct sock *sk)
 	 */
 	reqsk_queue_destroy(queue);
 
-	while ((req = acc_req) != NULL) {
+	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
 		struct sock *child = req->sk;
 
-		acc_req = req->dl_next;
-
 		local_bh_disable();
 		bh_lock_sock(child);
 		WARN_ON(sock_owned_by_user(child));
@@ -882,18 +875,18 @@ void inet_csk_listen_stop(struct sock *sk)
 		local_bh_enable();
 		sock_put(child);
 
-		sk_acceptq_removed(sk);
 		reqsk_put(req);
 	}
 	if (queue->fastopenq.rskq_rst_head) {
 		/* Free all the reqs queued in rskq_rst_head. */
 		spin_lock_bh(&queue->fastopenq.lock);
-		acc_req = queue->fastopenq.rskq_rst_head;
+		req = queue->fastopenq.rskq_rst_head;
 		queue->fastopenq.rskq_rst_head = NULL;
 		spin_unlock_bh(&queue->fastopenq.lock);
-		while ((req = acc_req) != NULL) {
-			acc_req = req->dl_next;
+		while (req != NULL) {
+			next = req->dl_next;
 			reqsk_put(req);
+			req = next;
 		}
 	}
 	WARN_ON(sk->sk_ack_backlog);
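One consequence visible in the inet_connection_sock.c hunks: the accept-queue length accounting, sk_acceptq_removed(parent), moves inside reqsk_queue_remove() under rskq_lock. That is why both inet_csk_accept() and inet_csk_listen_stop() drop their separate sk_acceptq_removed(sk) calls, and why inet_csk_listen_stop() can pop requests one at a time with reqsk_queue_remove(queue, sk) instead of detaching the whole list through the now-deleted reqsk_queue_yank_acceptq().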