mptcp: avoid flipping mp_capable field in syn_recv_sock()
If multiple CPUs race on the same req_sock in syn_recv_sock(),
flipping such a field can leave the child socket in an inconsistent
state: the CPU losing the req ownership may still clear the mptcp
request socket's mp_capable flag while the CPU owning the request is
cloning the socket, leaving the child socket with 'is_mptcp' set but
the 'mp_capable' flag cleared.
Such a socket is left with its 'conn' field cleared, leading to an
oops in a later mptcp callback.
Address the issue by tracking the fallback status in a local variable
instead of flipping the request socket field.
Fixes: 58b0991962 ("mptcp: create msk early")
Co-developed-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4c8941de78
parent 5e20087d1b
1 changed file with 30 additions and 16 deletions
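To make the race concrete before reading the diff: the sketch below is a
minimal user-space illustration, not the kernel code; the fake_req struct,
the thread roles, and the helper functions are invented for this example.
It shows why two reads of a shared request field can disagree under a
concurrent writer, while a decision kept in a stack-local variable cannot
change midway.

    /*
     * Hypothetical user-space sketch of the race described above; build
     * with "cc -pthread sketch.c". Everything here is illustrative, not
     * kernel code.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_req {
            bool mp_capable;        /* shared: both "CPUs" may write it */
    };

    /* The CPU that lost ownership of the request: in the buggy code it
     * could still clear the shared flag, as 'subflow_req->mp_capable = 0'
     * did.
     */
    static void *losing_cpu(void *arg)
    {
            struct fake_req *req = arg;

            req->mp_capable = false;        /* racy write to shared state */
            return NULL;
    }

    /* Buggy shape: the owning CPU consults the shared flag twice while
     * cloning the child; the write above can land between the two reads,
     * yielding a child with 'is_mptcp' set but the 'mp_capable' decision
     * lost.
     */
    static void owning_cpu_buggy(struct fake_req *req)
    {
            bool is_mptcp = req->mp_capable;        /* first read */
            bool mp_capable = req->mp_capable;      /* second read may differ */

            if (is_mptcp && !mp_capable)
                    printf("inconsistent child state\n");
    }

    /* Fixed shape: the decision is read once into a stack-local variable,
     * so a concurrent write to the request cannot change the outcome
     * mid-clone.
     */
    static void owning_cpu_fixed(struct fake_req *req)
    {
            bool fallback = !req->mp_capable;       /* read once, keep locally */

            printf(fallback ? "plain TCP child\n" : "MPTCP child\n");
    }

    int main(void)
    {
            struct fake_req req = { .mp_capable = true };
            pthread_t t;

            pthread_create(&t, NULL, losing_cpu, &req);
            owning_cpu_buggy(&req);         /* may or may not hit the window */
            owning_cpu_fixed(&req);
            pthread_join(t, NULL);
            return 0;
    }

The fix below follows the owning_cpu_fixed() shape: subflow_syn_recv_sock()
records its fallback decision in a stack-local 'fallback' flag instead of
writing it back into the shared request socket.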
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -376,6 +376,17 @@ static void mptcp_force_close(struct sock *sk)
 	sk_common_release(sk);
 }
 
+static void subflow_ulp_fallback(struct sock *sk,
+				 struct mptcp_subflow_context *old_ctx)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	mptcp_subflow_tcp_fallback(sk, old_ctx);
+	icsk->icsk_ulp_ops = NULL;
+	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
+	tcp_sk(sk)->is_mptcp = 0;
+}
+
 static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 					  struct sk_buff *skb,
 					  struct request_sock *req,
@@ -388,6 +399,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 	struct tcp_options_received opt_rx;
 	bool fallback_is_fatal = false;
 	struct sock *new_msk = NULL;
+	bool fallback = false;
 	struct sock *child;
 
 	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
@@ -412,14 +424,14 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 			subflow_req->remote_key = opt_rx.mptcp.sndr_key;
 			subflow_req->remote_key_valid = 1;
 		} else {
-			subflow_req->mp_capable = 0;
+			fallback = true;
 			goto create_child;
 		}
 
 create_msk:
 		new_msk = mptcp_sk_clone(listener->conn, req);
 		if (!new_msk)
-			subflow_req->mp_capable = 0;
+			fallback = true;
 	} else if (subflow_req->mp_join) {
 		fallback_is_fatal = true;
 		opt_rx.mptcp.mp_join = 0;
@@ -438,12 +450,18 @@ create_child:
 	if (child && *own_req) {
 		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);
 
-		/* we have null ctx on TCP fallback, which is fatal on
-		 * MPJ handshake
+		/* we need to fallback on ctx allocation failure and on pre-reqs
+		 * checking above. In the latter scenario we additionally need
+		 * to reset the context to non MPTCP status.
 		 */
-		if (!ctx) {
+		if (!ctx || fallback) {
 			if (fallback_is_fatal)
 				goto close_child;
+
+			if (ctx) {
+				subflow_ulp_fallback(child, ctx);
+				kfree_rcu(ctx, rcu);
+			}
 			goto out;
 		}
 
@@ -474,6 +492,13 @@ out:
 	/* dispose of the left over mptcp master, if any */
 	if (unlikely(new_msk))
 		mptcp_force_close(new_msk);
+
+	/* check for expected invariant - should never trigger, just help
+	 * catching eariler subtle bugs
+	 */
+	WARN_ON_ONCE(*own_req && child && tcp_sk(child)->is_mptcp &&
+		     (!mptcp_subflow_ctx(child) ||
+		      !mptcp_subflow_ctx(child)->conn));
 	return child;
 
 close_child:
@@ -1076,17 +1101,6 @@ static void subflow_ulp_release(struct sock *sk)
 		kfree_rcu(ctx, rcu);
 }
 
-static void subflow_ulp_fallback(struct sock *sk,
-				 struct mptcp_subflow_context *old_ctx)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-
-	mptcp_subflow_tcp_fallback(sk, old_ctx);
-	icsk->icsk_ulp_ops = NULL;
-	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
-	tcp_sk(sk)->is_mptcp = 0;
-}
-
 static void subflow_ulp_clone(const struct request_sock *req,
 			      struct sock *newsk,
 			      const gfp_t priority)
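As a reading aid, here is a condensed restatement of the fixed branch; the
identifiers come from the diff above, the surrounding control flow is
elided, and the comments are editorial rather than from the kernel source:

    /* Condensed sketch of the fixed branch in subflow_syn_recv_sock();
     * not a complete function, comments are editorial.
     */
    if (!ctx || fallback) {             /* ctx allocation failed, or the
                                         * pre-req checks set 'fallback' */
            if (fallback_is_fatal)      /* MP_JOIN handshake: no fallback */
                    goto close_child;   /* dispose of the child socket */

            if (ctx) {                  /* ctx exists but fallback was set */
                    subflow_ulp_fallback(child, ctx);   /* detach the ULP,
                                                         * clear is_mptcp */
                    kfree_rcu(ctx, rcu);        /* free the unused context */
            }
            goto out;                   /* hand back a plain TCP child */
    }

Note that subflow_ulp_fallback() itself is only moved, not changed: the
first hunk adds it ahead of subflow_syn_recv_sock() so the new call site
can see it, and the last hunk removes the original definition further down
the file.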