net: reduce indentation level in sk_clone_lock()

Rework the initial test to jump over the init code
if memory allocation has failed.
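
Schematically, the change turns the "wrap all of the init code in
if (newsk != NULL) { ... }" shape into an early exit. A toy sketch of
the resulting shape (struct thing and thing_clone() are hypothetical
stand-ins, not the real socket helpers):

	#include <stdlib.h>
	#include <string.h>

	struct thing { int val; };

	static struct thing *thing_clone(const struct thing *old)
	{
		struct thing *new = malloc(sizeof(*new));

		if (!new)		/* was: if (new != NULL) { ...init... } */
			goto out;

		memcpy(new, old, sizeof(*new));	/* init code, no longer nested */
	out:
		return new;
	}

	int main(void)
	{
		struct thing a = { .val = 42 };
		struct thing *b = thing_clone(&a);

		free(b);
		return 0;
	}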

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20210127152731.748663-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Eric Dumazet authored on 2021-01-27 07:27:31 -08:00; committed by Jakub Kicinski
parent d1f3bdd4ea
commit bbc20b7042


@@ -1876,123 +1876,120 @@ static void sk_init_common(struct sock *sk)
 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 {
 	struct proto *prot = READ_ONCE(sk->sk_prot);
-	struct sock *newsk;
+	struct sk_filter *filter;
 	bool is_charged = true;
+	struct sock *newsk;
 
 	newsk = sk_prot_alloc(prot, priority, sk->sk_family);
-	if (newsk != NULL) {
-		struct sk_filter *filter;
+	if (!newsk)
+		goto out;
 
-		sock_copy(newsk, sk);
+	sock_copy(newsk, sk);
 
-		newsk->sk_prot_creator = prot;
+	newsk->sk_prot_creator = prot;
 
-		/* SANITY */
-		if (likely(newsk->sk_net_refcnt))
-			get_net(sock_net(newsk));
-		sk_node_init(&newsk->sk_node);
-		sock_lock_init(newsk);
-		bh_lock_sock(newsk);
-		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
-		newsk->sk_backlog.len = 0;
+	/* SANITY */
+	if (likely(newsk->sk_net_refcnt))
+		get_net(sock_net(newsk));
+	sk_node_init(&newsk->sk_node);
+	sock_lock_init(newsk);
+	bh_lock_sock(newsk);
+	newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
+	newsk->sk_backlog.len = 0;
 
-		atomic_set(&newsk->sk_rmem_alloc, 0);
-		/*
-		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
-		 */
-		refcount_set(&newsk->sk_wmem_alloc, 1);
-		atomic_set(&newsk->sk_omem_alloc, 0);
-		sk_init_common(newsk);
+	atomic_set(&newsk->sk_rmem_alloc, 0);
+
+	/* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */
+	refcount_set(&newsk->sk_wmem_alloc, 1);
+
+	atomic_set(&newsk->sk_omem_alloc, 0);
+	sk_init_common(newsk);
 
-		newsk->sk_dst_cache	= NULL;
-		newsk->sk_dst_pending_confirm = 0;
-		newsk->sk_wmem_queued	= 0;
-		newsk->sk_forward_alloc = 0;
-		atomic_set(&newsk->sk_drops, 0);
-		newsk->sk_send_head	= NULL;
-		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
-		atomic_set(&newsk->sk_zckey, 0);
+	newsk->sk_dst_cache	= NULL;
+	newsk->sk_dst_pending_confirm = 0;
+	newsk->sk_wmem_queued	= 0;
+	newsk->sk_forward_alloc = 0;
+	atomic_set(&newsk->sk_drops, 0);
+	newsk->sk_send_head	= NULL;
+	newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+	atomic_set(&newsk->sk_zckey, 0);
 
-		sock_reset_flag(newsk, SOCK_DONE);
+	sock_reset_flag(newsk, SOCK_DONE);
 
-		/* sk->sk_memcg will be populated at accept() time */
-		newsk->sk_memcg = NULL;
+	/* sk->sk_memcg will be populated at accept() time */
+	newsk->sk_memcg = NULL;
 
-		cgroup_sk_clone(&newsk->sk_cgrp_data);
+	cgroup_sk_clone(&newsk->sk_cgrp_data);
 
-		rcu_read_lock();
-		filter = rcu_dereference(sk->sk_filter);
-		if (filter != NULL)
-			/* though it's an empty new sock, the charging may fail
-			 * if sysctl_optmem_max was changed between creation of
-			 * original socket and cloning
-			 */
-			is_charged = sk_filter_charge(newsk, filter);
-		RCU_INIT_POINTER(newsk->sk_filter, filter);
-		rcu_read_unlock();
+	rcu_read_lock();
+	filter = rcu_dereference(sk->sk_filter);
+	if (filter != NULL)
+		/* though it's an empty new sock, the charging may fail
+		 * if sysctl_optmem_max was changed between creation of
+		 * original socket and cloning
+		 */
+		is_charged = sk_filter_charge(newsk, filter);
+	RCU_INIT_POINTER(newsk->sk_filter, filter);
+	rcu_read_unlock();
 
-		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
-			/* We need to make sure that we don't uncharge the new
-			 * socket if we couldn't charge it in the first place
-			 * as otherwise we uncharge the parent's filter.
-			 */
-			if (!is_charged)
-				RCU_INIT_POINTER(newsk->sk_filter, NULL);
-			sk_free_unlock_clone(newsk);
-			newsk = NULL;
-			goto out;
-		}
-		RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
+	if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
+		/* We need to make sure that we don't uncharge the new
+		 * socket if we couldn't charge it in the first place
+		 * as otherwise we uncharge the parent's filter.
+		 */
+		if (!is_charged)
+			RCU_INIT_POINTER(newsk->sk_filter, NULL);
+		sk_free_unlock_clone(newsk);
+		newsk = NULL;
+		goto out;
+	}
+	RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
 
-		if (bpf_sk_storage_clone(sk, newsk)) {
-			sk_free_unlock_clone(newsk);
-			newsk = NULL;
-			goto out;
-		}
+	if (bpf_sk_storage_clone(sk, newsk)) {
+		sk_free_unlock_clone(newsk);
+		newsk = NULL;
+		goto out;
+	}
 
-		/* Clear sk_user_data if parent had the pointer tagged
-		 * as not suitable for copying when cloning.
-		 */
-		if (sk_user_data_is_nocopy(newsk))
-			newsk->sk_user_data = NULL;
+	/* Clear sk_user_data if parent had the pointer tagged
	 * as not suitable for copying when cloning.
	 */
+	if (sk_user_data_is_nocopy(newsk))
+		newsk->sk_user_data = NULL;
 
-		newsk->sk_err	   = 0;
-		newsk->sk_err_soft = 0;
-		newsk->sk_priority = 0;
-		newsk->sk_incoming_cpu = raw_smp_processor_id();
-		if (likely(newsk->sk_net_refcnt))
-			sock_inuse_add(sock_net(newsk), 1);
+	newsk->sk_err	   = 0;
+	newsk->sk_err_soft = 0;
+	newsk->sk_priority = 0;
+	newsk->sk_incoming_cpu = raw_smp_processor_id();
+	if (likely(newsk->sk_net_refcnt))
+		sock_inuse_add(sock_net(newsk), 1);
 
-		/*
-		 * Before updating sk_refcnt, we must commit prior changes to memory
-		 * (Documentation/RCU/rculist_nulls.rst for details)
-		 */
-		smp_wmb();
-		refcount_set(&newsk->sk_refcnt, 2);
+	/* Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.rst for details)
	 */
+	smp_wmb();
+	refcount_set(&newsk->sk_refcnt, 2);
 
-		/*
-		 * Increment the counter in the same struct proto as the master
-		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
-		 * is the same as sk->sk_prot->socks, as this field was copied
-		 * with memcpy).
-		 *
-		 * This _changes_ the previous behaviour, where
-		 * tcp_create_openreq_child always was incrementing the
-		 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
-		 * to be taken into account in all callers. -acme
-		 */
-		sk_refcnt_debug_inc(newsk);
-		sk_set_socket(newsk, NULL);
-		sk_tx_queue_clear(newsk);
-		RCU_INIT_POINTER(newsk->sk_wq, NULL);
+	/* Increment the counter in the same struct proto as the master
	 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
	 * is the same as sk->sk_prot->socks, as this field was copied
	 * with memcpy).
	 *
	 * This _changes_ the previous behaviour, where
	 * tcp_create_openreq_child always was incrementing the
	 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
	 * to be taken into account in all callers. -acme
	 */
+	sk_refcnt_debug_inc(newsk);
+	sk_set_socket(newsk, NULL);
+	sk_tx_queue_clear(newsk);
+	RCU_INIT_POINTER(newsk->sk_wq, NULL);
 
-		if (newsk->sk_prot->sockets_allocated)
-			sk_sockets_allocated_inc(newsk);
+	if (newsk->sk_prot->sockets_allocated)
+		sk_sockets_allocated_inc(newsk);
 
-		if (sock_needs_netstamp(sk) &&
-		    newsk->sk_flags & SK_FLAGS_TIMESTAMP)
-			net_enable_timestamp();
-	}
+	if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP)
+		net_enable_timestamp();
 out:
 	return newsk;
 }
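
A note on the filter-charging block the diff carries over unchanged:
sk_filter_charge() may fail even for a brand-new socket, because
sysctl_optmem_max can shrink between creating the parent and cloning
it, and the error path must not uncharge a filter the clone never paid
for. A minimal user-space sketch of that charge-then-rollback idea,
with hypothetical names throughout (filter_charge(), clone_free(),
clone_with_filter(); none of this is kernel API):

	#include <stdbool.h>
	#include <stdlib.h>

	struct filter { int cost; };

	struct clone {
		struct filter *filter;
		int charged;		/* memory accounted to this clone */
	};

	/* May fail even for a fresh clone if the budget shrank since
	 * the parent was created (cf. sysctl_optmem_max). */
	static bool filter_charge(struct clone *c, const struct filter *f,
				  int budget)
	{
		if (f->cost > budget)
			return false;
		c->charged += f->cost;
		return true;
	}

	/* Shared unwind: uncharges whatever filter the clone owns. */
	static void clone_free(struct clone *c)
	{
		if (c->filter)
			c->charged -= c->filter->cost;
		free(c);
	}

	static struct clone *clone_with_filter(struct filter *f, int budget)
	{
		struct clone *c = calloc(1, sizeof(*c));
		bool is_charged;

		if (!c)
			goto out;

		is_charged = filter_charge(c, f, budget);
		c->filter = f;	/* pointer is copied unconditionally */
		if (!is_charged) {
			/* Clear the pointer before unwinding so that
			 * clone_free() cannot uncharge a filter this
			 * clone never paid for (in sk_clone_lock():
			 * the parent's filter). */
			c->filter = NULL;
			clone_free(c);
			c = NULL;
		}
	out:
		return c;
	}

	int main(void)
	{
		struct filter f = { .cost = 16 };
		struct clone *c = clone_with_filter(&f, 8); /* fails: 16 > 8 */

		free(c);	/* c == NULL here; free(NULL) is a no-op */
		return 0;
	}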