caif: fix leaks and race in caif_queue_rcv_skb()
1) If sk_filter() is applied, skb was leaked (not freed).

2) Testing SOCK_DEAD twice is racy: packet could be freed
   while already queued.

3) Remove obsolete comment about caching skb->len.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e3426ca7bc
commit b8a23e8d8e
1 changed file with 8 additions and 11 deletions
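The race in (2) is easiest to see in the pre-patch control flow: SOCK_DEAD is
tested once under the receive-queue lock to decide whether to queue the skb,
and then tested again after the lock has been dropped to decide whether to
signal readiness or free the skb. Once queued, the skb belongs to the queue
and may be freed at any time by a reader, and if the socket dies between the
two tests, kfree_skb() is called on an skb that is still sitting on the queue.
A condensed view of the hazard (pre-patch lines from the diff below, with
comments added):

	spin_lock_irqsave(&list->lock, flags);
	if (!sock_flag(sk, SOCK_DEAD))
		__skb_queue_tail(list, skb);	/* skb now belongs to the queue */
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))	/* racy re-test: sock may have died */
		sk->sk_data_ready(sk);
	else
		kfree_skb(skb);		/* can free an skb queued above */
	return 0;

The patch makes the decision once, under the lock, caches it in a local
bool queued, and branches on that cached value afterwards.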
net/caif/caif_socket.c

@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
  * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
  * not dropped, but CAIF is sending flow off instead.
  */
-static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int err;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+	bool queued = false;
 
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 		(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	err = sk_filter(sk, skb);
 	if (err)
-		return err;
+		goto out;
+
 	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
 		set_rx_flow_off(cf_sk);
 		net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	}
 	skb->dev = NULL;
 	skb_set_owner_r(skb, sk);
-	/* Cache the SKB length before we tack it onto the receive
-	 * queue. Once it is added it no longer belongs to us and
-	 * may be freed by other threads of control pulling packets
-	 * from the queue.
-	 */
 	spin_lock_irqsave(&list->lock, flags);
-	if (!sock_flag(sk, SOCK_DEAD))
+	queued = !sock_flag(sk, SOCK_DEAD);
+	if (queued)
 		__skb_queue_tail(list, skb);
 	spin_unlock_irqrestore(&list->lock, flags);
-
-	if (!sock_flag(sk, SOCK_DEAD))
+out:
+	if (queued)
 		sk->sk_data_ready(sk);
 	else
 		kfree_skb(skb);
-	return 0;
 }
 
 /* Packet Receive Callback function called from CAIF Stack */
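With the hunks applied, caif_queue_rcv_skb() reads as follows. This is a
reassembly of the patched function from the diff above, for readability only;
the bodies the hunks elide (the flow-off handling inside the two if blocks)
are marked with comments rather than reconstructed:

	static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
	{
		int err;
		unsigned long flags;
		struct sk_buff_head *list = &sk->sk_receive_queue;
		struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
		bool queued = false;

		if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
			(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
			/* ... flow-off handling, unchanged and not shown ... */
		}

		err = sk_filter(sk, skb);
		if (err)
			goto out;	/* was "return err;", which leaked the skb */

		if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
			set_rx_flow_off(cf_sk);
			net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
			/* ... unchanged line not shown in the hunks ... */
		}
		skb->dev = NULL;
		skb_set_owner_r(skb, sk);
		spin_lock_irqsave(&list->lock, flags);
		queued = !sock_flag(sk, SOCK_DEAD);	/* decide once, under the lock */
		if (queued)
			__skb_queue_tail(list, skb);
		spin_unlock_irqrestore(&list->lock, flags);
	out:
		if (queued)
			sk->sk_data_ready(sk);
		else
			kfree_skb(skb);
	}

On the sk_filter() error path queued is still false, so the out: label now
frees the skb instead of leaking it (fix 1), and because queued is decided
exactly once while the queue lock is held, the final test can no longer
disagree with whether the skb was actually queued (fix 2).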