[NET]: Transform skb_queue_len() binary tests into skb_queue_empty()

This is part of the grand scheme to eliminate the qlen
member of skb_queue_head, and subsequently remove the
'list' member of sk_buff.

Most users of skb_queue_len() only want to know whether the queue is
empty or not, and that is done trivially with skb_queue_empty(), which
doesn't use the skb_queue_head->qlen member and instead uses the
emptiness of the queue list as the test.

Signed-off-by: David S. Miller <davem@davemloft.net>

commit b03efcfb21 (parent a92b7b8057)
Author: David S. Miller <davem@davemloft.net>
Date:   2005-07-08 14:57:23 -07:00

34 changed files with 84 additions and 89 deletions
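
Note (a sketch for reference, not part of the patch): the conversion applied
throughout the diff below is mechanical:

	skb_queue_len(&q)         becomes  !skb_queue_empty(&q)
	skb_queue_len(&q) == 0    becomes   skb_queue_empty(&q)
	skb_queue_len(&q) > 0     becomes  !skb_queue_empty(&q)

It works because skb_queue_empty() only inspects the circular list head and
never the qlen counter. Roughly, assuming the usual include/linux/skbuff.h
helpers of this era:

	/* the queue is empty when its head points back at itself */
	static inline int skb_queue_empty(const struct sk_buff_head *list)
	{
		return list->next == (struct sk_buff *)list;
	}

	/* reads the qlen member this series aims to eliminate */
	static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
	{
		return list_->qlen;
	}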


@@ -120,7 +120,7 @@ static unsigned int hci_vhci_chr_poll(struct file *file, poll_table * wait)
 	poll_wait(file, &hci_vhci->read_wait, wait);
-	if (skb_queue_len(&hci_vhci->readq))
+	if (!skb_queue_empty(&hci_vhci->readq))
 		return POLLIN | POLLRDNORM;
 	return POLLOUT | POLLWRNORM;


@@ -279,7 +279,8 @@ BChannel_proc_xmt(struct BCState *bcs)
 	if (test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags))
 		st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
 	if (!test_bit(BC_FLG_ACTIV, &bcs->Flag)) {
-		if (!test_bit(BC_FLG_BUSY, &bcs->Flag) && (!skb_queue_len(&bcs->squeue))) {
+		if (!test_bit(BC_FLG_BUSY, &bcs->Flag) &&
+		    skb_queue_empty(&bcs->squeue)) {
 			st->l2.l2l1(st, PH_DEACTIVATE | CONFIRM, NULL);
 		}
 	}


@@ -108,7 +108,8 @@ static int l2addrsize(struct Layer2 *l2);
 static void
 set_peer_busy(struct Layer2 *l2) {
 	test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
-	if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
+	if (!skb_queue_empty(&l2->i_queue) ||
+	    !skb_queue_empty(&l2->ui_queue))
 		test_and_set_bit(FLG_L2BLOCK, &l2->flag);
 }

@@ -754,7 +755,7 @@ l2_restart_multi(struct FsmInst *fi, int event, void *arg)
 		st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
 	if ((ST_L2_7==state) || (ST_L2_8 == state))
-		if (skb_queue_len(&st->l2.i_queue) && cansend(st))
+		if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
 			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 }

@@ -810,7 +811,7 @@ l2_connected(struct FsmInst *fi, int event, void *arg)
 	if (pr != -1)
 		st->l2.l2l3(st, pr, NULL);
-	if (skb_queue_len(&st->l2.i_queue) && cansend(st))
+	if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
 		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 }

@@ -1014,7 +1015,7 @@ l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
 			if(typ != RR) FsmDelTimer(&st->l2.t203, 9);
 			restart_t200(st, 12);
 		}
-		if (skb_queue_len(&st->l2.i_queue) && (typ == RR))
+		if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR))
 			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 	} else
 		nrerrorrecovery(fi);

@@ -1120,7 +1121,7 @@ l2_got_iframe(struct FsmInst *fi, int event, void *arg)
 			return;
 		}
-		if (skb_queue_len(&st->l2.i_queue) && (fi->state == ST_L2_7))
+		if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7))
 			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 		if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag))
 			enquiry_cr(st, RR, RSP, 0);

@@ -1138,7 +1139,7 @@ l2_got_tei(struct FsmInst *fi, int event, void *arg)
 		test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
 	} else
 		FsmChangeState(fi, ST_L2_4);
-	if (skb_queue_len(&st->l2.ui_queue))
+	if (!skb_queue_empty(&st->l2.ui_queue))
 		tx_ui(st);
 }

@@ -1301,7 +1302,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 		FsmDelTimer(&st->l2.t203, 13);
 		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11);
 	}
-	if (skb_queue_len(&l2->i_queue) && cansend(st))
+	if (!skb_queue_empty(&l2->i_queue) && cansend(st))
 		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 }

@@ -1347,7 +1348,7 @@ l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
 		}
 		invoke_retransmission(st, nr);
 		FsmChangeState(fi, ST_L2_7);
-		if (skb_queue_len(&l2->i_queue) && cansend(st))
+		if (!skb_queue_empty(&l2->i_queue) && cansend(st))
 			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 	} else
 		nrerrorrecovery(fi);


@@ -302,7 +302,7 @@ release_l3_process(struct l3_process *p)
 		    !test_bit(FLG_PTP, &p->st->l2.flag)) {
 			if (p->debug)
 				l3_debug(p->st, "release_l3_process: last process");
-			if (!skb_queue_len(&p->st->l3.squeue)) {
+			if (skb_queue_empty(&p->st->l3.squeue)) {
 				if (p->debug)
 					l3_debug(p->st, "release_l3_process: release link");
 				if (p->st->protocol != ISDN_PTYPE_NI1)


@@ -1223,7 +1223,7 @@ isdn_tty_write(struct tty_struct *tty, const u_char * buf, int count)
 		total += c;
 	}
 	atomic_dec(&info->xmit_lock);
-	if ((info->xmit_count) || (skb_queue_len(&info->xmit_queue))) {
+	if ((info->xmit_count) || !skb_queue_empty(&info->xmit_queue)) {
 		if (m->mdmreg[REG_DXMT] & BIT_DXMT) {
 			isdn_tty_senddown(info);
 			isdn_tty_tint(info);

@@ -1284,7 +1284,7 @@ isdn_tty_flush_chars(struct tty_struct *tty)
 	if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_flush_chars"))
 		return;
-	if ((info->xmit_count) || (skb_queue_len(&info->xmit_queue)))
+	if ((info->xmit_count) || !skb_queue_empty(&info->xmit_queue))
 		isdn_timer_ctrl(ISDN_TIMER_MODEMXMIT, 1);
 }


@@ -304,12 +304,12 @@ icn_pollbchan_send(int channel, icn_card * card)
 	isdn_ctrl cmd;
 	if (!(card->sndcount[channel] || card->xskb[channel] ||
-	      skb_queue_len(&card->spqueue[channel])))
+	      !skb_queue_empty(&card->spqueue[channel])))
 		return;
 	if (icn_trymaplock_channel(card, mch)) {
 		while (sbfree &&
 		       (card->sndcount[channel] ||
-			skb_queue_len(&card->spqueue[channel]) ||
+			!skb_queue_empty(&card->spqueue[channel]) ||
 			card->xskb[channel])) {
 			spin_lock_irqsave(&card->lock, flags);
 			if (card->xmit_lock[channel]) {


@@ -304,7 +304,7 @@ static inline void scc_discard_buffers(struct scc_channel *scc)
 		scc->tx_buff = NULL;
 	}
-	while (skb_queue_len(&scc->tx_queue))
+	while (!skb_queue_empty(&scc->tx_queue))
 		dev_kfree_skb(skb_dequeue(&scc->tx_queue));
 	spin_unlock_irqrestore(&scc->lock, flags);

@@ -1126,8 +1126,7 @@ static void t_dwait(unsigned long channel)
 	if (scc->stat.tx_state == TXS_WAIT)	/* maxkeyup or idle timeout */
 	{
-		if (skb_queue_len(&scc->tx_queue) == 0)	/* nothing to send */
-		{
+		if (skb_queue_empty(&scc->tx_queue)) {	/* nothing to send */
 			scc->stat.tx_state = TXS_IDLE;
 			netif_wake_queue(scc->dev);	/* t_maxkeyup locked it. */
 			return;


@@ -364,7 +364,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
 	spin_lock_irqsave(&ap->recv_lock, flags);
 	ppp_async_input(ap, buf, cflags, count);
 	spin_unlock_irqrestore(&ap->recv_lock, flags);
-	if (skb_queue_len(&ap->rqueue))
+	if (!skb_queue_empty(&ap->rqueue))
 		tasklet_schedule(&ap->tsk);
 	ap_put(ap);
 	if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)


@@ -1237,8 +1237,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 		pch = list_entry(list, struct channel, clist);
 		navail += pch->avail = (pch->chan != NULL);
 		if (pch->avail) {
-			if (skb_queue_len(&pch->file.xq) == 0
-			    || !pch->had_frag) {
+			if (skb_queue_empty(&pch->file.xq) ||
+			    !pch->had_frag) {
 				pch->avail = 2;
 				++nfree;
 			}

@@ -1374,8 +1374,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 		/* try to send it down the channel */
 		chan = pch->chan;
-		if (skb_queue_len(&pch->file.xq)
-		    || !chan->ops->start_xmit(chan, frag))
+		if (!skb_queue_empty(&pch->file.xq) ||
+		    !chan->ops->start_xmit(chan, frag))
 			skb_queue_tail(&pch->file.xq, frag);
 		pch->had_frag = 1;
 		p += flen;

@@ -1412,7 +1412,7 @@ ppp_channel_push(struct channel *pch)
 	spin_lock_bh(&pch->downl);
 	if (pch->chan != 0) {
-		while (skb_queue_len(&pch->file.xq) > 0) {
+		while (!skb_queue_empty(&pch->file.xq)) {
 			skb = skb_dequeue(&pch->file.xq);
 			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
 				/* put the packet back and try again later */

@@ -1426,7 +1426,7 @@ ppp_channel_push(struct channel *pch)
 	}
 	spin_unlock_bh(&pch->downl);
 	/* see if there is anything from the attached unit to be sent */
-	if (skb_queue_len(&pch->file.xq) == 0) {
+	if (skb_queue_empty(&pch->file.xq)) {
 		read_lock_bh(&pch->upl);
 		ppp = pch->ppp;
 		if (ppp != 0)


@@ -406,7 +406,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
 	spin_lock_irqsave(&ap->recv_lock, flags);
 	ppp_sync_input(ap, buf, cflags, count);
 	spin_unlock_irqrestore(&ap->recv_lock, flags);
-	if (skb_queue_len(&ap->rqueue))
+	if (!skb_queue_empty(&ap->rqueue))
 		tasklet_schedule(&ap->tsk);
 	sp_put(ap);
 	if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)


@@ -215,7 +215,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
 	poll_wait(file, &tun->read_wait, wait);
-	if (skb_queue_len(&tun->readq))
+	if (!skb_queue_empty(&tun->readq))
 		mask |= POLLIN | POLLRDNORM;
 	return mask;


@@ -2374,7 +2374,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
 	/*
 	 * Clean out tx queue
 	 */
-	if (test_bit(FLAG_MPI, &ai->flags) && skb_queue_len (&ai->txq) > 0) {
+	if (test_bit(FLAG_MPI, &ai->flags) && !skb_queue_empty(&ai->txq)) {
 		struct sk_buff *skb = NULL;
 		for (;(skb = skb_dequeue(&ai->txq));)
 			dev_kfree_skb(skb);

@@ -3287,7 +3287,7 @@ exitrx:
 		if (status & EV_TXEXC)
 			get_tx_error(apriv, -1);
 		spin_lock_irqsave(&apriv->aux_lock, flags);
-		if (skb_queue_len (&apriv->txq)) {
+		if (!skb_queue_empty(&apriv->txq)) {
 			spin_unlock_irqrestore(&apriv->aux_lock,flags);
 			mpi_send_packet (dev);
 		} else {


@@ -428,7 +428,7 @@ claw_pack_skb(struct claw_privbk *privptr)
 	new_skb = NULL;		/* assume no dice */
 	pkt_cnt = 0;
 	CLAW_DBF_TEXT(4,trace,"PackSKBe");
-	if (skb_queue_len(&p_ch->collect_queue) > 0) {
+	if (!skb_queue_empty(&p_ch->collect_queue)) {
 		/* some data */
 		held_skb = skb_dequeue(&p_ch->collect_queue);
 		if (p_env->packing != DO_PACKED)

@@ -1254,7 +1254,7 @@ claw_write_next ( struct chbk * p_ch )
 	privptr = (struct claw_privbk *) dev->priv;
 	claw_free_wrt_buf( dev );
 	if ((privptr->write_free_count > 0) &&
-	    (skb_queue_len(&p_ch->collect_queue) > 0)) {
+	    !skb_queue_empty(&p_ch->collect_queue)) {
 		pk_skb = claw_pack_skb(privptr);
 		while (pk_skb != NULL) {
 			rc = claw_hw_tx( pk_skb, dev,1);


@@ -156,7 +156,7 @@ ctc_tty_readmodem(ctc_tty_info *info)
 				skb_queue_head(&info->rx_queue, skb);
 			else {
 				kfree_skb(skb);
-				ret = skb_queue_len(&info->rx_queue);
+				ret = !skb_queue_empty(&info->rx_queue);
 			}
 		}
 	}

@@ -530,7 +530,7 @@ ctc_tty_write(struct tty_struct *tty, const u_char * buf, int count)
 		total += c;
 		count -= c;
 	}
-	if (skb_queue_len(&info->tx_queue)) {
+	if (!skb_queue_empty(&info->tx_queue)) {
 		info->lsr &= ~UART_LSR_TEMT;
 		tasklet_schedule(&info->tasklet);
 	}

@@ -594,7 +594,7 @@ ctc_tty_flush_chars(struct tty_struct *tty)
 		return;
 	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars"))
 		return;
-	if (tty->stopped || tty->hw_stopped || (!skb_queue_len(&info->tx_queue)))
+	if (tty->stopped || tty->hw_stopped || skb_queue_empty(&info->tx_queue))
 		return;
 	tasklet_schedule(&info->tasklet);
 }


@@ -3227,9 +3227,9 @@ static int usbnet_stop (struct net_device *net)
 	temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);
 	// maybe wait for deletions to finish.
-	while (skb_queue_len (&dev->rxq)
-			&& skb_queue_len (&dev->txq)
-			&& skb_queue_len (&dev->done)) {
+	while (!skb_queue_empty(&dev->rxq) &&
+	       !skb_queue_empty(&dev->txq) &&
+	       !skb_queue_empty(&dev->done)) {
 		msleep(UNLINK_TIMEOUT_MS);
 		if (netif_msg_ifdown (dev))
 			devdbg (dev, "waited for %d urb completions", temp);


@@ -224,7 +224,7 @@ int irda_device_is_receiving(struct net_device *dev);
 /* Interface for internal use */
 static inline int irda_device_txqueue_empty(const struct net_device *dev)
 {
-	return (skb_queue_len(&dev->qdisc->q) == 0);
+	return skb_queue_empty(&dev->qdisc->q);
 }
 int  irda_device_set_raw_mode(struct net_device* self, int status);
 struct net_device *alloc_irdadev(int sizeof_priv);


@@ -991,7 +991,7 @@ static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
 static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
 {
-	if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
+	if (skb_queue_empty(&tp->out_of_order_queue) &&
 	    tp->rcv_wnd &&
 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 	    !tp->urg_data)


@@ -213,7 +213,7 @@ static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, in
 	return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
-static int cmtp_process_transmit(struct cmtp_session *session)
+static void cmtp_process_transmit(struct cmtp_session *session)
 {
 	struct sk_buff *skb, *nskb;
 	unsigned char *hdr;

@@ -223,7 +223,7 @@ static int cmtp_process_transmit(struct cmtp_session *session)
 	if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) {
 		BT_ERR("Can't allocate memory for new frame");
-		return -ENOMEM;
+		return;
 	}
 	while ((skb = skb_dequeue(&session->transmit))) {

@@ -275,8 +275,6 @@ static int cmtp_process_transmit(struct cmtp_session *session)
 	cmtp_send_frame(session, nskb->data, nskb->len);
 	kfree_skb(nskb);
-	return skb_queue_len(&session->transmit);
 }
 static int cmtp_session(void *arg)


@@ -428,7 +428,7 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
 	return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
-static int hidp_process_transmit(struct hidp_session *session)
+static void hidp_process_transmit(struct hidp_session *session)
 {
 	struct sk_buff *skb;

@@ -453,9 +453,6 @@ static int hidp_process_transmit(struct hidp_session *session)
 		hidp_set_timer(session);
 		kfree_skb(skb);
 	}
-	return skb_queue_len(&session->ctrl_transmit) +
-		skb_queue_len(&session->intr_transmit);
 }
 static int hidp_session(void *arg)


@@ -590,8 +590,11 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (skb_queue_len(&sk->sk_receive_queue) || sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN) ||
-		    signal_pending(current) || !timeo)
+		if (!skb_queue_empty(&sk->sk_receive_queue) ||
+		    sk->sk_err ||
+		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
+		    signal_pending(current) ||
+		    !timeo)
 			break;
 		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);


@@ -781,7 +781,7 @@ static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty)
 	BT_DBG("tty %p dev %p", tty, dev);
-	if (skb_queue_len(&dlc->tx_queue))
+	if (!skb_queue_empty(&dlc->tx_queue))
 		return dlc->mtu;
 	return 0;


@@ -536,7 +536,7 @@ static void dn_keepalive(struct sock *sk)
 	 * we are double checking that we are not sending too
 	 * many of these keepalive frames.
 	 */
-	if (skb_queue_len(&scp->other_xmit_queue) == 0)
+	if (skb_queue_empty(&scp->other_xmit_queue))
 		dn_nsp_send_link(sk, DN_NOCHANGE, 0);
 }

@@ -1191,7 +1191,7 @@ static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table
 	struct dn_scp *scp = DN_SK(sk);
 	int mask = datagram_poll(file, sock, wait);
-	if (skb_queue_len(&scp->other_receive_queue))
+	if (!skb_queue_empty(&scp->other_receive_queue))
 		mask |= POLLRDBAND;
 	return mask;

@@ -1214,7 +1214,7 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 	case SIOCATMARK:
 		lock_sock(sk);
-		val = (skb_queue_len(&scp->other_receive_queue) != 0);
+		val = !skb_queue_empty(&scp->other_receive_queue);
 		if (scp->state != DN_RUN)
 			val = -ENOTCONN;
 		release_sock(sk);

@@ -1630,7 +1630,7 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int
 	int len = 0;
 	if (flags & MSG_OOB)
-		return skb_queue_len(q) ? 1 : 0;
+		return !skb_queue_empty(q) ? 1 : 0;
 	while(skb != (struct sk_buff *)q) {
 		struct dn_skb_cb *cb = DN_SKB_CB(skb);

@@ -1707,7 +1707,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (sk->sk_err)
 		goto out;
-	if (skb_queue_len(&scp->other_receive_queue)) {
+	if (!skb_queue_empty(&scp->other_receive_queue)) {
 		if (!(flags & MSG_OOB)) {
 			msg->msg_flags |= MSG_OOB;
 			if (!scp->other_report) {


@@ -342,7 +342,8 @@ int dn_nsp_xmit_timeout(struct sock *sk)
 	dn_nsp_output(sk);
-	if (skb_queue_len(&scp->data_xmit_queue) || skb_queue_len(&scp->other_xmit_queue))
+	if (!skb_queue_empty(&scp->data_xmit_queue) ||
+	    !skb_queue_empty(&scp->other_xmit_queue))
 		scp->persist = dn_nsp_persist(sk);
 	return 0;


@@ -1105,7 +1105,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
-	NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
+	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */

@@ -1369,7 +1369,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			 * is not empty. It is more elegant, but eats cycles,
 			 * unfortunately.
 			 */
-			if (skb_queue_len(&tp->ucopy.prequeue))
+			if (!skb_queue_empty(&tp->ucopy.prequeue))
 				goto do_prequeue;
 			/* __ Set realtime policy in scheduler __ */

@@ -1394,7 +1394,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			}
 			if (tp->rcv_nxt == tp->copied_seq &&
-			    skb_queue_len(&tp->ucopy.prequeue)) {
+			    !skb_queue_empty(&tp->ucopy.prequeue)) {
 do_prequeue:
 				tcp_prequeue_process(sk);

@@ -1476,7 +1476,7 @@ skip_copy:
 	} while (len > 0);
 	if (user_recv) {
-		if (skb_queue_len(&tp->ucopy.prequeue)) {
+		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 			int chunk;
 			tp->ucopy.len = copied > 0 ? len : 0;


@@ -2802,7 +2802,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	int this_sack;
 	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
-	if (skb_queue_len(&tp->out_of_order_queue) == 0) {
+	if (skb_queue_empty(&tp->out_of_order_queue)) {
 		tp->rx_opt.num_sacks = 0;
 		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
 		return;

@@ -2935,13 +2935,13 @@ queue_and_out:
 		if(th->fin)
 			tcp_fin(skb, sk, th);
-		if (skb_queue_len(&tp->out_of_order_queue)) {
+		if (!skb_queue_empty(&tp->out_of_order_queue)) {
 			tcp_ofo_queue(sk);
 			/* RFC2581. 4.2. SHOULD send immediate ACK, when
 			 * gap in queue is filled.
 			 */
-			if (!skb_queue_len(&tp->out_of_order_queue))
+			if (skb_queue_empty(&tp->out_of_order_queue))
 				tp->ack.pingpong = 0;
 		}

@@ -3249,9 +3249,8 @@ static int tcp_prune_queue(struct sock *sk)
 	 * This must not ever occur. */
 	/* First, purge the out_of_order queue. */
-	if (skb_queue_len(&tp->out_of_order_queue)) {
-		NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED,
-				 skb_queue_len(&tp->out_of_order_queue));
+	if (!skb_queue_empty(&tp->out_of_order_queue)) {
+		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 		/* Reset SACK state. A conforming SACK implementation will


@@ -231,11 +231,10 @@ static void tcp_delack_timer(unsigned long data)
 	}
 	tp->ack.pending &= ~TCP_ACK_TIMER;
-	if (skb_queue_len(&tp->ucopy.prequeue)) {
+	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
-		NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
-				 skb_queue_len(&tp->ucopy.prequeue));
+		NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk->sk_backlog_rcv(sk, skb);


@@ -445,9 +445,8 @@ void irlap_disconnect_request(struct irlap_cb *self)
 	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 	/* Don't disconnect until all data frames are successfully sent */
-	if (skb_queue_len(&self->txq) > 0) {
+	if (!skb_queue_empty(&self->txq)) {
 		self->disconnect_pending = TRUE;
 		return;
 	}


@@ -191,7 +191,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
 	 * Send out the RR frames faster if our own transmit queue is empty, or
 	 * if the peer is busy. The effect is a much faster conversation
 	 */
-	if ((skb_queue_len(&self->txq) == 0) || (self->remote_busy)) {
+	if (skb_queue_empty(&self->txq) || self->remote_busy) {
 		if (self->fast_RR == TRUE) {
 			/*
 			 * Assert that the fast poll timer has not reached the

@@ -263,7 +263,7 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
 		IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__,
 			   skb_queue_len(&self->txq));
-		if (skb_queue_len(&self->txq)) {
+		if (!skb_queue_empty(&self->txq)) {
 			/* Prevent race conditions with irlap_data_request() */
 			self->local_busy = TRUE;

@@ -1074,7 +1074,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
 #else /* CONFIG_IRDA_DYNAMIC_WINDOW */
 			/* Window has been adjusted for the max packet
 			 * size, so much simpler... - Jean II */
-			nextfit = (skb_queue_len(&self->txq) > 0);
+			nextfit = !skb_queue_empty(&self->txq);
 #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
 			/*
 			 * Send data with poll bit cleared only if window > 1

@@ -1814,7 +1814,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
 #else /* CONFIG_IRDA_DYNAMIC_WINDOW */
 			/* Window has been adjusted for the max packet
 			 * size, so much simpler... - Jean II */
-			nextfit = (skb_queue_len(&self->txq) > 0);
+			nextfit = !skb_queue_empty(&self->txq);
 #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
 			/*
 			 * Send data with final bit cleared only if window > 1

@@ -1937,7 +1937,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
 			irlap_data_indication(self, skb, FALSE);
 			/* Any pending data requests? */
-			if ((skb_queue_len(&self->txq) > 0) &&
+			if (!skb_queue_empty(&self->txq) &&
 			    (self->window > 0))
 			{
 				self->ack_required = TRUE;

@@ -2038,7 +2038,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
 			/*
 			 * Any pending data requests?
 			 */
-			if ((skb_queue_len(&self->txq) > 0) &&
+			if (!skb_queue_empty(&self->txq) &&
 			    (self->window > 0) && !self->remote_busy)
 			{
 				irlap_data_indication(self, skb, TRUE);

@@ -2069,7 +2069,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
 		 */
 		nr_status = irlap_validate_nr_received(self, info->nr);
 		if (nr_status == NR_EXPECTED) {
-			if ((skb_queue_len( &self->txq) > 0) &&
+			if (!skb_queue_empty(&self->txq) &&
 			    (self->window > 0)) {
 				self->remote_busy = FALSE;


@@ -1018,11 +1018,10 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
 	/*
 	 * We can now fill the window with additional data frames
 	 */
-	while (skb_queue_len( &self->txq) > 0) {
+	while (!skb_queue_empty(&self->txq)) {
 		IRDA_DEBUG(0, "%s(), sending additional frames!\n", __FUNCTION__);
-		if ((skb_queue_len( &self->txq) > 0) &&
-		    (self->window > 0)) {
+		if (self->window > 0) {
 			skb = skb_dequeue( &self->txq);
 			IRDA_ASSERT(skb != NULL, return;);

@@ -1031,8 +1030,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
 			 * bit cleared
 			 */
 			if ((self->window > 1) &&
-			    skb_queue_len(&self->txq) > 0)
-			{
+			    !skb_queue_empty(&self->txq)) {
 				irlap_send_data_primary(self, skb);
 			} else {
 				irlap_send_data_primary_poll(self, skb);


@@ -1513,7 +1513,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
 	/*
 	 * Check if there is still data segments in the transmit queue
 	 */
-	if (skb_queue_len(&self->tx_queue) > 0) {
+	if (!skb_queue_empty(&self->tx_queue)) {
 		if (priority == P_HIGH) {
 			/*
 			 * No need to send the queued data, if we are


@@ -84,7 +84,7 @@ static u16 llc_util_nr_inside_tx_window(struct sock *sk, u8 nr)
 	if (llc->dev->flags & IFF_LOOPBACK)
 		goto out;
 	rc = 1;
-	if (!skb_queue_len(&llc->pdu_unack_q))
+	if (skb_queue_empty(&llc->pdu_unack_q))
 		goto out;
 	skb = skb_peek(&llc->pdu_unack_q);
 	pdu = llc_pdu_sn_hdr(skb);


@@ -858,7 +858,7 @@ static inline void netlink_rcv_wake(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
-	if (!skb_queue_len(&sk->sk_receive_queue))
+	if (skb_queue_empty(&sk->sk_receive_queue))
 		clear_bit(0, &nlk->state);
 	if (!test_bit(0, &nlk->state))
 		wake_up_interruptible(&nlk->wait);


@@ -385,7 +385,7 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt)
 	memcpy(q->Stab, RTA_DATA(tb[TCA_RED_STAB-1]), 256);
 	q->qcount = -1;
-	if (skb_queue_len(&sch->q) == 0)
+	if (skb_queue_empty(&sch->q))
 		PSCHED_SET_PASTPERFECT(q->qidlestart);
 	sch_tree_unlock(sch);
 	return 0;


@@ -302,7 +302,7 @@ static void unix_write_space(struct sock *sk)
  * may receive messages only from that peer. */
 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 {
-	if (skb_queue_len(&sk->sk_receive_queue)) {
+	if (!skb_queue_empty(&sk->sk_receive_queue)) {
 		skb_queue_purge(&sk->sk_receive_queue);
 		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

@@ -1619,7 +1619,7 @@ static long unix_stream_data_wait(struct sock * sk, long timeo)
 	for (;;) {
 		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
-		if (skb_queue_len(&sk->sk_receive_queue) ||
+		if (!skb_queue_empty(&sk->sk_receive_queue) ||
 		    sk->sk_err ||
 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 		    signal_pending(current) ||