mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-06 06:37:59 +00:00
[SCTP]: Flush fragment queue when exiting partial delivery.
At the end of partial delivery, we may have complete messages sitting on the fragment queue. These messages are stuck there until a new fragment arrives. This can completely stall a given association. When clearing partial delivery state, flush any complete messages from the fragment queue and send them on their way up. Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
215f7b08f2
commit
ef5d4cf2f9
1 changed files with 33 additions and 0 deletions
|
@ -53,6 +53,7 @@ static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
|
||||||
struct sctp_ulpevent *);
|
struct sctp_ulpevent *);
|
||||||
static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
|
static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
|
||||||
struct sctp_ulpevent *);
|
struct sctp_ulpevent *);
|
||||||
|
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
|
||||||
|
|
||||||
/* 1st Level Abstractions */
|
/* 1st Level Abstractions */
|
||||||
|
|
||||||
|
@ -190,6 +191,7 @@ static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
|
||||||
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
|
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
|
||||||
{
|
{
|
||||||
ulpq->pd_mode = 0;
|
ulpq->pd_mode = 0;
|
||||||
|
sctp_ulpq_reasm_drain(ulpq);
|
||||||
return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
|
return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -699,6 +701,37 @@ void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Drain the reassembly queue. If we just cleared parted delivery, it
|
||||||
|
* is possible that the reassembly queue will contain already reassembled
|
||||||
|
* messages. Retrieve any such messages and give them to the user.
|
||||||
|
*/
|
||||||
|
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
|
||||||
|
{
|
||||||
|
struct sctp_ulpevent *event = NULL;
|
||||||
|
struct sk_buff_head temp;
|
||||||
|
|
||||||
|
if (skb_queue_empty(&ulpq->reasm))
|
||||||
|
return;
|
||||||
|
|
||||||
|
while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
|
||||||
|
/* Do ordering if needed. */
|
||||||
|
if ((event) && (event->msg_flags & MSG_EOR)){
|
||||||
|
skb_queue_head_init(&temp);
|
||||||
|
__skb_queue_tail(&temp, sctp_event2skb(event));
|
||||||
|
|
||||||
|
event = sctp_ulpq_order(ulpq, event);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Send event to the ULP. 'event' is the
|
||||||
|
* sctp_ulpevent for very first SKB on the temp' list.
|
||||||
|
*/
|
||||||
|
if (event)
|
||||||
|
sctp_ulpq_tail_event(ulpq, event);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
/* Helper function to gather skbs that have possibly become
|
/* Helper function to gather skbs that have possibly become
|
||||||
* ordered by an incoming chunk.
|
* ordered by an incoming chunk.
|
||||||
*/
|
*/
|
||||||
|
|
Loading…
Add table
Reference in a new issue