Automatic merge of /spare/repo/netdev-2.6 branch ppp

commit 21035ffeb7

1 changed file with 109 additions and 66 deletions
@@ -1217,36 +1217,43 @@ ppp_push(struct ppp *ppp)
  */
 static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 {
-        int nch, len, fragsize;
+        int len, fragsize;
         int i, bits, hdrlen, mtu;
-        int flen, fnb;
+        int flen;
+        int navail, nfree;
+        int nbigger;
         unsigned char *p, *q;
         struct list_head *list;
         struct channel *pch;
         struct sk_buff *frag;
         struct ppp_channel *chan;
 
-        nch = 0;
+        nfree = 0;      /* # channels which have no packet already queued */
+        navail = 0;     /* total # of usable channels (not deregistered) */
         hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
+        i = 0;
         list = &ppp->channels;
         while ((list = list->next) != &ppp->channels) {
                 pch = list_entry(list, struct channel, clist);
-                nch += pch->avail = (skb_queue_len(&pch->file.xq) == 0);
-                /*
-                 * If a channel hasn't had a fragment yet, it has to get
-                 * one before we send any fragments on later channels.
-                 * If it can't take a fragment now, don't give any
-                 * to subsequent channels.
-                 */
-                if (!pch->had_frag && !pch->avail) {
-                        while ((list = list->next) != &ppp->channels) {
-                                pch = list_entry(list, struct channel, clist);
-                                pch->avail = 0;
-                        }
-                        break;
+                navail += pch->avail = (pch->chan != NULL);
+                if (pch->avail) {
+                        if (skb_queue_len(&pch->file.xq) == 0
+                            || !pch->had_frag) {
+                                pch->avail = 2;
+                                ++nfree;
+                        }
+                        if (!pch->had_frag && i < ppp->nxchan)
+                                ppp->nxchan = i;
                 }
+                ++i;
         }
-        if (nch == 0)
+
+        /*
+         * Don't start sending this packet unless at least half of
+         * the channels are free.  This gives much better TCP
+         * performance if we have a lot of channels.
+         */
+        if (nfree == 0 || nfree < navail / 2)
                 return 0;       /* can't take now, leave it in xmit_pending */
 
         /* Do protocol field compression (XXX this should be optional) */
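A note on the new channel scan (illustrative, not part of the patch): the single nch count is replaced by two counters. navail counts every channel that still has a chan pointer, i.e. is not being deregistered, while nfree counts the channels that can take a fragment immediately because their transmit queue is empty or they have never been given a fragment. The new gate then holds the packet back unless at least half of the usable channels are free. A minimal userspace sketch of that gate, with made-up channel counts:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the "at least half the channels are free" gate added above.
 * ppp_mp_explode() returns 0 in the failing case and leaves the packet
 * in xmit_pending; the names mirror the patch but this is illustrative only. */
static bool mp_can_send(int navail, int nfree)
{
        return nfree > 0 && nfree >= navail / 2;
}

int main(void)
{
        printf("%d\n", mp_can_send(4, 1));      /* 0: 4 usable, only 1 free, hold back */
        printf("%d\n", mp_can_send(4, 2));      /* 1: half the channels free, fragment now */
        printf("%d\n", mp_can_send(1, 1));      /* 1: a single free channel is enough */
        return 0;
}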
@@ -1257,14 +1264,19 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
                 --len;
         }
 
-        /* decide on fragment size */
+        /*
+         * Decide on fragment size.
+         * We create a fragment for each free channel regardless of
+         * how small they are (i.e. even 0 length) in order to minimize
+         * the time that it will take to detect when a channel drops
+         * a fragment.
+         */
         fragsize = len;
-        if (nch > 1) {
-                int maxch = ROUNDUP(len, MIN_FRAG_SIZE);
-                if (nch > maxch)
-                        nch = maxch;
-                fragsize = ROUNDUP(fragsize, nch);
-        }
+        if (nfree > 1)
+                fragsize = ROUNDUP(fragsize, nfree);
+        /* nbigger channels get fragsize bytes, the rest get fragsize-1,
+           except if nbigger==0, then they all get fragsize. */
+        nbigger = len % nfree;
 
         /* skip to the channel after the one we last used
            and start at that one */
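A quick note on the arithmetic (illustrative, not part of the patch): assuming ROUNDUP(n, x) is the driver's ceiling-division macro, which is the only reading under which the nbigger comment adds up, fragsize becomes the ceiling of len / nfree, and the nbigger = len % nfree channels get fragsize bytes while the rest get fragsize - 1 via the --fragsize step in the transmit loop further down. A small standalone sketch of the split, ignoring the per-channel MTU clamp:

#include <stdio.h>

/* Ceiling division, assumed to match the driver's ROUNDUP(n, x) macro. */
#define ROUNDUP(n, x)   (((n) + (x) - 1) / (x))

/* Print the per-channel byte counts produced by the new fragsize/nbigger
 * logic, ignoring the per-channel MTU clamp.  Illustrative only. */
static void show_split(int len, int nfree)
{
        int fragsize = len;
        int nbigger;
        int i;

        if (nfree > 1)
                fragsize = ROUNDUP(fragsize, nfree);
        nbigger = len % nfree;

        printf("len=%d nfree=%d:", len, nfree);
        for (i = 0; i < nfree; i++) {
                printf(" %d", fragsize);
                if (--nbigger == 0 && fragsize > 0)
                        --fragsize;     /* remaining channels get one byte less */
        }
        printf("\n");
}

int main(void)
{
        show_split(10, 3);      /* 4 3 3 */
        show_split(9, 3);       /* 3 3 3 (nbigger == 0: all get fragsize) */
        show_split(7, 1);       /* 7 */
        return 0;
}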
@@ -1278,7 +1290,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 
         /* create a fragment for each channel */
         bits = B;
-        do {
+        while (nfree > 0 || len > 0) {
                 list = list->next;
                 if (list == &ppp->channels) {
                         i = 0;
@@ -1289,61 +1301,92 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
                 if (!pch->avail)
                         continue;
 
+                /*
+                 * Skip this channel if it has a fragment pending already and
+                 * we haven't given a fragment to all of the free channels.
+                 */
+                if (pch->avail == 1) {
+                        if (nfree > 0)
+                                continue;
+                } else {
+                        --nfree;
+                        pch->avail = 1;
+                }
+
                 /* check the channel's mtu and whether it is still attached. */
                 spin_lock_bh(&pch->downl);
-                if (pch->chan == 0 || (mtu = pch->chan->mtu) < hdrlen) {
-                        /* can't use this channel */
+                if (pch->chan == NULL) {
+                        /* can't use this channel, it's being deregistered */
                         spin_unlock_bh(&pch->downl);
                         pch->avail = 0;
-                        if (--nch == 0)
+                        if (--navail == 0)
                                 break;
                         continue;
                 }
 
                 /*
-                 * We have to create multiple fragments for this channel
-                 * if fragsize is greater than the channel's mtu.
+                 * Create a fragment for this channel of
+                 * min(max(mtu+2-hdrlen, 4), fragsize, len) bytes.
+                 * If mtu+2-hdrlen < 4, that is a ridiculously small
+                 * MTU, so we use mtu = 2 + hdrlen.
                  */
                 if (fragsize > len)
                         fragsize = len;
-                for (flen = fragsize; flen > 0; flen -= fnb) {
-                        fnb = flen;
-                        if (fnb > mtu + 2 - hdrlen)
-                                fnb = mtu + 2 - hdrlen;
-                        if (fnb >= len)
-                                bits |= E;
-                        frag = alloc_skb(fnb + hdrlen, GFP_ATOMIC);
-                        if (frag == 0)
-                                goto noskb;
-                        q = skb_put(frag, fnb + hdrlen);
-                        /* make the MP header */
-                        q[0] = PPP_MP >> 8;
-                        q[1] = PPP_MP;
-                        if (ppp->flags & SC_MP_XSHORTSEQ) {
-                                q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
-                                q[3] = ppp->nxseq;
-                        } else {
-                                q[2] = bits;
-                                q[3] = ppp->nxseq >> 16;
-                                q[4] = ppp->nxseq >> 8;
-                                q[5] = ppp->nxseq;
-                        }
-
-                        /* copy the data in */
-                        memcpy(q + hdrlen, p, fnb);
-
-                        /* try to send it down the channel */
-                        chan = pch->chan;
-                        if (!chan->ops->start_xmit(chan, frag))
-                                skb_queue_tail(&pch->file.xq, frag);
-                        pch->had_frag = 1;
-                        p += fnb;
-                        len -= fnb;
-                        ++ppp->nxseq;
-                        bits = 0;
-                }
+                flen = fragsize;
+                mtu = pch->chan->mtu + 2 - hdrlen;
+                if (mtu < 4)
+                        mtu = 4;
+                if (flen > mtu)
+                        flen = mtu;
+                if (flen == len && nfree == 0)
+                        bits |= E;
+                frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
+                if (frag == 0)
+                        goto noskb;
+                q = skb_put(frag, flen + hdrlen);
+
+                /* make the MP header */
+                q[0] = PPP_MP >> 8;
+                q[1] = PPP_MP;
+                if (ppp->flags & SC_MP_XSHORTSEQ) {
+                        q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
+                        q[3] = ppp->nxseq;
+                } else {
+                        q[2] = bits;
+                        q[3] = ppp->nxseq >> 16;
+                        q[4] = ppp->nxseq >> 8;
+                        q[5] = ppp->nxseq;
+                }
+
+                /*
+                 * Copy the data in.
+                 * Unfortunately there is a bug in older versions of
+                 * the Linux PPP multilink reconstruction code where it
+                 * drops 0-length fragments.  Therefore we make sure the
+                 * fragment has at least one byte of data.  Any bytes
+                 * we add in this situation will end up as padding on the
+                 * end of the reconstructed packet.
+                 */
+                if (flen == 0)
+                        *skb_put(frag, 1) = 0;
+                else
+                        memcpy(q + hdrlen, p, flen);
+
+                /* try to send it down the channel */
+                chan = pch->chan;
+                if (skb_queue_len(&pch->file.xq)
+                    || !chan->ops->start_xmit(chan, frag))
+                        skb_queue_tail(&pch->file.xq, frag);
+                pch->had_frag = 1;
+                p += flen;
+                len -= flen;
+                ++ppp->nxseq;
+                bits = 0;
                 spin_unlock_bh(&pch->downl);
-        } while (len > 0);
+
+                if (--nbigger == 0 && fragsize > 0)
+                        --fragsize;
+        }
         ppp->nxchan = i;
 
         return 1;
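The rewritten loop body above builds exactly one fragment per pass (the old inner for loop is gone), sized to min(max(mtu + 2 - hdrlen, 4), fragsize, len) as the new comment states, and pads zero-length fragments with one byte so that older reassembly code does not drop them. Below is a hypothetical userspace helper mirroring just the size arithmetic; the value 6 used for hdrlen is an assumed MPHDRLEN for long sequence numbers, not something this diff shows:

#include <stdio.h>

/* Per-fragment payload size chosen by the loop above:
 * min(max(chan_mtu + 2 - hdrlen, 4), fragsize, len).
 * The floor of 4 guards against absurdly small channel MTUs.
 * Illustrative only. */
static int mp_fragment_len(int chan_mtu, int hdrlen, int fragsize, int len)
{
        int flen = fragsize;
        int mtu = chan_mtu + 2 - hdrlen;

        if (flen > len)
                flen = len;
        if (mtu < 4)
                mtu = 4;
        if (flen > mtu)
                flen = mtu;
        return flen;
}

int main(void)
{
        /* hdrlen = 6 assumed (long sequence numbers) */
        printf("%d\n", mp_fragment_len(1500, 6, 700, 2000));   /* 700: fragsize fits */
        printf("%d\n", mp_fragment_len(576, 6, 700, 2000));    /* 572: clamped to the channel */
        printf("%d\n", mp_fragment_len(6, 6, 700, 2000));      /* 4: ridiculously small MTU */
        return 0;
}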
@@ -1691,7 +1734,7 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
         struct list_head *l;
         int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
 
-        if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
+        if (!pskb_may_pull(skb, mphdrlen) || ppp->mrru == 0)
                 goto err;               /* no good, throw it away */
 
         /* Decode sequence number and begin/end bits */
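For context on the mphdrlen check (not part of the patch text): the multilink header that the transmit loop writes is 4 bytes with short (12-bit) sequence numbers or 6 bytes with long (24-bit) ones, counting the 2-byte PPP_MP protocol field, and dropping the old + 1 presumably lets a header-only fragment with no payload through instead of being discarded. A hypothetical userspace sketch of the same header packing; PPP_MP = 0x3d and the B/E bit values 0x80/0x40 are taken from RFC 1990, not from this diff:

#include <stdio.h>

#define PPP_MP  0x3d    /* multilink protocol number (RFC 1990) */
#define B       0x80    /* this fragment begins a packet */
#define E       0x40    /* this fragment ends a packet */

/* Pack a multilink header the way the transmit loop above does.
 * Returns 4 for short (12-bit) sequence numbers, 6 for long (24-bit).
 * Illustrative only. */
static int mp_build_header(unsigned char *q, int bits, unsigned int seq, int shortseq)
{
        q[0] = PPP_MP >> 8;
        q[1] = PPP_MP;
        if (shortseq) {
                q[2] = bits + ((seq >> 8) & 0xf);
                q[3] = seq;
                return 4;
        }
        q[2] = bits;
        q[3] = seq >> 16;
        q[4] = seq >> 8;
        q[5] = seq;
        return 6;
}

int main(void)
{
        unsigned char hdr[6];
        int i, n = mp_build_header(hdr, B | E, 0x123456, 0);

        for (i = 0; i < n; i++)
                printf("%02x ", hdr[i]);
        printf("\n");   /* 00 3d c0 12 34 56 */
        return 0;
}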