[NET]: Merge TSO/UFO fields in sk_buff
Having separate fields in sk_buff for TSO/UFO (tso_size/ufo_size) is not going to scale if we add any more segmentation methods (e.g., DCCP). So let's merge them.

They were used to tell the protocol of a packet. This function has been subsumed by the new gso_type field. This is essentially a set of netdev feature bits (shifted by 16 bits) that are required to process a specific skb. As such it's easy to tell whether a given device can process a GSO skb: you just have to AND the gso_type field with the netdev's features field.

I've made gso_type a conjunction. The idea is that you have a base type (e.g., SKB_GSO_TCPV4) that can be modified further to support new features. For example, if we add a hardware TSO type that supports ECN, the driver would declare NETIF_F_TSO | NETIF_F_TSO_ECN. All TSO packets with CWR set would have a gso_type of SKB_GSO_TCPV4 | SKB_GSO_TCPV4_ECN, while all other TSO packets would be SKB_GSO_TCPV4. This means that only the CWR packets need to be emulated in software.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
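The following is a minimal sketch of the capability test the message describes, assuming gso_type carries netdev feature bits shifted down by 16; the helper name dev_can_gso() is illustrative only and not part of this patch.

/* Sketch, not part of this patch: dev_can_gso() is a hypothetical
 * helper illustrating the check described above.  gso_type holds the
 * required NETIF_F_* bits shifted down by 16, so shift them back up
 * and mask against the device's advertised feature flags.
 */
static inline int dev_can_gso(struct net_device *dev, struct sk_buff *skb)
{
        int feature = skb_shinfo(skb)->gso_type << 16;

        /* Every required bit must be advertised.  A NIC declaring only
         * NETIF_F_TSO passes for SKB_GSO_TCPV4 but fails for
         * SKB_GSO_TCPV4 | SKB_GSO_TCPV4_ECN, so only the CWR-marked
         * packets fall back to software segmentation.
         */
        return (dev->features & feature) == feature;
}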
commit 7967168cef
parent d4828d85d1
27 changed files with 120 additions and 90 deletions
@@ -515,15 +515,17 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
                 /* Avoid the costly divide in the normal
                  * non-TSO case.
                  */
-                skb_shinfo(skb)->tso_segs = 1;
-                skb_shinfo(skb)->tso_size = 0;
+                skb_shinfo(skb)->gso_segs = 1;
+                skb_shinfo(skb)->gso_size = 0;
+                skb_shinfo(skb)->gso_type = 0;
         } else {
                 unsigned int factor;
 
                 factor = skb->len + (mss_now - 1);
                 factor /= mss_now;
-                skb_shinfo(skb)->tso_segs = factor;
-                skb_shinfo(skb)->tso_size = mss_now;
+                skb_shinfo(skb)->gso_segs = factor;
+                skb_shinfo(skb)->gso_size = mss_now;
+                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
         }
 }
 
@@ -914,7 +916,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int
 
         if (!tso_segs ||
             (tso_segs > 1 &&
-             skb_shinfo(skb)->tso_size != mss_now)) {
+             tcp_skb_mss(skb) != mss_now)) {
                 tcp_set_skb_tso_segs(sk, skb, mss_now);
                 tso_segs = tcp_skb_pcount(skb);
         }
@@ -1724,8 +1726,9 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
             tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
                 if (!pskb_trim(skb, 0)) {
                         TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
-                        skb_shinfo(skb)->tso_segs = 1;
-                        skb_shinfo(skb)->tso_size = 0;
+                        skb_shinfo(skb)->gso_segs = 1;
+                        skb_shinfo(skb)->gso_size = 0;
+                        skb_shinfo(skb)->gso_type = 0;
                         skb->ip_summed = CHECKSUM_NONE;
                         skb->csum = 0;
                 }
@@ -1930,8 +1933,9 @@ void tcp_send_fin(struct sock *sk)
         skb->csum = 0;
         TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
         TCP_SKB_CB(skb)->sacked = 0;
-        skb_shinfo(skb)->tso_segs = 1;
-        skb_shinfo(skb)->tso_size = 0;
+        skb_shinfo(skb)->gso_segs = 1;
+        skb_shinfo(skb)->gso_size = 0;
+        skb_shinfo(skb)->gso_type = 0;
 
         /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
         TCP_SKB_CB(skb)->seq = tp->write_seq;
@@ -1963,8 +1967,9 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
         skb->csum = 0;
         TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
         TCP_SKB_CB(skb)->sacked = 0;
-        skb_shinfo(skb)->tso_segs = 1;
-        skb_shinfo(skb)->tso_size = 0;
+        skb_shinfo(skb)->gso_segs = 1;
+        skb_shinfo(skb)->gso_size = 0;
+        skb_shinfo(skb)->gso_type = 0;
 
         /* Send it off. */
         TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
@@ -2047,8 +2052,9 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
         TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
         TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
         TCP_SKB_CB(skb)->sacked = 0;
-        skb_shinfo(skb)->tso_segs = 1;
-        skb_shinfo(skb)->tso_size = 0;
+        skb_shinfo(skb)->gso_segs = 1;
+        skb_shinfo(skb)->gso_size = 0;
+        skb_shinfo(skb)->gso_type = 0;
         th->seq = htonl(TCP_SKB_CB(skb)->seq);
         th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
         if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
@@ -2152,8 +2158,9 @@ int tcp_connect(struct sock *sk)
         TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
         TCP_ECN_send_syn(sk, tp, buff);
         TCP_SKB_CB(buff)->sacked = 0;
-        skb_shinfo(buff)->tso_segs = 1;
-        skb_shinfo(buff)->tso_size = 0;
+        skb_shinfo(buff)->gso_segs = 1;
+        skb_shinfo(buff)->gso_size = 0;
+        skb_shinfo(buff)->gso_type = 0;
         buff->csum = 0;
         TCP_SKB_CB(buff)->seq = tp->write_seq++;
         TCP_SKB_CB(buff)->end_seq = tp->write_seq;
@@ -2257,8 +2264,9 @@ void tcp_send_ack(struct sock *sk)
         buff->csum = 0;
         TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
         TCP_SKB_CB(buff)->sacked = 0;
-        skb_shinfo(buff)->tso_segs = 1;
-        skb_shinfo(buff)->tso_size = 0;
+        skb_shinfo(buff)->gso_segs = 1;
+        skb_shinfo(buff)->gso_size = 0;
+        skb_shinfo(buff)->gso_type = 0;
 
         /* Send it off, this clears delayed acks for us. */
         TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
@@ -2293,8 +2301,9 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
         skb->csum = 0;
         TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
         TCP_SKB_CB(skb)->sacked = urgent;
-        skb_shinfo(skb)->tso_segs = 1;
-        skb_shinfo(skb)->tso_size = 0;
+        skb_shinfo(skb)->gso_segs = 1;
+        skb_shinfo(skb)->gso_size = 0;
+        skb_shinfo(skb)->gso_type = 0;
 
         /* Use a previous sequence.  This should cause the other
          * end to send an ack.  Don't queue or clone SKB, just