[NET] IPV4: Fix whitespace errors.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit e905a9edab
parent 642656518b
Author: YOSHIFUJI Hideaki, 2007-02-09 23:24:47 +09:00
Committed by: David S. Miller
120 changed files with 1775 additions and 1775 deletions

--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c

@@ -198,7 +198,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
(*rcv_wscale) = 0;
if (wscale_ok) {
/* Set window scaling on max possible window
- * See RFC1323 for an explanation of the limit to 14
+ * See RFC1323 for an explanation of the limit to 14
*/
space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
space = min_t(u32, space, *window_clamp);
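Aside on the hunk above: the cap of 14 keeps the scaled window below 2^30, as RFC 1323 requires. A minimal userspace sketch of the shift selection (pick_rcv_wscale is an illustrative name; the kernel's loop also honors *window_clamp and the sysctl limits):

#include <stdio.h>

/* Smallest shift that makes "space" fit the 16-bit window field,
 * capped at 14 so the scaled window stays below 2^30 (RFC 1323). */
static unsigned int pick_rcv_wscale(unsigned int space)
{
	unsigned int wscale = 0;

	while (space > 65535U && wscale < 14) {
		space >>= 1;
		wscale++;
	}
	return wscale;
}

int main(void)
{
	printf("%u\n", pick_rcv_wscale(4u * 1024 * 1024)); /* prints 7 */
	return 0;
}

Compiled standalone, a 4 MB buffer yields a wscale of 7, since 4194304 >> 7 = 32768 is the first value that fits in 16 bits.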
@@ -451,7 +451,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
(tp->rx_opt.eff_sacks *
TCPOLEN_SACK_PERBLOCK));
}
-
+
if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
@@ -555,7 +555,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
}
- /* This routine just queue's the buffer
+ /* This routine just queue's the buffer
*
* NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
* otherwise socket can stall.
@@ -597,7 +597,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
/* Function to create two new TCP segments. Shrinks the given segment
* to the specified size and appends a new segment with the rest of the
- * packet to the list. This won't be called frequently, I hope.
+ * packet to the list. This won't be called frequently, I hope.
* Remember, these are still headerless SKBs at this point.
*/
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
@@ -610,7 +610,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
BUG_ON(len > skb->len);
- clear_all_retrans_hints(tp);
+ clear_all_retrans_hints(tp);
nsize = skb_headlen(skb) - len;
if (nsize < 0)
nsize = 0;
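For context, the tcp_fragment() being touched here splits one queued segment in two: the original shrinks to len bytes and a new segment carries the remainder. A rough userspace analogy, assuming a hypothetical struct seg rather than real SKBs:

#include <stdlib.h>
#include <string.h>

struct seg {
	unsigned char *data;
	size_t len;
	struct seg *next;
};

/* Shrink *head to "len" bytes and queue a new segment holding the
 * remainder right behind it. */
static int seg_fragment(struct seg *head, size_t len)
{
	struct seg *rest;

	if (len >= head->len)
		return -1;		/* nothing to split off */

	rest = malloc(sizeof(*rest));
	if (!rest)
		return -1;
	rest->len = head->len - len;
	rest->data = malloc(rest->len);
	if (!rest->data) {
		free(rest);
		return -1;
	}
	memcpy(rest->data, head->data + len, rest->len);

	head->len = len;
	rest->next = head->next;
	head->next = rest;
	return 0;
}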
@@ -821,7 +821,7 @@ void tcp_mtup_init(struct sock *sk)
icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
- icsk->icsk_af_ops->net_header_len;
+ icsk->icsk_af_ops->net_header_len;
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
icsk->icsk_mtup.probe_size = 0;
}
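The search_high/search_low bounds set above delimit the MTU-probing search range. The MSS-to-MTU conversion they rely on is plain arithmetic; a sketch assuming IPv4 and option-less TCP headers (20 bytes each):

#include <stdio.h>

#define TCP_HDR_LEN	20	/* sizeof(struct tcphdr), no options */
#define IPV4_HDR_LEN	20	/* net_header_len for AF_INET */

/* What tcp_mss_to_mtu() boils down to for plain IPv4/TCP. */
static unsigned int mss_to_mtu(unsigned int mss)
{
	return mss + TCP_HDR_LEN + IPV4_HDR_LEN;
}

int main(void)
{
	printf("search_low for MSS 512: %u\n", mss_to_mtu(512)); /* 552 */
	return 0;
}

With an assumed base MSS of 512, search_low would come out at 552 bytes.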
@@ -1008,7 +1008,7 @@ static inline int tcp_minshall_check(const struct tcp_sock *tp)
*/
static inline int tcp_nagle_check(const struct tcp_sock *tp,
- const struct sk_buff *skb,
+ const struct sk_buff *skb,
unsigned mss_now, int nonagle)
{
return (skb->len < mss_now &&
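tcp_nagle_check() above decides whether a sub-MSS segment should be held back. A self-contained restatement of the rule, with tcp_minshall_check() reduced to a boolean input (a simplification, not the kernel predicate):

#include <stdbool.h>
#include <stddef.h>

/* Defer a segment if it is smaller than one MSS and either the socket
 * is corked, or Nagle is enabled and small data is still unacked. */
static bool nagle_defer(size_t skb_len, size_t mss_now,
			bool corked, bool nagle_on,
			bool packets_out, bool minshall_unacked)
{
	if (skb_len >= mss_now)
		return false;	/* full-sized segments are never delayed */
	if (corked)
		return true;	/* TCP_CORK holds back all partial segments */
	return nagle_on && packets_out && minshall_unacked;
}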
@@ -1078,7 +1078,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
return cwnd_quota;
}
- static inline int tcp_skb_is_last(const struct sock *sk,
+ static inline int tcp_skb_is_last(const struct sock *sk,
const struct sk_buff *skb)
{
return skb->next == (struct sk_buff *)&sk->sk_write_queue;
@@ -1298,7 +1298,7 @@ static int tcp_mtu_probe(struct sock *sk)
skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
else
nskb->csum = skb_copy_and_csum_bits(skb, 0,
- skb_put(nskb, copy), copy, nskb->csum);
+ skb_put(nskb, copy), copy, nskb->csum);
if (skb->len <= copy) {
/* We've eaten all the data from this skb.
@@ -1308,7 +1308,7 @@ static int tcp_mtu_probe(struct sock *sk)
sk_stream_free_skb(sk, skb);
} else {
TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
- ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
+ ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
if (!skb_shinfo(skb)->nr_frags) {
skb_pull(skb, copy);
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1501,7 +1501,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
/* This function returns the amount that we can raise the
* usable window based on the following constraints
- *
+ *
* 1. The window can never be shrunk once it is offered (RFC 793)
* 2. We limit memory per socket
*
@@ -1520,12 +1520,12 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
* side SWS prevention criteria. The problem is that under this rule
* a stream of single byte packets will cause the right side of the
* window to always advance by a single byte.
- *
+ *
* Of course, if the sender implements sender side SWS prevention
* then this will not be a problem.
- *
+ *
* BSD seems to make the following compromise:
- *
+ *
* If the free space is less than the 1/4 of the maximum
* space available and the free space is less than 1/2 mss,
* then set the window to 0.
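The BSD compromise described in this comment is directly computable; a sketch of just that clause (a hypothetical helper, not __tcp_select_window itself, which additionally rounds the advertised window to MSS multiples):

/* BSD-style receiver SWS avoidance, as stated in the comment above:
 * advertise a zero window when free space is below both a quarter of
 * the maximum space and half an MSS. */
static unsigned int sws_window(unsigned int free_space,
			       unsigned int max_space,
			       unsigned int mss)
{
	if (free_space < max_space / 4 && free_space < mss / 2)
		return 0;
	return free_space;
}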
@@ -1567,7 +1567,7 @@ u32 __tcp_select_window(struct sock *sk)
int window;
if (mss > full_space)
- mss = full_space;
+ mss = full_space;
if (free_space < full_space/2) {
icsk->icsk_ack.quick = 0;
@@ -1691,9 +1691,9 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
}
/* Do a simple retransmit without using the backoff mechanisms in
- * tcp_timer. This is used for path mtu discovery.
+ * tcp_timer. This is used for path mtu discovery.
* The socket is already locked here.
- */
+ */
void tcp_simple_retransmit(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1703,7 +1703,7 @@ void tcp_simple_retransmit(struct sock *sk)
int lost = 0;
sk_stream_for_retrans_queue(skb, sk) {
- if (skb->len > mss &&
+ if (skb->len > mss &&
!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
@@ -1724,7 +1724,7 @@ void tcp_simple_retransmit(struct sock *sk)
tcp_sync_left_out(tp);
- /* Don't muck with the congestion window here.
+ /* Don't muck with the congestion window here.
* Reason is that we do not increase amount of _data_
* in network, but units changed and effective
* cwnd/ssthresh really reduced now.
@@ -1747,7 +1747,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
- unsigned int cur_mss = tcp_current_mss(sk, 0);
+ unsigned int cur_mss = tcp_current_mss(sk, 0);
int err;
/* Inconslusive MTU probe */
@@ -1984,10 +1984,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
*/
void tcp_send_fin(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
int mss_now;
/* Optimization, tack on the FIN if we have a queue of
* unsent frames. But be careful about outgoing SACKS
* and IP options.
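The optimization this comment describes, restated as a sketch: piggyback FIN on an already-queued unsent segment when possible, and only otherwise allocate a fresh one (struct queued_seg and its flag are stand-ins, not kernel types):

#include <stdbool.h>
#include <stddef.h>

#define SEG_FLAG_FIN 0x01

struct queued_seg {
	unsigned int flags;	/* stand-in for TCP_SKB_CB(skb)->flags */
	unsigned int end_seq;
	struct queued_seg *next;
};

/* Tack FIN onto the last unsent segment when one is queued; the caller
 * only allocates a fresh segment when this returns false. */
static bool try_piggyback_fin(struct queued_seg *tail_unsent)
{
	if (!tail_unsent)
		return false;
	tail_unsent->flags |= SEG_FLAG_FIN;
	tail_unsent->end_seq += 1;	/* FIN consumes one sequence number */
	return true;
}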
@@ -2146,17 +2146,17 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
th->seq = htonl(TCP_SKB_CB(skb)->seq);
th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
- __u8 rcv_wscale;
+ __u8 rcv_wscale;
/* Set this up on the first call only */
req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
/* tcp_full_space because it is guaranteed to be the first packet */
- tcp_select_initial_window(tcp_full_space(sk),
+ tcp_select_initial_window(tcp_full_space(sk),
dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
&req->rcv_wnd,
&req->window_clamp,
ireq->wscale_ok,
&rcv_wscale);
- ireq->rcv_wscale = rcv_wscale;
+ ireq->rcv_wscale = rcv_wscale;
}
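One detail in the call above: the MSS handed to tcp_select_initial_window() shrinks by TCPOLEN_TSTAMP_ALIGNED (12 bytes) when timestamps are negotiated, since the option will ride in every segment. As a sketch:

#define TCPOLEN_TSTAMP_ALIGNED 12	/* timestamp option padded to a 4-byte boundary */

/* Effective MSS for the SYN-ACK window computation: the advertised MSS
 * minus the per-segment timestamp option, when timestamps are on. */
static unsigned int synack_window_mss(unsigned int advmss, int tstamp_ok)
{
	return advmss - (tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0);
}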
/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
@@ -2192,9 +2192,9 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
return skb;
}
- /*
+ /*
* Do all connect socket setups that can be done AF independent.
- */
+ */
static void tcp_connect_init(struct sock *sk)
{
struct dst_entry *dst = __sk_dst_get(sk);
@@ -2251,7 +2251,7 @@ static void tcp_connect_init(struct sock *sk)
/*
* Build a SYN and send it off.
- */
+ */
int tcp_connect(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -2409,7 +2409,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
/* We don't queue it, tcp_transmit_skb() sets ownership. */
skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
- if (skb == NULL)
+ if (skb == NULL)
return -1;
/* Reserve space for headers and set control bits. */
@@ -2498,7 +2498,7 @@ void tcp_send_probe0(struct sock *sk)
if (icsk->icsk_backoff < sysctl_tcp_retries2)
icsk->icsk_backoff++;
icsk->icsk_probes_out++;
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
TCP_RTO_MAX);
} else {
@@ -2510,7 +2510,7 @@ void tcp_send_probe0(struct sock *sk)
*/
if (!icsk->icsk_probes_out)
icsk->icsk_probes_out = 1;
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(icsk->icsk_rto << icsk->icsk_backoff,
TCP_RESOURCE_PROBE_INTERVAL),
TCP_RTO_MAX);
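Both tcp_send_probe0() hunks feed the timer the same clamped exponential backoff. The arithmetic, pulled out into a sketch (rto and rto_max as plain jiffy counts; TCP_RTO_MAX is 120*HZ in the kernel):

/* Zero-window probe timeout: double per backoff step, hard-capped. */
static unsigned long probe0_timeout(unsigned long rto,
				    unsigned int backoff,
				    unsigned long rto_max)
{
	unsigned long when = rto << backoff;

	return when < rto_max ? when : rto_max;
}

For example, with an rto of 200 ms worth of jiffies and a backoff of 4, the probe fires after 3.2 s; the clamp keeps further doublings from ever exceeding rto_max.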