[NET] SCTP: Fix whitespace errors.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by YOSHIFUJI Hideaki on 2007-02-09 23:25:18 +09:00; committed by David S. Miller
parent 10297b9931
commit d808ad9ab8
23 changed files with 365 additions and 365 deletions
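Every hunk below is a whitespace-only cleanup; the usual offenders in kernel sources are trailing blanks at the end of a line and a space placed immediately before a tab in the indentation. As a purely illustrative sketch, not part of this commit or of the kernel's own tooling, a minimal standalone checker for those two patterns could look like the following; the program and its command-line usage are assumptions added here for demonstration only.

/*
 * Illustrative only: scan one source file for the two classic whitespace
 * errors this kind of cleanup removes, namely trailing whitespace and a
 * space sitting directly before a tab. Hypothetical helper, not kernel code.
 */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	FILE *fp;
	char line[4096];
	unsigned long lineno = 0;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fp = fopen(argv[1], "r");
	if (!fp) {
		perror(argv[1]);
		return 1;
	}
	while (fgets(line, sizeof(line), fp)) {
		size_t len = strcspn(line, "\n");

		lineno++;
		/* blank or tab left at the very end of the line */
		if (len && (line[len - 1] == ' ' || line[len - 1] == '\t'))
			printf("%s:%lu: trailing whitespace\n", argv[1], lineno);
		/* a space used directly before a tab anywhere on the line */
		if (strstr(line, " \t"))
			printf("%s:%lu: space before tab\n", argv[1], lineno);
	}
	fclose(fp);
	return 0;
}

Running a checker like this on a touched file before and after the commit would show the warnings disappearing; git diff --check reports the same classes of problems directly.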

@@ -158,7 +158,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
* If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
* recommended value of 5 times 'RTO.Max'.
*/
-asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
+asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
= 5 * asoc->rto_max;
asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
@@ -1343,7 +1343,7 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
sctp_read_lock(&asoc->base.addr_lock);
if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
-sctp_sk(asoc->base.sk))) {
+sctp_sk(asoc->base.sk))) {
found = 1;
goto out;
}

@@ -306,7 +306,7 @@ int sctp_bind_addr_match(struct sctp_bind_addr *bp,
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
if (opt->pf->cmp_addr(&laddr->a, addr, opt))
-return 1;
+return 1;
}
return 0;

@@ -138,7 +138,7 @@ static const __u32 crc_c[256] = {
__u32 sctp_start_cksum(__u8 *buffer, __u16 length)
{
-__u32 crc32 = ~(__u32) 0;
+__u32 crc32 = ~(__u32) 0;
__u32 i;
/* Optimize this routine to be SCTP specific, knowing how
@@ -147,7 +147,7 @@ __u32 sctp_start_cksum(__u8 *buffer, __u16 length)
/* Calculate CRC up to the checksum. */
for (i = 0; i < (sizeof(struct sctphdr) - sizeof(__u32)); i++)
-CRC32C(crc32, buffer[i]);
+CRC32C(crc32, buffer[i]);
/* Skip checksum field of the header. */
for (i = 0; i < sizeof(__u32); i++)

@@ -154,7 +154,7 @@ const char *sctp_pname(const sctp_subtype_t id)
static const char *sctp_other_tbl[] = {
"NO_PENDING_TSN",
-"ICMP_PROTO_UNREACH",
+"ICMP_PROTO_UNREACH",
};
/* Lookup "other" debug name. */

@@ -369,7 +369,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
chunk->transport->last_time_heard = jiffies;
error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, state,
-ep, asoc, chunk, GFP_ATOMIC);
+ep, asoc, chunk, GFP_ATOMIC);
if (error && chunk)
chunk->pdiscard = 1;

@@ -226,7 +226,7 @@ int sctp_rcv(struct sk_buff *skb)
nf_reset(skb);
if (sk_filter(sk, skb))
-goto discard_release;
+goto discard_release;
/* Create an SCTP packet structure. */
chunk = sctp_chunkify(skb, asoc, sk);
@@ -293,11 +293,11 @@ discard_release:
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
-struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
-struct sctp_ep_common *rcvr = NULL;
+struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
+struct sctp_ep_common *rcvr = NULL;
int backloged = 0;
-rcvr = chunk->rcvr;
+rcvr = chunk->rcvr;
/* If the rcvr is dead then the association or endpoint
* has been deleted and we can safely drop the chunk
@@ -347,7 +347,7 @@ done:
else
BUG();
-return 0;
+return 0;
}
static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
@@ -416,8 +416,8 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
*
*/
void sctp_icmp_proto_unreachable(struct sock *sk,
-struct sctp_association *asoc,
-struct sctp_transport *t)
+struct sctp_association *asoc,
+struct sctp_transport *t)
{
SCTP_DEBUG_PRINTK("%s\n", __FUNCTION__);

@@ -152,8 +152,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
chunk->data_accepted = 0;
}
-chunk->chunk_hdr = ch;
-chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+chunk->chunk_hdr = ch;
+chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
/* In the unlikely case of an IP reassembly, the skb could be
* non-linear. If so, update chunk_end so that it doesn't go past
* the skb->tail.
@@ -169,7 +169,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
/* This is not a singleton */
chunk->singleton = 0;
} else if (chunk->chunk_end > chunk->skb->tail) {
-/* RFC 2960, Section 6.10 Bundling
+/* RFC 2960, Section 6.10 Bundling
*
* Partial chunks MUST NOT be placed in an SCTP packet.
* If the receiver detects a partial chunk, it MUST drop

@@ -121,7 +121,7 @@ done:
if (len > length)
len = length;
-return len;
+return len;
}
/* Initialize the objcount in the proc filesystem. */

@@ -85,8 +85,8 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
chunk = sctp_get_ecne_prepend(packet->transport->asoc);
/* If there a is a prepend chunk stick it on the list before
-* any other chunks get appended.
-*/
+* any other chunks get appended.
+*/
if (chunk)
sctp_packet_append_chunk(packet, chunk);
}
@@ -442,7 +442,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
* acknowledged or have failed.
*/
if (!sctp_chunk_is_data(chunk))
-sctp_chunk_free(chunk);
+sctp_chunk_free(chunk);
}
/* Perform final transformation on checksum. */
@@ -528,7 +528,7 @@ err:
list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
list_del_init(&chunk->list);
if (!sctp_chunk_is_data(chunk))
-sctp_chunk_free(chunk);
+sctp_chunk_free(chunk);
}
goto out;
nomem:

@@ -578,7 +578,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
break;
case SCTP_XMIT_RWND_FULL:
-/* Send this packet. */
+/* Send this packet. */
if ((error = sctp_packet_transmit(pkt)) == 0)
*start_timer = 1;
@@ -590,7 +590,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
break;
case SCTP_XMIT_NAGLE_DELAY:
-/* Send this packet. */
+/* Send this packet. */
if ((error = sctp_packet_transmit(pkt)) == 0)
*start_timer = 1;
@@ -1266,7 +1266,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
* first instance of the packet or a later
* instance).
*/
-if (!tchunk->tsn_gap_acked &&
+if (!tchunk->tsn_gap_acked &&
!tchunk->resent &&
tchunk->rtt_in_progress) {
tchunk->rtt_in_progress = 0;
@@ -1275,7 +1275,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
rtt);
}
}
-if (TSN_lte(tsn, sack_ctsn)) {
+if (TSN_lte(tsn, sack_ctsn)) {
/* RFC 2960 6.3.2 Retransmission Timer Rules
*
* R3) Whenever a SACK is received
@@ -1590,7 +1590,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
"ssthresh: %d, flight_size: %d, pba: %d\n",
__FUNCTION__, transport, transport->cwnd,
-transport->ssthresh, transport->flight_size,
+transport->ssthresh, transport->flight_size,
transport->partial_bytes_acked);
}
}
@@ -1603,7 +1603,7 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
__u16 gap;
__u32 ctsn = ntohl(sack->cum_tsn_ack);
-if (TSN_lte(tsn, ctsn))
+if (TSN_lte(tsn, ctsn))
goto pass;
/* 3.3.4 Selective Acknowledgement (SACK) (3):

@@ -70,7 +70,7 @@ int sctp_primitive_ ## name(struct sctp_association *asoc, \
\
error = sctp_do_sm(event_type, subtype, state, ep, asoc, \
arg, GFP_KERNEL); \
-return error; \
+return error; \
}
/* 10.1 ULP-to-SCTP

@@ -344,9 +344,9 @@ static int sctp_v4_addr_valid(union sctp_addr *addr,
if (IS_IPV4_UNUSABLE_ADDRESS(&addr->v4.sin_addr.s_addr))
return 0;
-/* Is this a broadcast address? */
-if (skb && ((struct rtable *)skb->dst)->rt_flags & RTCF_BROADCAST)
-return 0;
+/* Is this a broadcast address? */
+if (skb && ((struct rtable *)skb->dst)->rt_flags & RTCF_BROADCAST)
+return 0;
return 1;
}
@@ -494,7 +494,7 @@ out_unlock:
out:
if (dst)
SCTP_DEBUG_PRINTK("rt_dst:%u.%u.%u.%u, rt_src:%u.%u.%u.%u\n",
-NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_src));
+NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_src));
else
SCTP_DEBUG_PRINTK("NO ROUTE\n");
@@ -524,7 +524,7 @@ static void sctp_v4_get_saddr(struct sctp_association *asoc,
/* What interface did this skb arrive on? */
static int sctp_v4_skb_iif(const struct sk_buff *skb)
{
-return ((struct rtable *)skb->dst)->rt_iif;
+return ((struct rtable *)skb->dst)->rt_iif;
}
/* Was this packet marked by Explicit Congestion Notification? */
@@ -569,7 +569,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
newinet->dport = htons(asoc->peer.port);
newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
newinet->pmtudisc = inet->pmtudisc;
-newinet->id = asoc->next_tsn ^ jiffies;
+newinet->id = asoc->next_tsn ^ jiffies;
newinet->uc_ttl = -1;
newinet->mc_loop = 1;

@@ -118,7 +118,7 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
int padlen;
__u16 len;
-/* Cause code constants are now defined in network order. */
+/* Cause code constants are now defined in network order. */
err.cause = cause_code;
len = sizeof(sctp_errhdr_t) + paylen;
padlen = len % 4;
@@ -295,11 +295,11 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
*/
chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len;
-/* Tell peer that we'll do ECN only if peer advertised such cap. */
+/* Tell peer that we'll do ECN only if peer advertised such cap. */
if (asoc->peer.ecn_capable)
chunksize += sizeof(ecap_param);
-/* Tell peer that we'll do PR-SCTP only if peer advertised. */
+/* Tell peer that we'll do PR-SCTP only if peer advertised. */
if (asoc->peer.prsctp_capable)
chunksize += sizeof(prsctp_param);
@@ -728,7 +728,7 @@ struct sctp_chunk *sctp_make_shutdown_complete(
if (retval && chunk)
retval->transport = chunk->transport;
-return retval;
+return retval;
}
/* Create an ABORT. Note that we set the T bit if we have no
@@ -1315,7 +1315,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
memcpy((__u8 *)&cookie->c.peer_init[0] +
ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);
-if (sctp_sk(ep->base.sk)->hmac) {
+if (sctp_sk(ep->base.sk)->hmac) {
struct hash_desc desc;
/* Sign the message. */
@@ -1324,8 +1324,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
sg.length = bodysize;
keylen = SCTP_SECRET_SIZE;
key = (char *)ep->secret_key[ep->current_key];
-desc.tfm = sctp_sk(ep->base.sk)->hmac;
-desc.flags = 0;
+desc.tfm = sctp_sk(ep->base.sk)->hmac;
+desc.flags = 0;
if (crypto_hash_setkey(desc.tfm, key, keylen) ||
crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
@@ -1861,7 +1861,7 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
sctp_walk_params(param, peer_init, init_hdr.params) {
if (!sctp_process_param(asoc, param, peer_addr, gfp))
-goto clean_up;
+goto clean_up;
}
/* Walk list of transports, removing transports in the UNKNOWN state. */
@@ -1937,7 +1937,7 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
*/
/* Allocate storage for the negotiated streams if it is not a temporary
-* association.
+* association.
*/
if (!asoc->temp) {
int assoc_id;
@@ -2424,11 +2424,11 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
switch (asconf_param->param_hdr.type) {
case SCTP_PARAM_ADD_IP:
/* ADDIP 4.3 D9) If an endpoint receives an ADD IP address
-* request and does not have the local resources to add this
-* new address to the association, it MUST return an Error
-* Cause TLV set to the new error code 'Operation Refused
-* Due to Resource Shortage'.
-*/
+* request and does not have the local resources to add this
+* new address to the association, it MUST return an Error
+* Cause TLV set to the new error code 'Operation Refused
+* Due to Resource Shortage'.
+*/
peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
if (!peer)
@@ -2440,10 +2440,10 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
break;
case SCTP_PARAM_DEL_IP:
/* ADDIP 4.3 D7) If a request is received to delete the
-* last remaining IP address of a peer endpoint, the receiver
-* MUST send an Error Cause TLV with the error cause set to the
-* new error code 'Request to Delete Last Remaining IP Address'.
-*/
+* last remaining IP address of a peer endpoint, the receiver
+* MUST send an Error Cause TLV with the error cause set to the
+* new error code 'Request to Delete Last Remaining IP Address'.
+*/
pos = asoc->peer.transport_addr_list.next;
if (pos->next == &asoc->peer.transport_addr_list)
return SCTP_ERROR_DEL_LAST_IP;
@@ -2755,7 +2755,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
/* Skip the processed asconf parameter and move to the next
* one.
-*/
+*/
length = ntohs(asconf_param->param_hdr.length);
asconf_param = (sctp_addip_param_t *)((void *)asconf_param +
length);

@@ -61,7 +61,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
struct sctp_endpoint *ep,
struct sctp_association *asoc,
void *event_arg,
-sctp_disposition_t status,
+sctp_disposition_t status,
sctp_cmd_seq_t *commands,
gfp_t gfp);
static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
@@ -178,7 +178,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
* [This is actually not mentioned in Section 6, but we
* implement it here anyway. --piggy]
*/
-if (max_tsn_seen != ctsn)
+if (max_tsn_seen != ctsn)
asoc->peer.sack_needed = 1;
/* From 6.2 Acknowledgement on Reception of DATA Chunks:
@@ -338,8 +338,8 @@ static void sctp_generate_t4_rto_event(unsigned long data)
static void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
-struct sctp_association *asoc = (struct sctp_association *)data;
-sctp_generate_timeout_event(asoc,
+struct sctp_association *asoc = (struct sctp_association *)data;
+sctp_generate_timeout_event(asoc,
SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
} /* sctp_generate_t5_shutdown_guard_event() */
@@ -380,7 +380,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
asoc->state, asoc->ep, asoc,
transport, GFP_ATOMIC);
-if (error)
+if (error)
asoc->base.sk->sk_err = -error;
out_unlock:
@@ -570,7 +570,7 @@ static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
-struct sctp_association *asoc)
+struct sctp_association *asoc)
{
struct sctp_transport *t;
struct list_head *pos;
@@ -727,7 +727,7 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
sctp_state(asoc, SHUTDOWN_RECEIVED)) {
/* Wake up any processes waiting in the asoc's wait queue in
* sctp_wait_for_connect() or sctp_wait_for_sndbuf().
-*/
+*/
if (waitqueue_active(&asoc->wait))
wake_up_interruptible(&asoc->wait);
@@ -840,7 +840,7 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
t = list_entry(pos, struct sctp_transport, transports);
if (!sctp_cmp_addr_exact(&t->ipaddr,
-&asoc->peer.primary_addr)) {
+&asoc->peer.primary_addr)) {
sctp_assoc_del_peer(asoc, &t->ipaddr);
}
}
@@ -968,7 +968,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
error = -ENOMEM;
break;
-case SCTP_DISPOSITION_DELETE_TCB:
+case SCTP_DISPOSITION_DELETE_TCB:
/* This should now be a command. */
break;
@@ -1021,7 +1021,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
struct sctp_endpoint *ep,
struct sctp_association *asoc,
void *event_arg,
-sctp_disposition_t status,
+sctp_disposition_t status,
sctp_cmd_seq_t *commands,
gfp_t gfp)
{
@@ -1104,7 +1104,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_PROCESS_FWDTSN:
sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr);
-break;
+break;
case SCTP_CMD_GEN_SACK:
/* Generate a Selective ACK.
@@ -1177,7 +1177,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
asoc->peer.primary_path) &&
(asoc->init_err_counter > 0)) {
sctp_add_cmd_sf(commands,
-SCTP_CMD_FORCE_PRIM_RETRAN,
+SCTP_CMD_FORCE_PRIM_RETRAN,
SCTP_NULL());
}

@@ -189,7 +189,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
0, 0, 0, GFP_ATOMIC);
if (ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
-SCTP_ULPEVENT(ev));
+SCTP_ULPEVENT(ev));
/* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint
* will verify that it is in SHUTDOWN-ACK-SENT state, if it is
@@ -326,7 +326,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
}
}
-/* Grab the INIT header. */
+/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *)chunk->skb->data;
/* Tag the variable length parameters. */
@@ -594,7 +594,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
-chunk->subh.cookie_hdr =
+chunk->subh.cookie_hdr =
(struct sctp_signed_cookie *)chunk->skb->data;
if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t)))
@@ -891,7 +891,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
SCTP_TRANSPORT(transport));
-return SCTP_DISPOSITION_CONSUME;
+return SCTP_DISPOSITION_CONSUME;
}
/*
@@ -1548,7 +1548,7 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
/* Per the above section, we'll discard the chunk if we have an
* endpoint. If this is an OOTB INIT-ACK, treat it as such.
*/
-if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
+if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_ootb(ep, asoc, type, arg, commands);
else
return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
@@ -1760,9 +1760,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
/* Clarification from Implementor's Guide:
* D) When both local and remote tags match the endpoint should
-* enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state.
-* It should stop any cookie timer that may be running and send
-* a COOKIE ACK.
+* enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state.
+* It should stop any cookie timer that may be running and send
+* a COOKIE ACK.
*/
/* Don't accidentally move back into established state. */
@@ -1786,7 +1786,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
SCTP_COMM_UP, 0,
asoc->c.sinit_num_ostreams,
asoc->c.sinit_max_instreams,
-GFP_ATOMIC);
+GFP_ATOMIC);
if (!ev)
goto nomem;
@@ -1870,7 +1870,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
-chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
+chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t)))
goto nomem;
@@ -1936,7 +1936,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
default: /* Discard packet for all others. */
retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands);
break;
-};
+};
/* Delete the tempory new association. */
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
@@ -2274,7 +2274,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
-/* ASSOC_FAILED will DELETE_TCB. */
+/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -2661,7 +2661,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
-}
+}
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
@@ -2930,7 +2930,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
/* Make an ABORT. The T bit will be set if the asoc
* is NULL.
*/
-abort = sctp_make_abort(asoc, chunk, 0);
+abort = sctp_make_abort(asoc, chunk, 0);
if (!abort) {
sctp_ootb_pkt_free(packet);
return SCTP_DISPOSITION_NOMEM;
@@ -3175,8 +3175,8 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
if (packet) {
/* Make an SHUTDOWN_COMPLETE.
-* The T bit will be set if the asoc is NULL.
-*/
+* The T bit will be set if the asoc is NULL.
+*/
shut = sctp_make_shutdown_complete(asoc, chunk);
if (!shut) {
sctp_ootb_pkt_free(packet);
@@ -3264,7 +3264,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
* 'Peer-Serial-Number'.
*/
if (serial == asoc->peer.addip_serial + 1) {
-/* ADDIP 4.2 C2) If the value found in the serial number is
+/* ADDIP 4.2 C2) If the value found in the serial number is
* equal to the ('Peer-Serial-Number' + 1), the endpoint MUST
* do V1-V5.
*/
@ -3307,7 +3307,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
*/
sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
-const sctp_subtype_t type, void *arg,
+const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *asconf_ack = arg;
@@ -3359,7 +3359,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-SCTP_ERROR(ECONNABORTED));
+SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3387,7 +3387,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-SCTP_ERROR(ECONNABORTED));
+SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3527,7 +3527,7 @@ gen_shutdown:
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
-return SCTP_DISPOSITION_CONSUME;
+return SCTP_DISPOSITION_CONSUME;
}
/*
@@ -3747,7 +3747,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-SCTP_ERROR(ECONNABORTED));
+SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -4437,7 +4437,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
/* sctp-implguide 2.10 Issues with Heartbeating and failover
*
* HEARTBEAT ... is discontinued after sending either SHUTDOWN
-* or SHUTDOWN-ACK.
+* or SHUTDOWN-ACK.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
@@ -4515,7 +4515,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
/* sctp-implguide 2.10 Issues with Heartbeating and failover
*
* HEARTBEAT ... is discontinued after sending either SHUTDOWN
-* or SHUTDOWN-ACK.
+* or SHUTDOWN-ACK.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
@@ -4953,7 +4953,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
/* sctpimpguide-05 Section 2.12.2
* The sender of the SHUTDOWN MAY also start an overall guard timer
* 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
-*/
+*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
disposition = SCTP_DISPOSITION_CONSUME;
@@ -5310,7 +5310,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
}
-/* Spill over rwnd a little bit. Note: While allowed, this spill over
+/* Spill over rwnd a little bit. Note: While allowed, this spill over
* seems a bit troublesome in that frag_point varies based on
* PMTU. In cases, such as loopback, this might be a rather
* large spill over.

@@ -517,7 +517,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
continue;
/* Check if any address in the packed array of addresses is
-* in the bind address list of the association. If so,
+* in the bind address list of the association. If so,
* do not send the asconf chunk to its peer, but continue with
* other associations.
*/
@@ -710,7 +710,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
continue;
/* Check if any address in the packed array of addresses is
-* not present in the bind address list of the association.
+* not present in the bind address list of the association.
* If so, do not send the asconf chunk to its peer, but
* continue with other associations.
*/
@@ -935,7 +935,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
default:
err = -EINVAL;
break;
-};
+};
out:
kfree(kaddrs);
@@ -1094,8 +1094,8 @@ static int __sctp_connect(struct sock* sk,
out_free:
SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
-" kaddrs: %p err: %d\n",
-asoc, kaddrs, err);
+" kaddrs: %p err: %d\n",
+asoc, kaddrs, err);
if (asoc)
sctp_association_free(asoc);
return err;
@@ -1435,7 +1435,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
* length messages when SCTP_EOF|SCTP_ABORT is not set.
* If SCTP_ABORT is set, the message length could be non zero with
* the msg_iov set to the user abort reason.
-*/
+*/
if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
(!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
err = -EINVAL;
@@ -2298,7 +2298,7 @@ static int sctp_setsockopt_delayed_ack_time(struct sock *sk,
/* Get association, if assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
* the id was invalid.
-*/
+*/
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc && params.assoc_id && sctp_style(sk, UDP))
return -EINVAL;
@@ -3015,7 +3015,7 @@ SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
out:
sctp_release_sock(sk);
-*err = error;
+*err = error;
return newsk;
}
@@ -3099,8 +3099,8 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp->pathmtu = 0; // allow default discovery
sp->sackdelay = sctp_sack_timeout;
sp->param_flags = SPP_HB_ENABLE |
-SPP_PMTUD_ENABLE |
-SPP_SACKDELAY_ENABLE;
+SPP_PMTUD_ENABLE |
+SPP_SACKDELAY_ENABLE;
/* If enabled no SCTP message fragmentation will be performed.
* Configure through SCTP_DISABLE_FRAGMENTS socket option.
@@ -3680,7 +3680,7 @@ static int sctp_getsockopt_delayed_ack_time(struct sock *sk, int len,
/* Get association, if assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
* the id was invalid.
-*/
+*/
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc && params.assoc_id && sctp_style(sk, UDP))
return -EINVAL;
@@ -5010,7 +5010,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
return (!list_empty(&sp->ep->asocs)) ?
-(POLLIN | POLLRDNORM) : 0;
+(POLLIN | POLLRDNORM) : 0;
mask = 0;
@@ -5430,7 +5430,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
DEFINE_WAIT(wait);
SCTP_DEBUG_PRINTK("wait_for_sndbuf: asoc=%p, timeo=%ld, msg_len=%zu\n",
-asoc, (long)(*timeo_p), msg_len);
+asoc, (long)(*timeo_p), msg_len);
/* Increment the association's refcnt. */
sctp_association_hold(asoc);

@@ -130,9 +130,9 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
gfp_t gfp)
{
-struct sctp_transport *transport;
+struct sctp_transport *transport;
-transport = t_new(struct sctp_transport, gfp);
+transport = t_new(struct sctp_transport, gfp);
if (!transport)
goto fail;
@@ -185,7 +185,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
if (transport->asoc)
sctp_association_put(transport->asoc);
-sctp_packet_free(&transport->packet);
+sctp_packet_free(&transport->packet);
dst_release(transport->dst);
kfree(transport);
@@ -459,8 +459,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
* destination address(es) to which the missing DATA chunks
* were last sent, according to the formula described in
* Section 7.2.3.
-*
-* RFC 2960 7.2.3, sctpimpguide Upon detection of packet
+*
+* RFC 2960 7.2.3, sctpimpguide Upon detection of packet
* losses from SACK (see Section 7.2.4), An endpoint
* should do the following:
* ssthresh = max(cwnd/2, 4*MTU)
@@ -488,7 +488,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
if ((jiffies - transport->last_time_ecne_reduced) >
transport->rtt) {
transport->ssthresh = max(transport->cwnd/2,
-4*transport->asoc->pathmtu);
+4*transport->asoc->pathmtu);
transport->cwnd = transport->ssthresh;
transport->last_time_ecne_reduced = jiffies;
}

@@ -277,7 +277,7 @@ static void sctp_tsnmap_update(struct sctp_tsnmap *map)
/* Now tsn_map must have been all '1's,
* so we swap the map and check the overflow table
*/
-__u8 *tmp = map->tsn_map;
+__u8 *tmp = map->tsn_map;
memset(tmp, 0, map->len);
map->tsn_map = map->overflow_map;
map->overflow_map = tmp;

@@ -749,7 +749,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
*/
pd->pdapi_length = sizeof(struct sctp_pdapi_event);
-/* pdapi_indication: 32 bits (unsigned integer)
+/* pdapi_indication: 32 bits (unsigned integer)
*
* This field holds the indication being sent to the application.
*/
@@ -790,13 +790,13 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
return;
/* Sockets API Extensions for SCTP
-* Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
-*
-* sinfo_stream: 16 bits (unsigned integer)
-*
-* For recvmsg() the SCTP stack places the message's stream number in
-* this value.
-*/
+* Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
+*
+* sinfo_stream: 16 bits (unsigned integer)
+*
+* For recvmsg() the SCTP stack places the message's stream number in
+* this value.
+*/
sinfo.sinfo_stream = event->stream;
/* sinfo_ssn: 16 bits (unsigned integer)
*

@@ -191,7 +191,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
queue = &sk->sk_receive_queue;
} else if (ulpq->pd_mode) {
if (event->msg_flags & MSG_NOTIFICATION)
-queue = &sctp_sk(sk)->pd_lobby;
+queue = &sctp_sk(sk)->pd_lobby;
else {
clear_pd = event->msg_flags & MSG_EOR;
queue = &sk->sk_receive_queue;
@@ -298,32 +298,32 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
*/
if (last)
last->next = pos;
-else {
-if (skb_cloned(f_frag)) {
-/* This is a cloned skb, we can't just modify
-* the frag_list. We need a new skb to do that.
-* Instead of calling skb_unshare(), we'll do it
-* ourselves since we need to delay the free.
-*/
-new = skb_copy(f_frag, GFP_ATOMIC);
-if (!new)
-return NULL; /* try again later */
+else {
+if (skb_cloned(f_frag)) {
+/* This is a cloned skb, we can't just modify
+* the frag_list. We need a new skb to do that.
+* Instead of calling skb_unshare(), we'll do it
+* ourselves since we need to delay the free.
+*/
+new = skb_copy(f_frag, GFP_ATOMIC);
+if (!new)
+return NULL; /* try again later */
-sctp_skb_set_owner_r(new, f_frag->sk);
+sctp_skb_set_owner_r(new, f_frag->sk);
-skb_shinfo(new)->frag_list = pos;
-} else
-skb_shinfo(f_frag)->frag_list = pos;
-}
+skb_shinfo(new)->frag_list = pos;
+} else
+skb_shinfo(f_frag)->frag_list = pos;
+}
/* Remove the first fragment from the reassembly queue. */
__skb_unlink(f_frag, queue);
-/* if we did unshare, then free the old skb and re-assign */
-if (new) {
-kfree_skb(f_frag);
-f_frag = new;
-}
+/* if we did unshare, then free the old skb and re-assign */
+if (new) {
+kfree_skb(f_frag);
+f_frag = new;
+}
while (pos) {