/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

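/*
 * Note: dccp_transmit_skb() below calls this for DCCP-Ack and DCCP-DataAck
 * packets, on the reasoning that once an acknowledgement has actually been
 * put on the wire there is no point in keeping the delayed-ACK timer armed.
 */
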
/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb != NULL)) {
                const struct inet_sock *inet = inet_sk(sk);
                struct dccp_sock *dp = dccp_sk(sk);
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                struct dccp_hdr *dh;
                /* XXX For now we're using only 48 bits sequence numbers */
                const int dccp_header_size = sizeof(*dh) +
                                             sizeof(struct dccp_hdr_ext) +
                                             dccp_packet_hdr_len(dcb->dccpd_type);
                int err, set_ack = 1;
                u64 ackno = dp->dccps_gsr;

                /*
                 * FIXME: study DCCP_PKT_SYNC[ACK] to see what is the right thing
                 * to do here...
                 */
                dccp_inc_seqno(&dp->dccps_gss);

                dcb->dccpd_seq = dp->dccps_gss;
                dccp_insert_options(sk, skb);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_DATA:
                        set_ack = 0;
                        break;
                case DCCP_PKT_SYNC:
                case DCCP_PKT_SYNCACK:
                        ackno = dcb->dccpd_seq;
                        break;
                }

                skb->h.raw = skb_push(skb, dccp_header_size);
                dh = dccp_hdr(skb);

                /* Data packets are not cloned as they are never retransmitted */
                if (skb_cloned(skb))
                        skb_set_owner_w(skb, sk);

                /* Build DCCP header and checksum it. */
                memset(dh, 0, dccp_header_size);
                dh->dccph_type  = dcb->dccpd_type;
                dh->dccph_sport = inet->sport;
                dh->dccph_dport = inet->dport;
                dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
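                /*
                 * Note: dccph_doff counts 32-bit words and covers the generic
                 * header, the extended sequence number, the type-specific
                 * fields and the options, so dccp_header_size plus
                 * dcb->dccpd_opt_len is expected to be a multiple of 4 here
                 * (DCCP options are padded out to 32-bit alignment).
                 */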
                dh->dccph_ccval = dcb->dccpd_ccval;
                /* XXX For now we're using only 48 bits sequence numbers */
                dh->dccph_x     = 1;

                dp->dccps_awh = dp->dccps_gss;
                dccp_hdr_set_seq(dh, dp->dccps_gss);
                if (set_ack)
                        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_REQUEST:
                        dccp_hdr_request(skb)->dccph_req_service =
                                                        dcb->dccpd_service;
                        break;
                case DCCP_PKT_RESET:
                        dccp_hdr_reset(skb)->dccph_reset_code =
                                                        dcb->dccpd_reset_code;
                        break;
                }

                dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
                                                      inet->daddr);

                if (dcb->dccpd_type == DCCP_PKT_ACK ||
                    dcb->dccpd_type == DCCP_PKT_DATAACK)
                        dccp_event_ack_sent(sk);

                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

                err = ip_queue_xmit(skb, 0);
                if (err <= 0)
                        return err;

                /* NET_XMIT_CN is special: it does not guarantee that this
                 * packet is lost. It tells us that the device is about to
                 * start dropping packets, or already drops some packets of
                 * the same priority, and invokes us to send less
                 * aggressively.
                 */
                return err == NET_XMIT_CN ? 0 : err;
        }
        return -ENOBUFS;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct dccp_sock *dp = dccp_sk(sk);
        int mss_now;

        /*
         * FIXME: we really should be using the af_specific thing to support
         * IPv6.
         * mss_now = pmtu - tp->af_specific->net_header_len -
         *           sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext);
         */
        mss_now = pmtu - sizeof(struct iphdr) - sizeof(struct dccp_hdr) -
                  sizeof(struct dccp_hdr_ext);

        /* Now subtract optional transport overhead */
        mss_now -= dp->dccps_ext_header_len;

        /*
         * FIXME: this should come from the CCID infrastructure, where, say,
         * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now lets
         * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
         * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
         * make it a multiple of 4
         */
        mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;
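        /*
         * For the record: the six option sizes above sum to
         * 5 + 6 + 10 + 6 + 6 + 6 = 39 bytes, and the "+ 3" combined with the
         * integer "/ 4) * 4" rounds that up to the next multiple of four, so
         * 40 bytes of option space are reserved per packet.
         */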

        /* And store cached results */
        dp->dccps_pmtu_cookie = pmtu;
        dp->dccps_mss_cache = mss_now;

        return mss_now;
}

int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, const int len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb, len);

        if (err == 0) {
                const struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

                if (sk->sk_state == DCCP_PARTOPEN) {
                        /* See 8.1.5. Handshake Completion */
                        inet_csk_schedule_ack(sk);
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  inet_csk(sk)->icsk_rto,
                                                  DCCP_RTO_MAX);
                        dcb->dccpd_type = DCCP_PKT_DATAACK;
                /*
                 * FIXME: we really should have a
                 * dccps_ack_pending or use icsk.
                 */
                } else if (inet_csk_ack_scheduled(sk) ||
                           (dp->dccps_options.dccpo_send_ack_vector &&
                            ap->dccpap_buf_ackno != DCCP_MAX_SEQNO + 1 &&
                            ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1))
                        dcb->dccpd_type = DCCP_PKT_DATAACK;
                else
                        dcb->dccpd_type = DCCP_PKT_DATA;

                err = dccp_transmit_skb(sk, skb);
                ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
        }

        return err;
}

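/*
 * In dccp_write_xmit() above, ccid_hc_tx_send_packet() acts as the
 * congestion-control gatekeeper: a non-zero return means the CCID refuses to
 * send right now, so the skb is left untouched and the error is handed back
 * to the caller; only on success is the packet typed (DATA vs DATAACK),
 * transmitted, and reported back via ccid_hc_tx_packet_sent().
 */
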
int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (inet_sk_rebuild_header(sk) != 0)
                return -EHOSTUNREACH; /* Routing failure or similar. */

        return dccp_transmit_skb(sk, (skb_cloned(skb) ?
                                      pskb_copy(skb, GFP_ATOMIC) :
                                      skb_clone(skb, GFP_ATOMIC)));
}

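/*
 * As with TCP, the skb queued at sk_send_head may be shared (e.g. still
 * sitting in a device queue), so the retransmission never hands the original
 * to the IP layer: a cloned skb gets a private copy via pskb_copy(), while an
 * unshared one can simply be cloned.
 */
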
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req)
{
        struct dccp_hdr *dh;
        const int dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_response);
        struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
                                               dccp_header_size, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

        skb->dst = dst_clone(dst);
        skb->csum = 0;

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
        DCCP_SKB_CB(skb)->dccpd_seq  = dccp_rsk(req)->dreq_iss;
        dccp_insert_options(sk, skb);

        skb->h.raw = skb_push(skb, dccp_header_size);

        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_rsk(req)->rmt_port;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESPONSE;
        dh->dccph_x     = 1;
        dccp_hdr_set_seq(dh, dccp_rsk(req)->dreq_iss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dccp_rsk(req)->dreq_isr);

        dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
                                              inet_rsk(req)->rmt_addr);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}

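/*
 * Note that dccp_make_response() runs on a half-open connection: there is no
 * full dccp_sock yet, so the sequence and acknowledgement numbers come from
 * the request_sock (dreq_iss/dreq_isr) and the addresses for the checksum
 * from inet_rsk(req) rather than from inet_sk(sk).
 */
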
struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
                                const enum dccp_reset_codes code)
{
        struct dccp_hdr *dh;
        struct dccp_sock *dp = dccp_sk(sk);
        const int dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_reset);
        struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
                                               dccp_header_size, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

        skb->dst = dst_clone(dst);
        skb->csum = 0;

        dccp_inc_seqno(&dp->dccps_gss);

        DCCP_SKB_CB(skb)->dccpd_reset_code = code;
        DCCP_SKB_CB(skb)->dccpd_type       = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_seq        = dp->dccps_gss;
        dccp_insert_options(sk, skb);

        skb->h.raw = skb_push(skb, dccp_header_size);

        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_sk(sk)->dport;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_x     = 1;
        dccp_hdr_set_seq(dh, dp->dccps_gss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

        dccp_hdr_reset(skb)->dccph_reset_code = code;

        dh->dccph_checksum = dccp_v4_checksum(skb, inet_sk(sk)->saddr,
                                              inet_sk(sk)->daddr);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}

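/*
 * Unlike dccp_make_response(), a reset is built on a full socket, so it
 * consumes a real sequence number (dccp_inc_seqno() on dccps_gss) and
 * acknowledges dccps_gsr, just like packets sent through dccp_transmit_skb().
 */
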
/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk->sk_err = 0;
        sock_reset_flag(sk, SOCK_DONE);

        dccp_sync_mss(sk, dst_mtu(dst));

        /*
         * FIXME: set dp->{dccps_swh,dccps_swl}, with
         * something like dccp_inc_seq
         */

        icsk->icsk_retransmits = 0;
}

int dccp_connect(struct sock *sk)
{
        struct sk_buff *skb;
        struct inet_connection_sock *icsk = inet_csk(sk);

        dccp_connect_init(sk);

        skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation);
        if (unlikely(skb == NULL))
                return -ENOBUFS;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_DCCP_HEADER);

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
        /* FIXME: set service to something meaningful, coming
         * from userspace */
        DCCP_SKB_CB(skb)->dccpd_service = 0;
        skb->csum = 0;
        skb_set_owner_w(skb, sk);

        BUG_TRAP(sk->sk_send_head == NULL);
        sk->sk_send_head = skb;
        dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
        DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

        /* Timer for repeating the REQUEST until an answer. */
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  icsk->icsk_rto, DCCP_RTO_MAX);
        return 0;
}

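/*
 * The REQUEST skb itself stays queued at sk_send_head; what actually goes to
 * dccp_transmit_skb() is a clone, so that the retransmit timer armed above
 * can keep resending the original until a RESPONSE arrives.
 */
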
void dccp_send_ack(struct sock *sk)
{
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != DCCP_CLOSED) {
                struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

                if (skb == NULL) {
                        inet_csk_schedule_ack(sk);
                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX, TCP_RTO_MAX);
                        return;
                }

                /* Reserve space for headers */
                skb_reserve(skb, MAX_DCCP_HEADER);
                skb->csum = 0;
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
                skb_set_owner_w(skb, sk);
                dccp_transmit_skb(sk, skb);
        }
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

void dccp_send_delayed_ack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        /*
         * FIXME: tune this timer. elapsed time fixes the skew, so no problem
         * with using 2s, and active senders also piggyback the ACK into a
         * DATAACK packet, so this is really for quiescent senders.
         */
        unsigned long timeout = jiffies + 2 * HZ;

        /* Use new timeout only if there wasn't an older one earlier. */
        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /* If delack timer was blocked or is about to expire,
                 * send ACK now.
                 *
                 * FIXME: check the "about to expire" part
                 */
                if (icsk->icsk_ack.blocked) {
                        dccp_send_ack(sk);
                        return;
                }

                if (!time_before(timeout, icsk->icsk_ack.timeout))
                        timeout = icsk->icsk_ack.timeout;
        }
        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
        icsk->icsk_ack.timeout = timeout;
        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

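/*
 * The logic above mirrors tcp_send_delayed_ack(): if a delayed-ACK timer is
 * already pending but was blocked (the socket was locked when it fired), the
 * ACK goes out immediately; otherwise the earlier of the two deadlines wins.
 */
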
void dccp_send_sync(struct sock *sk, u64 seq)
{
        /*
         * We are not putting this on the write queue, so
         * dccp_transmit_skb() will set the ownership to this
         * sock.
         */
        struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

        if (skb == NULL)
                /* FIXME: how to make sure the sync is sent? */
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, MAX_DCCP_HEADER);
        skb->csum = 0;
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_SYNC;
        DCCP_SKB_CB(skb)->dccpd_seq = seq;

        skb_set_owner_w(skb, sk);
        dccp_transmit_skb(sk, skb);
}

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;

        /* Socket is locked, keep trying until memory is available. */
        for (;;) {
                skb = alloc_skb(sk->sk_prot->max_header, GFP_KERNEL);
                if (skb != NULL)
                        break;
                yield();
        }

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        skb->csum = 0;
        DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
                                        DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

        skb_set_owner_w(skb, sk);
        dccp_transmit_skb(sk, skb);

        ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
        ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
}