/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/**
 *	inet_twsk_unhash - unhash a timewait socket from established hash
 *	@tw: timewait socket
 *
 *	unhash a timewait socket from established hash, if hashed.
 *	ehash lock must be held by caller.
 *	Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
	if (hlist_nulls_unhashed(&tw->tw_node))
		return 0;

	hlist_nulls_del_rcu(&tw->tw_node);
	sk_nulls_node_init(&tw->tw_node);
	/*
	 * We cannot call inet_twsk_put() ourself under lock,
	 * caller must call it for us.
	 */
	return 1;
}

/**
 *	inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 *	@tw: timewait socket
 *	@hashinfo: hashinfo pointer
 *
 *	unhash a timewait socket from bind hash, if hashed.
 *	bind hash lock must be held by caller.
 *	Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
			  struct inet_hashinfo *hashinfo)
{
	struct inet_bind_bucket *tb = tw->tw_tb;

	if (!tb)
		return 0;

	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	/*
	 * We cannot call inet_twsk_put() ourself under lock,
	 * caller must call it for us.
	 */
	return 1;
}

/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	int refcnt;
	/* Unlink from established hashes. */
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	spin_lock(lock);
	refcnt = inet_twsk_unhash(tw);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];

	spin_lock(&bhead->lock);
	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
	spin_unlock(&bhead->lock);

#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	while (refcnt) {
		inet_twsk_put(tw);
		refcnt--;
	}
}

static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;

	twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
	pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
	release_net(twsk_net(tw));
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

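/*
 * Usage sketch (illustrative): helpers that unhash under a hash-chain lock
 * defer the matching put until the lock is released, as __inet_twsk_kill()
 * does above:
 *
 *	spin_lock(lock);
 *	refcnt = inet_twsk_unhash(tw);
 *	spin_unlock(lock);
 *	while (refcnt--)
 *		inet_twsk_put(tw);
 */
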
/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note, that any socket with inet->num != 0 MUST be bound in
	   binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/*
	 * Step 2: Hash TW into TIMEWAIT chain.
	 * Should be done before removing sk from established chain
	 * because readers are lockless and search established first.
	 */
	inet_twsk_add_node_rcu(tw, &ehead->twchain);

	/* Step 3: Remove SK from established hash. */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	/*
	 * Notes :
	 * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
	 * - We add one reference for the bhash link
	 * - We add one reference for the ehash link
	 * - We want this refcnt update done before allowing other
	 *   threads to find this tw in ehash chain.
	 */
	atomic_add(1 + 1 + 1, &tw->tw_refcnt);

	spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);

	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		kmemcheck_annotate_bitfield(tw, flags);

		/* Give us an identity. */
		tw->tw_daddr	    = inet->inet_daddr;
		tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num	    = inet->inet_num;
		tw->tw_state	    = TCP_TIME_WAIT;
		tw->tw_substate	    = state;
		tw->tw_sport	    = inet->inet_sport;
		tw->tw_dport	    = inet->inet_dport;
		tw->tw_family	    = sk->sk_family;
		tw->tw_reuse	    = sk->sk_reuse;
		tw->tw_hash	    = sk->sk_hash;
		tw->tw_ipv6only	    = 0;
		tw->tw_transparent  = inet->transparent;
		tw->tw_prot	    = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		/*
		 * Because we use RCU lookups, we should not set tw_refcnt
		 * to a non null value before everything is setup for this
		 * timewait socket.
		 */
		atomic_set(&tw->tw_refcnt, 0);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);

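/*
 * Typical caller sequence (a sketch of the TCP TIME_WAIT entry path;
 * tcp_hashinfo, tcp_death_row and TCP_TIMEWAIT_LEN are the TCP instances
 * of the generic structures used here and are assumed, not defined in
 * this file):
 *
 *	struct inet_timewait_sock *tw = inet_twsk_alloc(sk, TCP_TIME_WAIT);
 *
 *	if (tw != NULL) {
 *		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 *		inet_twsk_schedule(tw, &tcp_death_row, timeo, TCP_TIMEWAIT_LEN);
 *		inet_twsk_put(tw);
 *	}
 */
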
/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to previous version where lock
	 * was released after detaching chain. It was racy,
	 * because tw buckets are scheduled in not serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
	return ret;
}

void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
		twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	}
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);

void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);

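/*
 * Scheduling overview (descriptive): inet_twsk_schedule() below places a
 * bucket on one of two wheels. Short, recyclable timeouts go to the
 * fine-grained twcal_row[] wheel, whose slots are 1 << INET_TWDR_RECYCLE_TICK
 * jiffies apart and which is swept by inet_twdr_twcal_tick(). Longer timeouts
 * go to the coarse cells[] wheel, advanced once per twdr->period by
 * inet_twdr_hangman(), which offloads to inet_twdr_twkill_work() when a slot
 * holds more than INET_TWDR_TWKILL_QUOTA sockets.
 */
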
void inet_twsk_schedule(struct inet_timewait_sock *tw,
			struct inet_timewait_death_row *twdr,
			const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (probability of such event
	 * is p^(N+1), where p is probability to lose single packet and
	 * time to detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff). Normal timewait length is calculated so, that we
	 * waited at least for one retransmitted FIN (maximal RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement waiting
	 *   only for 60sec, we should wait at least for 240 secs.
	 *   Well, 240 consumes too much of resources 8)		]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if peer understands PAWS, we
	 * kill tw bucket after 3.5*RTO (it is important that this number
	 * is greater than TS tick!) and detect old duplicates with help
	 * of PAWS.
	 */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);

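/*
 * Worked example for the slot rounding in inet_twsk_schedule() (illustrative):
 * with T = 1 << INET_TWDR_RECYCLE_TICK jiffies per recycle tick, a timeout of
 * timeo = 3*T + 1 rounds up to slot = (3*T + 1 + T - 1) >> INET_TWDR_RECYCLE_TICK = 4,
 * so a bucket always stays on the death row for at least timeo jiffies.
 */
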
void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);

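/*
 * inet_twsk_purge() walks every established-hash bucket's timewait chain and
 * early-kills buckets of the given family whose network namespace is going
 * away (twsk_net(tw)->count has dropped to zero); buckets in live namespaces
 * are skipped. A bucket is only descheduled after its refcount has been
 * successfully raised and the family/namespace checks re-verified under that
 * reference.
 */
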
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	unsigned int slot;

	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
		rcu_read_lock();
restart:
		sk_nulls_for_each_rcu(sk, node, &head->twchain) {
			tw = inet_twsk(sk);
			if ((tw->tw_family != family) ||
			    atomic_read(&twsk_net(tw)->count))
				continue;

			if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
				continue;

			if (unlikely((tw->tw_family != family) ||
				     atomic_read(&twsk_net(tw)->count))) {
				inet_twsk_put(tw);
				goto restart;
			}

			rcu_read_unlock();
			inet_twsk_deschedule(tw, twdr);
			inet_twsk_put(tw);
			goto restart_rcu;
		}
		/* If the nulls value we got at the end of this lookup is
		 * not the expected one, we must restart lookup.
		 * We probably met an item that was moved to another chain.
		 */
		if (get_nulls_value(node) != slot)
			goto restart;
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);