/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/inet_frag.h>

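/*
 * Timer callback: pick a fresh random seed and move every queue to the
 * hash bucket the new seed dictates.  Rehashing at secret_interval
 * keeps remote senders from predicting bucket placement and flooding a
 * single hash chain.
 */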
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

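/*
 * Set up the per-protocol shared state: empty hash buckets, the lock
 * protecting them, an initial hash seed, and the periodic secret
 * rebuild timer.  Called once by each reassembly user (ipv4, ipv6,
 * nf_conntrack).
 */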
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
			   (jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	atomic_set(&nf->mem, 0);
	INIT_LIST_HEAD(&nf->lru_list);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_frags_exit_net);

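/*
 * Remove a queue from its hash chain and from the per-namespace LRU
 * list.  Both structures are protected by f->lock.
 */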
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	fq->net->nqueues--;
	write_unlock(&f->lock);
}

void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

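/*
 * Free a single fragment, crediting its truesize back to the namespace
 * memory accounting and, if given, to the caller's eviction budget.
 */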
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;

	atomic_sub(skb->truesize, &nf->mem);
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(nf, f, fp, work);
		fp = xp;
	}

	if (work)
		*work -= f->qsize;
	atomic_sub(f->qsize, &nf->mem);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

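/*
 * Walk the LRU list from its head (the least recently used queues) and
 * kill entries until memory use falls below low_thresh.  @force skips
 * the high_thresh check; inet_frags_exit_net() also zeroes low_thresh
 * first so the namespace drains completely.
 */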
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	if (!force) {
		if (atomic_read(&nf->mem) <= nf->high_thresh)
			return 0;
	}

	work = atomic_read(&nf->mem) - nf->low_thresh;
	while (work > 0) {
		read_lock(&f->lock);
		if (list_empty(&nf->lru_list)) {
			read_unlock(&f->lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

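/*
 * Publish a freshly allocated queue in the hash table and LRU list.
 * If another CPU inserted a matching queue while we were allocating,
 * drop ours and return the existing one instead.
 */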
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	unsigned int hash;

	write_lock(&f->lock);
	/*
	 * While we stayed w/o the lock other CPU could update
	 * the rnd seed, so we need to re-calculate the hash
	 * chain. Fortunately the qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could be created on other cpu, while we
	 * promoted read lock to write lock.
	 */
	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	list_add_tail(&qp->lru_list, &nf->lru_list);
	nf->nqueues++;
	write_unlock(&f->lock);
	return qp;
}

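/*
 * Allocate a queue and let the protocol's constructor fill in its
 * private part.  The expiry timer is initialised here but only armed
 * later, by mod_timer() in inet_frag_intern().
 */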
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	atomic_add(f->qsize, &nf->mem);
	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

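/*
 * Look up a queue by key, creating and interning a new one on a miss.
 * The caller must hold f->lock for reading; it is released on every
 * return path.
 */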
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_queue *q;
	struct hlist_node *n;

	hlist_for_each_entry(q, n, &f->hash[hash], list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
	}
	read_unlock(&f->lock);

	return inet_frag_create(nf, f, key);
}
EXPORT_SYMBOL(inet_frag_find);