/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <asm/uaccess.h>
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by daddr to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */
static DEFINE_SPINLOCK(xfrm_state_lock);
/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);
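/*
 * km_waitq is woken whenever the state tables change in a way a sleeping
 * key manager may care about: insertion, flush, expiry, SPI allocation
 * and garbage collection all signal it (see the wake_up() calls below).
 */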
static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

static int xfrm_state_gc_flush_bundles;

static int __xfrm_state_delete(struct xfrm_state *x);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
static void km_state_expired(struct xfrm_state *x, int hard);
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	if (del_timer(&x->timer))
		BUG();
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	kfree(x);
}
static void xfrm_state_gc_task(void *data)
{
	struct xfrm_state *x;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	if (xfrm_state_gc_flush_bundles) {
		xfrm_state_gc_flush_bundles = 0;
		xfrm_flush_bundles();
	}

	spin_lock_bh(&xfrm_state_gc_lock);
	list_splice_init(&xfrm_state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		x = list_entry(entry, struct xfrm_state, bydst);
		xfrm_state_gc_destroy(x);
	}
	wake_up(&km_waitq);
}
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
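/*
 * make_jiffies() clamps the seconds-to-jiffies conversion so the result
 * never exceeds what the timer core accepts: e.g. with HZ == 1000, a
 * 10-second lifetime becomes 10000 jiffies, while an absurdly large
 * lifetime is capped at MAX_SCHEDULE_TIMEOUT-1 instead of overflowing.
 */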
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0);
resched:
	if (next != LONG_MAX &&
	    !mod_timer(&x->timer, jiffies + make_jiffies(next)))
		xfrm_state_hold(x);
	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}
	if (!__xfrm_state_delete(x) && x->id.spi)
		km_state_expired(x, 1);

out:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
}
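/*
 * Timer discipline used throughout this file: a pending timer holds one
 * reference on its state.  mod_timer() returning 0 means the timer was
 * not already pending, so a fresh reference is taken; del_timer()
 * returning nonzero means a pending timer was cancelled and its
 * reference is dropped.
 */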
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		memset(x, 0, sizeof(struct xfrm_state));
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->bydst);
		INIT_LIST_HEAD(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data = (unsigned long)x;
		x->curlft.add_time = (unsigned long)xtime.tv_sec;
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	list_add(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
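/*
 * Destruction is deferred to process context via the GC work queue, so
 * __xfrm_state_destroy() is safe to call from atomic context.  A dead
 * state's bydst linkage is no longer used for hashing, which is why it
 * can be reused here to queue the state on xfrm_state_gc_list.
 */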
static int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		list_del(&x->bydst);
		atomic_dec(&x->refcnt);
		if (x->id.spi) {
			list_del(&x->byspi);
			atomic_dec(&x->refcnt);
		}
		spin_unlock(&xfrm_state_lock);
		if (del_timer(&x->timer))
			atomic_dec(&x->refcnt);

		/* The number two in this test is the reference
		 * mentioned in the comment below plus the reference
		 * our caller holds.  A larger value means that
		 * there are DSTs attached to this xfrm_state.
		 */
		if (atomic_read(&x->refcnt) > 2) {
			xfrm_state_gc_flush_bundles = 1;
			schedule_work(&xfrm_state_gc_work);
		}

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		atomic_dec(&x->refcnt);
		err = 0;
	}

	return err;
}
int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
void xfrm_state_flush(u8 proto)
{
	int i;
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i < XFRM_DST_HSIZE; i++) {
restart:
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				xfrm_state_delete(x);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
}
EXPORT_SYMBOL(xfrm_state_flush);
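/*
 * Note the restart pattern above: xfrm_state_lock must be dropped around
 * xfrm_state_delete(), and the hash chain may change while the lock is
 * released, so the scan restarts from the head of the current chain.
 */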
static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned h = xfrm_dst_hash(daddr, family);
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;
	struct xfrm_state_afinfo *afinfo;

	afinfo = xfrm_state_get_afinfo(family);
	if (afinfo == NULL) {
		*err = -EAFNOSUPPORT;
		return NULL;
	}

	spin_lock_bh(&xfrm_state_lock);
	list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, family))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, family))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
					       tmpl->id.proto)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			list_add_tail(&x->bydst, xfrm_state_bydst+h);
			xfrm_state_hold(x);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				list_add(&x->byspi, xfrm_state_byspi+h);
				xfrm_state_hold(x);
			}
			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
			xfrm_state_hold(x);
			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
			add_timer(&x->timer);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return x;
}
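/*
 * xfrm_state_find() returns a state with a reference held, or NULL with
 * *err set: -EAGAIN while a previously triggered acquire is still
 * pending, -EEXIST if the template names an SPI that is already
 * installed, -ENOMEM on allocation failure, or -ESRCH when no key
 * manager could handle the query.
 */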
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);

	list_add(&x->bydst, xfrm_state_bydst+h);
	xfrm_state_hold(x);

	h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);

	list_add(&x->byspi, xfrm_state_byspi+h);
	xfrm_state_hold(x);

	if (!mod_timer(&x->timer, jiffies + HZ))
		xfrm_state_hold(x);

	wake_up(&km_waitq);
}

void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);

	xfrm_flush_all_bundles();
}
EXPORT_SYMBOL(xfrm_state_insert);
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_state *x1;
	int family;
	int err;

	family = x->props.family;
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);

	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (!x1)
		x1 = afinfo->find_acq(
			x->props.mode, x->props.reqid, x->id.proto,
			&x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);

	if (!err)
		xfrm_flush_all_bundles();

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
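/*
 * xfrm_state_add() refuses to replace an already installed state
 * (-EEXIST), but it does delete a matching larval (acquire) state, so a
 * key manager's add resolves the pending acquire that requested it.
 */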
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_state *x1;
	int err;

	afinfo = xfrm_state_get_afinfo(x->props.family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		if (!mod_timer(&x1->timer, jiffies + HZ))
			xfrm_state_hold(x1);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
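/*
 * xfrm_state_update() has two outcomes: if the matching entry is still
 * larval (XFRM_STATE_ACQ) the new state replaces it outright; otherwise
 * only the lifetime configuration and NAT-T encapsulation of the
 * installed state are refreshed in place.
 */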
int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		if (!mod_timer(&x->timer, jiffies))
			xfrm_state_hold(x);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
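/*
 * Soft limits only notify the key manager (hard == 0) and mark the state
 * dying; hard limits expire it immediately.  Illustrative numbers only:
 * with soft_byte_limit == 900000 and hard_byte_limit == 1000000, a state
 * that has carried 950000 bytes triggers one soft expire notification
 * but keeps passing traffic until the hard limit is reached.
 */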
static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
		- skb_headroom(skb);

	if (nhead > 0)
		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

	/* Check tail too... */
	return 0;
}

int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm_state_check_expire(x);
	if (err < 0)
		goto err;
	err = xfrm_state_check_space(x, skb);
err:
	return err;
}
EXPORT_SYMBOL(xfrm_state_check);
struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
		  unsigned short family)
{
	struct xfrm_state *x;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return NULL;

	spin_lock_bh(&xfrm_state_lock);
	x = afinfo->state_lookup(daddr, spi, proto);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);
struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
	      xfrm_address_t *daddr, xfrm_address_t *saddr,
	      int create, unsigned short family)
{
	struct xfrm_state *x;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return NULL;

	spin_lock_bh(&xfrm_state_lock);
	x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);
/* Silly enough, but I'm lazy to build resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;
	struct xfrm_state *x;

	for (i = 0; i < XFRM_DST_HSIZE; i++) {
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_find_acq_byseq(seq);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);
u32 xfrm_get_acqseq(void)
{
	u32 res;
	static u32 acqseq;
	static DEFINE_SPINLOCK(acqseq_lock);

	spin_lock_bh(&acqseq_lock);
	res = (++acqseq ? : ++acqseq);
	spin_unlock_bh(&acqseq_lock);
	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
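/*
 * The expression (++acqseq ? : ++acqseq) increments the counter and, if
 * it just wrapped to zero, increments it once more, so zero is never
 * handed out as an acquire sequence number.
 */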
void
xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
{
	u32 h;
	struct xfrm_state *x0;

	if (x->id.spi)
		return;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			return;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		minspi = ntohl(minspi);
		maxspi = ntohl(maxspi);
		for (h=0; h<maxspi-minspi+1; h++) {
			spi = minspi + net_random()%(maxspi-minspi+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		list_add(&x->byspi, xfrm_state_byspi+h);
		xfrm_state_hold(x);
		spin_unlock_bh(&xfrm_state_lock);
		wake_up(&km_waitq);
	}
}
EXPORT_SYMBOL(xfrm_alloc_spi);
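/*
 * The SPI search above makes maxspi-minspi+1 random probes (with
 * replacement) in the requested range and gives up silently if all of
 * them collide, leaving x->id.spi zero for the caller to check.  The
 * ntohl()/htonl() pairs keep x->id.spi in network byte order while the
 * range arithmetic runs on host-order values.
 */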
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i < XFRM_DST_HSIZE; i++) {
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
				count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}

	for (i = 0; i < XFRM_DST_HSIZE; i++) {
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
				continue;
			err = func(x, --count, data);
			if (err)
				goto out;
		}
	}
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
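/*
 * A minimal usage sketch (dump_one is hypothetical, for illustration
 * only):
 *
 *	static int dump_one(struct xfrm_state *x, int count, void *data)
 *	{
 *		printk(KERN_DEBUG "state proto %u, %d remaining\n",
 *		       x->id.proto, count);
 *		return 0;
 *	}
 *
 *	err = xfrm_state_walk(IPSEC_PROTO_ANY, dump_one, NULL);
 *
 * The callback runs with xfrm_state_lock held, so it must not sleep.
 */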
int xfrm_replay_check(struct xfrm_state *x, u32 seq)
{
	u32 diff;

	seq = ntohl(seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= x->props.replay_window) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);
void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
{
	u32 diff;

	seq = ntohl(seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}
}
EXPORT_SYMBOL(xfrm_replay_advance);
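/*
 * Replay window walk-through (illustrative): with x->replay.seq == 100,
 * bit 0 of the bitmap covers seq 100 itself.  A late packet with seq 97
 * gives diff == 3; xfrm_replay_check() rejects it if bit 3 is already
 * set, and xfrm_replay_advance() sets bit 3 once it is accepted.  A new
 * highest seq shifts the whole bitmap left by the difference.
 */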
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);

void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_mgr *km;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);
static void km_state_expired(struct xfrm_state *x, int hard)
{
	struct km_event c;

	c.data.hard = hard;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);

	if (hard)
		wake_up(&km_waitq);
}
/*
 * We send to all registered managers regardless of failure
 * We are happy with one success
 */
static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
		if (!acqret)
			err = acqret;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
{
	struct km_event c;

	c.data.hard = hard;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);

	if (hard)
		wake_up(&km_waitq);
}
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk->sk_family, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		afinfo->state_bydst = xfrm_state_bydst;
		afinfo->state_byspi = xfrm_state_byspi;
		xfrm_state_afinfo[afinfo->family] = afinfo;
	}
	write_unlock(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			xfrm_state_afinfo[afinfo->family] = NULL;
			afinfo->state_byspi = NULL;
			afinfo->state_bydst = NULL;
		}
	}
	write_unlock(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (likely(afinfo != NULL))
		read_lock(&afinfo->lock);
	read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}

static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	if (unlikely(afinfo == NULL))
		return;
	read_unlock(&afinfo->lock);
}
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
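/*
 * The tunnel_users == 2 test reads as: apart from the bookkeeping
 * references, the state being unlinked was the last user of the tunnel
 * state, so the tunnel state itself can be deleted as well.
 */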
/*
 * This function is NOT optimal.  For example, with ESP it will give an
 * MTU that's usually two bytes short of being optimal.  However, it will
 * usually give an answer that's a multiple of 4 provided the input is
 * also a multiple of 4.
 */
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res = mtu;

	res -= x->props.header_len;

	for (;;) {
		int m = res;

		if (m < 68)
			return 68;

		spin_lock_bh(&x->lock);
		if (x->km.state == XFRM_STATE_VALID &&
		    x->type && x->type->get_max_size)
			m = x->type->get_max_size(x, m);
		else
			m += x->props.header_len;
		spin_unlock_bh(&x->lock);

		if (m <= mtu)
			break;
		res -= (m - mtu);
	}

	return res;
}
EXPORT_SYMBOL(xfrm_state_mtu);
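/*
 * Illustrative iteration (hypothetical numbers): with mtu == 1500 and
 * header_len == 28, res starts at 1472; if get_max_size(1472) reports a
 * padded on-wire size of 1504, res is reduced by 4 and the loop retries
 * until the reported size fits within the original mtu.
 */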
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}
EXPORT_SYMBOL(xfrm_init_state);
void __init xfrm_state_init(void)
{
	int i;

	for (i=0; i<XFRM_DST_HSIZE; i++) {
		INIT_LIST_HEAD(&xfrm_state_bydst[i]);
		INIT_LIST_HEAD(&xfrm_state_byspi[i]);
	}
	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
}