/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <asm/uaccess.h>
#include <linux/audit.h>

#include "xfrm_hash.h"

struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);
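
/* Default tunables for the asynchronous replay-event (aevent) machinery;
 * the netlink code uses these as the per-state timer interval and
 * sequence-number delta threshold consumed by xfrm_replay_notify() below.
 */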
u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;
static unsigned int xfrm_state_genid;

static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
}

static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 unsigned short family)
{
	return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
}

static inline unsigned int
xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
}

static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}

static unsigned long xfrm_hash_new_size(void)
{
	return ((xfrm_state_hmask + 1) << 1) *
		sizeof(struct hlist_head);
}
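
/* Each resize doubles the tables: the new bucket count is
 * (old_mask + 1) << 1.  xfrm_hash_grow_check() below schedules this work
 * when a chain collision is seen while the SA count already exceeds the
 * bucket count, up to xfrm_state_hashmax buckets.
 */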
static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);

static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->mode)
		xfrm_put_mode(x->mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
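
/* Runs from the xfrm_state_gc_work workqueue: steal the whole pending
 * list under the gc lock, then tear the entries down with no locks held.
 */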
static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
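
/* Per-state lifetime timer: a hard add/use expiry deletes the SA and
 * notifies the key managers, a soft expiry only marks it dying and asks
 * for a replacement; otherwise the timer is re-armed for the nearest
 * remaining deadline.
 */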
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
		       AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);

out:
	spin_unlock(&x->lock);
}

static void xfrm_replay_timer_handler(unsigned long data);

struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data	  = (unsigned long)x;
		init_timer(&x->rtimer);
		x->rtimer.function = xfrm_replay_timer_handler;
		x->rtimer.data     = (unsigned long)x;
		x->curlft.add_time = get_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
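
/* Dropping the last reference queues the state here instead of freeing it
 * directly: the caller may be in atomic context, while the gc work can
 * safely run del_timer_sync() and free everything from process context.
 */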
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		__xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_log(audit_info->loginuid,
					       audit_info->secid,
					       AUDIT_MAC_IPSEC_DELSA,
					       err ? 0 : 1, NULL, x);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
}
EXPORT_SYMBOL(xfrm_state_flush);

void xfrm_sad_getinfo(struct xfrm_sadinfo *si)
{
	spin_lock_bh(&xfrm_state_lock);
	si->sadcnt = xfrm_state_num;
	si->sadhcnt = xfrm_state_hmask;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);

static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}

static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi       != spi ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}

static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	if (use_spi)
		return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
					   x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(&x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}
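
/* Grow the hash tables when a freshly inserted state collided with an
 * existing chain and there are already more states than buckets; the
 * actual resize happens asynchronously in xfrm_hash_resize().
 */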
static void xfrm_hash_grow_check(int have_hash_collision)
{
	if (have_hash_collision &&
	    (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
	    xfrm_state_num > xfrm_state_hmask)
		schedule_work(&xfrm_hash_work);
}

struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, family) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}

/* xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family	== family &&
		    x->props.reqid	== reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}

void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid  != reqid ||
		    x->props.mode   != mode ||
		    x->props.family != family ||
		    x->km.state     != XFRM_STATE_ACQ ||
		    x->id.spi       != 0 ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
		xfrm_state_hold(x);
		x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
		wake_up(&km_waitq);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
#ifdef CONFIG_XFRM_MIGRATE
struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc();
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

error:
	if (errp)
		*errp = err;
	if (x) {
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	kfree(x);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_clone);
/* xfrm_state_lock is held */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
				       struct xfrm_migrate *m)
{
	struct xfrm_state *xc;
	int err;

	xc = xfrm_state_clone(x, &err);
	if (!xc)
		return NULL;

	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

	/* add state */
	if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
		/* care is needed when updating the destination address of
		   the state, since it is part of the lookup triplet */
		xfrm_state_insert(xc);
	} else {
		if ((err = xfrm_state_add(xc)) < 0)
			goto error;
	}

	return xc;
error:
	kfree(xc);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = get_seconds();

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		mod_timer(&x->timer, jiffies);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);

static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
		- skb_headroom(skb);

	if (nhead > 0)
		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

	/* Check tail too... */
	return 0;
}

int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm_state_check_expire(x);
	if (err < 0)
		goto err;
	err = xfrm_state_check_space(x, skb);
err:
	return err;
}
EXPORT_SYMBOL(xfrm_state_check);
struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
		  unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup(daddr, spi, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
			 u8 proto, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
	      xfrm_address_t *daddr, xfrm_address_t *saddr,
	      int create, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);
#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->tmpl_sort)
		err = afinfo->tmpl_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif
/* Silly enough, but there is no per-seq resolution list, so walk every
 * bydst chain. */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_find_acq_byseq(seq);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);
u32 xfrm_get_acqseq(void)
{
	u32 res;
	static u32 acqseq;
	static DEFINE_SPINLOCK(acqseq_lock);

	spin_lock_bh(&acqseq_lock);
	/* skip zero on wraparound: 0 is not a valid acquire sequence */
	res = (++acqseq ? : ++acqseq);
	spin_unlock_bh(&acqseq_lock);
	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
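
/* Pick an SPI for x: the fixed value when minspi == maxspi, otherwise a
 * random probe inside [minspi, maxspi], skipping values already in use.
 * The range is probed at most (high - low + 1) times, so a densely used
 * range can legitimately fail and leave x->id.spi at zero.
 */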
void
xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi)
{
	unsigned int h;
	struct xfrm_state *x0;

	if (x->id.spi)
		return;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			return;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		u32 low = ntohl(minspi);
		u32 high = ntohl(maxspi);
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);
		wake_up(&km_waitq);
	}
}
EXPORT_SYMBOL(xfrm_alloc_spi);
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x, *last = NULL;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			if (last) {
				err = func(last, count, data);
				if (err)
					goto out;
			}
			last = x;
			count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}
	err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 * 1. we updated one of the sequence numbers, and the seqno difference
	 *    is at least x->replay_maxdiff, in this case we also update the
	 *    timeout of our timer function
	 * 2. if x->replay_maxage has elapsed since last update,
	 *    and there were changes
	 *
	 * The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
EXPORT_SYMBOL(xfrm_replay_notify);
static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on())
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}
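
/* Anti-replay sliding window: x->replay.seq is the highest sequence
 * number accepted so far, and bit "diff" of x->replay.bitmap records each
 * already-seen packet within the window behind it.  Packets falling
 * outside the window or already marked in the bitmap are rejected.
 */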
int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= min_t(unsigned int, x->props.replay_window,
			  sizeof(x->replay.bitmap) * 8)) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);

void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);

void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_mgr *km;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);

	if (hard)
		wake_up(&km_waitq);
}

EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure
 * We are happy with one success
*/
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
		if (!acqret)
			err = acqret;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_query);
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);

int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_migrate);

int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_report);
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else
		xfrm_state_afinfo[afinfo->family] = afinfo;
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			xfrm_state_afinfo[afinfo->family] = NULL;
	}
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
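
/* On success xfrm_state_get_afinfo() returns with xfrm_state_afinfo_lock
 * read-held; every successful get must therefore be paired with an
 * xfrm_state_put_afinfo() that drops it.
 */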
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}

void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}

EXPORT_SYMBOL(xfrm_state_get_afinfo);
EXPORT_SYMBOL(xfrm_state_put_afinfo);
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_VALID &&
	    x->type && x->type->get_mtu)
		res = x->type->get_mtu(x, mtu);
	else
		res = mtu;
	spin_unlock_bh(&x->lock);
	return res;
}
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->mode = xfrm_get_mode(x->props.mode, family);
	if (x->mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(xfrm_init_state);
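
/* Boot-time init: start with eight buckets per table; the tables grow on
 * demand via xfrm_hash_grow_check()/xfrm_hash_resize().
 */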
void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}