[IPSEC]: Move xfrm_state_check into xfrm_output.c
net/xfrm/xfrm_state.c

/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <asm/uaccess.h>

#include "xfrm_hash.h"

struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

u32 sysctl_xfrm_acq_expires __read_mostly = 30;

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;
static unsigned int xfrm_state_genid;

static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
}

static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 unsigned short family)
{
	return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
}

static inline unsigned int
xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
}

static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}

static unsigned long xfrm_hash_new_size(void)
{
	return ((xfrm_state_hmask + 1) << 1) *
		sizeof(struct hlist_head);
}

static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
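
/* Illustrative note (added annotation, not in the original file): the
 * tables are grown from this work item rather than inline because
 * xfrm_hash_alloc() may sleep while allocating the larger tables;
 * xfrm_hash_grow_check() further down only schedules xfrm_hash_work.
 */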

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);

static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->mode)
		xfrm_put_mode(x->mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}

static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current->audit_context), 0);

out:
	spin_unlock(&x->lock);
}

static void xfrm_replay_timer_handler(unsigned long data);

struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data	  = (unsigned long)x;
		init_timer(&x->rtimer);
		x->rtimer.function = xfrm_replay_timer_handler;
		x->rtimer.data     = (unsigned long)x;
		x->curlft.add_time = get_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
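
/* Usage sketch (added annotation, not part of the original file): a key
 * manager typically drives the lifecycle roughly as follows; the error
 * handling here is illustrative only.
 *
 *	struct xfrm_state *x = xfrm_state_alloc();
 *	if (!x)
 *		return -ENOMEM;
 *	// fill in x->id, x->props, x->sel, algorithms ...
 *	err = xfrm_init_state(x);	// resolve type/mode, mark VALID
 *	if (!err)
 *		err = xfrm_state_add(x);	// link into the three hashes
 *	...
 *	xfrm_state_delete(x);	// unlink; the final put frees via the GC list
 */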

void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		__xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif

int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);

void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&xfrm_state_lock);
	si->sadcnt = xfrm_state_num;
	si->sadhcnt = xfrm_state_hmask;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);

static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}

static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi       != spi ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}

static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	if (use_spi)
		return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
					   x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(&x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}

static void xfrm_hash_grow_check(int have_hash_collision)
{
	if (have_hash_collision &&
	    (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
	    xfrm_state_num > xfrm_state_hmask)
		schedule_work(&xfrm_hash_work);
}
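
/* Example (added annotation, not original): with the initial eight
 * buckets (xfrm_state_hmask == 7), a doubling is scheduled as soon as
 * an insert lands on a non-empty chain while more than seven states
 * exist, i.e. once the load factor exceeds roughly one entry per
 * bucket; growth stops when the table reaches xfrm_state_hashmax.
 */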

struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
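
/* Summary (added annotation, not original): callers of xfrm_state_find()
 * see one of four outcomes: a held VALID state; NULL with *err == -EAGAIN
 * while a larval ACQ entry awaits the key manager; NULL with -EEXIST,
 * -ENOMEM or -ESRCH on failure; or a freshly created ACQ state after a
 * successful km_query(), which self-destructs after
 * sysctl_xfrm_acq_expires seconds if never completed.
 */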

struct xfrm_state *
xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct xfrm_state *rx = NULL, *x = NULL;
	struct hlist_node *entry;

	spin_lock(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock(&xfrm_state_lock);

	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);

static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}

/* xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}

void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);

/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid  != reqid ||
		    x->props.mode   != mode ||
		    x->props.family != family ||
		    x->km.state     != XFRM_STATE_ACQ ||
		    x->id.spi       != 0 ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
		wake_up(&km_waitq);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);

#ifdef CONFIG_XFRM_MIGRATE
struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc();
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

error:
	if (errp)
		*errp = err;
	if (x) {
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	kfree(x);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_clone);

/* xfrm_state_lock is held */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);

struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
				       struct xfrm_migrate *m)
{
	struct xfrm_state *xc;
	int err;

	xc = xfrm_state_clone(x, &err);
	if (!xc)
		return NULL;

	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

	/* add state */
	if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
		/* care is needed when the destination address of the
		   state is to be updated, as it is part of the lookup
		   triplet */
		xfrm_state_insert(xc);
	} else {
		if ((err = xfrm_state_add(xc)) < 0)
			goto error;
	}

	return xc;
error:
	kfree(xc);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif

int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);

int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = get_seconds();

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		mod_timer(&x->timer, jiffies);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
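
/* Worked example (added annotation, not original): with a soft byte
 * limit of 768K and a hard limit of 1M, the first call after
 * curlft.bytes crosses 768K sets km.dying and sends a soft expire via
 * km_state_expired(x, 0, 0); once 1M is reached the state moves to
 * XFRM_STATE_EXPIRED and the timer is made to fire immediately, which
 * deletes it and sends the hard expire.
 */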

struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
		  unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup(daddr, spi, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
			 u8 proto, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
	      xfrm_address_t *daddr, xfrm_address_t *saddr,
	      int create, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);

#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->tmpl_sort)
		err = afinfo->tmpl_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif

/* Silly enough, but I'm lazy to build resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_find_acq_byseq(seq);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

u32 xfrm_get_acqseq(void)
{
	u32 res;
	static u32 acqseq;
	static DEFINE_SPINLOCK(acqseq_lock);

	spin_lock_bh(&acqseq_lock);
	res = (++acqseq ? : ++acqseq);
	spin_unlock_bh(&acqseq_lock);
	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);

void
xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi)
{
	unsigned int h;
	struct xfrm_state *x0;

	if (x->id.spi)
		return;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			return;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		u32 low = ntohl(minspi);
		u32 high = ntohl(maxspi);
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);
		wake_up(&km_waitq);
	}
}
EXPORT_SYMBOL(xfrm_alloc_spi);
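
/* Usage sketch (added annotation, not original): a key manager resolves
 * a larval state's SPI before installing it, along the lines of
 *
 *	xfrm_alloc_spi(x, htonl(0x100), htonl(0xfff));
 *	if (!x->id.spi)
 *		// every random pick in the range collided; report failure
 *
 * Note the allocator makes high-low+1 random attempts rather than a
 * linear scan, so it can miss a free SPI in a nearly full range; on
 * success the state is already linked into the byspi hash above.
 */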

int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x, *last = NULL;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			if (last) {
				err = func(last, count, data);
				if (err)
					goto out;
			}
			last = x;
			count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}
	err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);

void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 *  1. we updated one of the sequence numbers, and the seqno difference
	 *     is at least x->replay_maxdiff, in this case we also update the
	 *     timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
EXPORT_SYMBOL(xfrm_replay_notify);

static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on())
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}

int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= min_t(unsigned int, x->props.replay_window,
			  sizeof(x->replay.bitmap) * 8)) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);

void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);
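
/* Worked example (added annotation, not original): with
 * props.replay_window == 32 and x->replay.seq == 100, an arriving seq
 * 101 shifts the bitmap left by one and sets bit 0; seq 95 sets bit
 * (100 - 95) == 5; seq 95 seen again fails xfrm_replay_check() and
 * bumps stats.replay; seq 60 is 40 behind, past the 32-bit window, and
 * fails with stats.replay_window++.
 */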

static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);

void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_mgr *km;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);

	if (hard)
		wake_up(&km_waitq);
}

EXPORT_SYMBOL(km_state_expired);

/*
 * We send to all registered managers regardless of failure
 * We are happy with one success
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
		if (!acqret)
			err = acqret;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_query);

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);

int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_migrate);

int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_report);

int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else
		xfrm_state_afinfo[afinfo->family] = afinfo;
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			xfrm_state_afinfo[afinfo->family] = NULL;
	}
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}

void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}

EXPORT_SYMBOL(xfrm_state_get_afinfo);
EXPORT_SYMBOL(xfrm_state_put_afinfo);
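
/* Annotation (added for exposition, not original): on success
 * xfrm_state_get_afinfo() returns with xfrm_state_afinfo_lock held for
 * read; xfrm_state_put_afinfo() drops it, which is why every caller in
 * this file pairs the two around the afinfo use.
 */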

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_VALID &&
	    x->type && x->type->get_mtu)
		res = x->type->get_mtu(x, mtu);
	else
		res = mtu - x->props.header_len;
	spin_unlock_bh(&x->lock);
	return res;
}
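
/* Example (added annotation, not original): for a state whose
 * props.header_len is, say, 24 bytes and whose type provides no
 * get_mtu() hook, a 1500 byte path MTU yields 1500 - 24 == 1476 bytes
 * of payload; transform types that pad (ESP, for instance) supply a
 * get_mtu() so the result is rounded for the cipher block size as well.
 */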

int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->mode = xfrm_get_mode(x->props.mode, family);
	if (x->mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(xfrm_init_state);

void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}
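
/* Arithmetic note (added annotation, not original): the allocation
 * above is 8 * sizeof(struct hlist_head) per table, i.e. eight buckets
 * each for bydst, bysrc and byspi, so xfrm_state_hmask starts at 7;
 * the tables then double on demand via xfrm_hash_resize().
 */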

#ifdef CONFIG_AUDITSYSCALL
static inline void xfrm_audit_common_stateinfo(struct xfrm_state *x,
					       struct audit_buffer *audit_buf)
{
	if (x->security)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 x->security->ctx_alg, x->security->ctx_doi,
				 x->security->ctx_str);

	switch (x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
				 NIPQUAD(x->props.saddr.a4),
				 NIPQUAD(x->id.daddr.a4));
		break;
	case AF_INET6:
		{
			struct in6_addr saddr6, daddr6;

			memcpy(&saddr6, x->props.saddr.a6,
			       sizeof(struct in6_addr));
			memcpy(&daddr6, x->id.daddr.a6,
			       sizeof(struct in6_addr));
			audit_log_format(audit_buf,
					 " src=" NIP6_FMT " dst=" NIP6_FMT,
					 NIP6(saddr6), NIP6(daddr6));
		}
		break;
	}
}

void
xfrm_audit_state_add(struct xfrm_state *x, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(sid, auid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SAD-add res=%u", result);
	xfrm_audit_common_stateinfo(x, audit_buf);
	audit_log_format(audit_buf, " spi=%lu(0x%lx)",
			 (unsigned long)x->id.spi, (unsigned long)x->id.spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);

void
xfrm_audit_state_delete(struct xfrm_state *x, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(sid, auid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SAD-delete res=%u", result);
	xfrm_audit_common_stateinfo(x, audit_buf);
	audit_log_format(audit_buf, " spi=%lu(0x%lx)",
			 (unsigned long)x->id.spi, (unsigned long)x->id.spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
#endif /* CONFIG_AUDITSYSCALL */