arch/sh/mm: Move a dereference below a NULL test
[linux-2.6/mini2440.git] / net / xfrm / xfrm_state.c
blobe25ff62ab2a62676e239cef57e9a288973d1ed25
1 /*
2 * xfrm_state.c
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
25 #include "xfrm_hash.h"
27 /* Each xfrm_state may be linked to two tables:
29 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
30 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
31 destination/tunnel endpoint. (output)
34 static DEFINE_SPINLOCK(xfrm_state_lock);
36 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
37 static unsigned int xfrm_state_genid;
39 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
40 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
42 #ifdef CONFIG_AUDITSYSCALL
43 static void xfrm_audit_state_replay(struct xfrm_state *x,
44 struct sk_buff *skb, __be32 net_seq);
45 #else
46 #define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
47 #endif /* CONFIG_AUDITSYSCALL */
49 static inline unsigned int xfrm_dst_hash(struct net *net,
50 xfrm_address_t *daddr,
51 xfrm_address_t *saddr,
52 u32 reqid,
53 unsigned short family)
55 return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
58 static inline unsigned int xfrm_src_hash(struct net *net,
59 xfrm_address_t *daddr,
60 xfrm_address_t *saddr,
61 unsigned short family)
63 return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
66 static inline unsigned int
67 xfrm_spi_hash(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
69 return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
72 static void xfrm_hash_transfer(struct hlist_head *list,
73 struct hlist_head *ndsttable,
74 struct hlist_head *nsrctable,
75 struct hlist_head *nspitable,
76 unsigned int nhashmask)
78 struct hlist_node *entry, *tmp;
79 struct xfrm_state *x;
81 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
82 unsigned int h;
84 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
85 x->props.reqid, x->props.family,
86 nhashmask);
87 hlist_add_head(&x->bydst, ndsttable+h);
89 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
90 x->props.family,
91 nhashmask);
92 hlist_add_head(&x->bysrc, nsrctable+h);
94 if (x->id.spi) {
95 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
96 x->id.proto, x->props.family,
97 nhashmask);
98 hlist_add_head(&x->byspi, nspitable+h);
103 static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
105 return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
108 static DEFINE_MUTEX(hash_resize_mutex);
110 static void xfrm_hash_resize(struct work_struct *work)
112 struct net *net = container_of(work, struct net, xfrm.state_hash_work);
113 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
114 unsigned long nsize, osize;
115 unsigned int nhashmask, ohashmask;
116 int i;
118 mutex_lock(&hash_resize_mutex);
120 nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
121 ndst = xfrm_hash_alloc(nsize);
122 if (!ndst)
123 goto out_unlock;
124 nsrc = xfrm_hash_alloc(nsize);
125 if (!nsrc) {
126 xfrm_hash_free(ndst, nsize);
127 goto out_unlock;
129 nspi = xfrm_hash_alloc(nsize);
130 if (!nspi) {
131 xfrm_hash_free(ndst, nsize);
132 xfrm_hash_free(nsrc, nsize);
133 goto out_unlock;
136 spin_lock_bh(&xfrm_state_lock);
138 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
139 for (i = net->xfrm.state_hmask; i >= 0; i--)
140 xfrm_hash_transfer(net->xfrm.state_bydst+i, ndst, nsrc, nspi,
141 nhashmask);
143 odst = net->xfrm.state_bydst;
144 osrc = net->xfrm.state_bysrc;
145 ospi = net->xfrm.state_byspi;
146 ohashmask = net->xfrm.state_hmask;
148 net->xfrm.state_bydst = ndst;
149 net->xfrm.state_bysrc = nsrc;
150 net->xfrm.state_byspi = nspi;
151 net->xfrm.state_hmask = nhashmask;
153 spin_unlock_bh(&xfrm_state_lock);
155 osize = (ohashmask + 1) * sizeof(struct hlist_head);
156 xfrm_hash_free(odst, osize);
157 xfrm_hash_free(osrc, osize);
158 xfrm_hash_free(ospi, osize);
160 out_unlock:
161 mutex_unlock(&hash_resize_mutex);
164 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
165 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
167 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
169 int __xfrm_state_delete(struct xfrm_state *x);
171 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
172 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
174 static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
176 struct xfrm_state_afinfo *afinfo;
177 if (unlikely(family >= NPROTO))
178 return NULL;
179 write_lock_bh(&xfrm_state_afinfo_lock);
180 afinfo = xfrm_state_afinfo[family];
181 if (unlikely(!afinfo))
182 write_unlock_bh(&xfrm_state_afinfo_lock);
183 return afinfo;
186 static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
187 __releases(xfrm_state_afinfo_lock)
189 write_unlock_bh(&xfrm_state_afinfo_lock);
192 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
194 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
195 const struct xfrm_type **typemap;
196 int err = 0;
198 if (unlikely(afinfo == NULL))
199 return -EAFNOSUPPORT;
200 typemap = afinfo->type_map;
202 if (likely(typemap[type->proto] == NULL))
203 typemap[type->proto] = type;
204 else
205 err = -EEXIST;
206 xfrm_state_unlock_afinfo(afinfo);
207 return err;
209 EXPORT_SYMBOL(xfrm_register_type);
211 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
213 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
214 const struct xfrm_type **typemap;
215 int err = 0;
217 if (unlikely(afinfo == NULL))
218 return -EAFNOSUPPORT;
219 typemap = afinfo->type_map;
221 if (unlikely(typemap[type->proto] != type))
222 err = -ENOENT;
223 else
224 typemap[type->proto] = NULL;
225 xfrm_state_unlock_afinfo(afinfo);
226 return err;
228 EXPORT_SYMBOL(xfrm_unregister_type);
230 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
232 struct xfrm_state_afinfo *afinfo;
233 const struct xfrm_type **typemap;
234 const struct xfrm_type *type;
235 int modload_attempted = 0;
237 retry:
238 afinfo = xfrm_state_get_afinfo(family);
239 if (unlikely(afinfo == NULL))
240 return NULL;
241 typemap = afinfo->type_map;
243 type = typemap[proto];
244 if (unlikely(type && !try_module_get(type->owner)))
245 type = NULL;
246 if (!type && !modload_attempted) {
247 xfrm_state_put_afinfo(afinfo);
248 request_module("xfrm-type-%d-%d", family, proto);
249 modload_attempted = 1;
250 goto retry;
253 xfrm_state_put_afinfo(afinfo);
254 return type;
257 static void xfrm_put_type(const struct xfrm_type *type)
259 module_put(type->owner);
262 int xfrm_register_mode(struct xfrm_mode *mode, int family)
264 struct xfrm_state_afinfo *afinfo;
265 struct xfrm_mode **modemap;
266 int err;
268 if (unlikely(mode->encap >= XFRM_MODE_MAX))
269 return -EINVAL;
271 afinfo = xfrm_state_lock_afinfo(family);
272 if (unlikely(afinfo == NULL))
273 return -EAFNOSUPPORT;
275 err = -EEXIST;
276 modemap = afinfo->mode_map;
277 if (modemap[mode->encap])
278 goto out;
280 err = -ENOENT;
281 if (!try_module_get(afinfo->owner))
282 goto out;
284 mode->afinfo = afinfo;
285 modemap[mode->encap] = mode;
286 err = 0;
288 out:
289 xfrm_state_unlock_afinfo(afinfo);
290 return err;
292 EXPORT_SYMBOL(xfrm_register_mode);
294 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
296 struct xfrm_state_afinfo *afinfo;
297 struct xfrm_mode **modemap;
298 int err;
300 if (unlikely(mode->encap >= XFRM_MODE_MAX))
301 return -EINVAL;
303 afinfo = xfrm_state_lock_afinfo(family);
304 if (unlikely(afinfo == NULL))
305 return -EAFNOSUPPORT;
307 err = -ENOENT;
308 modemap = afinfo->mode_map;
309 if (likely(modemap[mode->encap] == mode)) {
310 modemap[mode->encap] = NULL;
311 module_put(mode->afinfo->owner);
312 err = 0;
315 xfrm_state_unlock_afinfo(afinfo);
316 return err;
318 EXPORT_SYMBOL(xfrm_unregister_mode);
320 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
322 struct xfrm_state_afinfo *afinfo;
323 struct xfrm_mode *mode;
324 int modload_attempted = 0;
326 if (unlikely(encap >= XFRM_MODE_MAX))
327 return NULL;
329 retry:
330 afinfo = xfrm_state_get_afinfo(family);
331 if (unlikely(afinfo == NULL))
332 return NULL;
334 mode = afinfo->mode_map[encap];
335 if (unlikely(mode && !try_module_get(mode->owner)))
336 mode = NULL;
337 if (!mode && !modload_attempted) {
338 xfrm_state_put_afinfo(afinfo);
339 request_module("xfrm-mode-%d-%d", family, encap);
340 modload_attempted = 1;
341 goto retry;
344 xfrm_state_put_afinfo(afinfo);
345 return mode;
348 static void xfrm_put_mode(struct xfrm_mode *mode)
350 module_put(mode->owner);
353 static void xfrm_state_gc_destroy(struct xfrm_state *x)
355 del_timer_sync(&x->timer);
356 del_timer_sync(&x->rtimer);
357 kfree(x->aalg);
358 kfree(x->ealg);
359 kfree(x->calg);
360 kfree(x->encap);
361 kfree(x->coaddr);
362 if (x->inner_mode)
363 xfrm_put_mode(x->inner_mode);
364 if (x->inner_mode_iaf)
365 xfrm_put_mode(x->inner_mode_iaf);
366 if (x->outer_mode)
367 xfrm_put_mode(x->outer_mode);
368 if (x->type) {
369 x->type->destructor(x);
370 xfrm_put_type(x->type);
372 security_xfrm_state_free(x);
373 kfree(x);
376 static void xfrm_state_gc_task(struct work_struct *work)
378 struct net *net = container_of(work, struct net, xfrm.state_gc_work);
379 struct xfrm_state *x;
380 struct hlist_node *entry, *tmp;
381 struct hlist_head gc_list;
383 spin_lock_bh(&xfrm_state_gc_lock);
384 hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
385 spin_unlock_bh(&xfrm_state_gc_lock);
387 hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist)
388 xfrm_state_gc_destroy(x);
390 wake_up(&net->xfrm.km_waitq);
393 static inline unsigned long make_jiffies(long secs)
395 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
396 return MAX_SCHEDULE_TIMEOUT-1;
397 else
398 return secs*HZ;
401 static void xfrm_timer_handler(unsigned long data)
403 struct xfrm_state *x = (struct xfrm_state*)data;
404 struct net *net = xs_net(x);
405 unsigned long now = get_seconds();
406 long next = LONG_MAX;
407 int warn = 0;
408 int err = 0;
410 spin_lock(&x->lock);
411 if (x->km.state == XFRM_STATE_DEAD)
412 goto out;
413 if (x->km.state == XFRM_STATE_EXPIRED)
414 goto expired;
415 if (x->lft.hard_add_expires_seconds) {
416 long tmo = x->lft.hard_add_expires_seconds +
417 x->curlft.add_time - now;
418 if (tmo <= 0)
419 goto expired;
420 if (tmo < next)
421 next = tmo;
423 if (x->lft.hard_use_expires_seconds) {
424 long tmo = x->lft.hard_use_expires_seconds +
425 (x->curlft.use_time ? : now) - now;
426 if (tmo <= 0)
427 goto expired;
428 if (tmo < next)
429 next = tmo;
431 if (x->km.dying)
432 goto resched;
433 if (x->lft.soft_add_expires_seconds) {
434 long tmo = x->lft.soft_add_expires_seconds +
435 x->curlft.add_time - now;
436 if (tmo <= 0)
437 warn = 1;
438 else if (tmo < next)
439 next = tmo;
441 if (x->lft.soft_use_expires_seconds) {
442 long tmo = x->lft.soft_use_expires_seconds +
443 (x->curlft.use_time ? : now) - now;
444 if (tmo <= 0)
445 warn = 1;
446 else if (tmo < next)
447 next = tmo;
450 x->km.dying = warn;
451 if (warn)
452 km_state_expired(x, 0, 0);
453 resched:
454 if (next != LONG_MAX)
455 mod_timer(&x->timer, jiffies + make_jiffies(next));
457 goto out;
459 expired:
460 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
461 x->km.state = XFRM_STATE_EXPIRED;
462 wake_up(&net->xfrm.km_waitq);
463 next = 2;
464 goto resched;
467 err = __xfrm_state_delete(x);
468 if (!err && x->id.spi)
469 km_state_expired(x, 1, 0);
471 xfrm_audit_state_delete(x, err ? 0 : 1,
472 audit_get_loginuid(current),
473 audit_get_sessionid(current), 0);
475 out:
476 spin_unlock(&x->lock);
479 static void xfrm_replay_timer_handler(unsigned long data);
481 struct xfrm_state *xfrm_state_alloc(struct net *net)
483 struct xfrm_state *x;
485 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
487 if (x) {
488 write_pnet(&x->xs_net, net);
489 atomic_set(&x->refcnt, 1);
490 atomic_set(&x->tunnel_users, 0);
491 INIT_LIST_HEAD(&x->km.all);
492 INIT_HLIST_NODE(&x->bydst);
493 INIT_HLIST_NODE(&x->bysrc);
494 INIT_HLIST_NODE(&x->byspi);
495 setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
496 setup_timer(&x->rtimer, xfrm_replay_timer_handler,
497 (unsigned long)x);
498 x->curlft.add_time = get_seconds();
499 x->lft.soft_byte_limit = XFRM_INF;
500 x->lft.soft_packet_limit = XFRM_INF;
501 x->lft.hard_byte_limit = XFRM_INF;
502 x->lft.hard_packet_limit = XFRM_INF;
503 x->replay_maxage = 0;
504 x->replay_maxdiff = 0;
505 x->inner_mode = NULL;
506 x->inner_mode_iaf = NULL;
507 spin_lock_init(&x->lock);
509 return x;
511 EXPORT_SYMBOL(xfrm_state_alloc);
513 void __xfrm_state_destroy(struct xfrm_state *x)
515 struct net *net = xs_net(x);
517 WARN_ON(x->km.state != XFRM_STATE_DEAD);
519 spin_lock_bh(&xfrm_state_gc_lock);
520 hlist_add_head(&x->gclist, &net->xfrm.state_gc_list);
521 spin_unlock_bh(&xfrm_state_gc_lock);
522 schedule_work(&net->xfrm.state_gc_work);
524 EXPORT_SYMBOL(__xfrm_state_destroy);
526 int __xfrm_state_delete(struct xfrm_state *x)
528 struct net *net = xs_net(x);
529 int err = -ESRCH;
531 if (x->km.state != XFRM_STATE_DEAD) {
532 x->km.state = XFRM_STATE_DEAD;
533 spin_lock(&xfrm_state_lock);
534 list_del(&x->km.all);
535 hlist_del(&x->bydst);
536 hlist_del(&x->bysrc);
537 if (x->id.spi)
538 hlist_del(&x->byspi);
539 net->xfrm.state_num--;
540 spin_unlock(&xfrm_state_lock);
542 /* All xfrm_state objects are created by xfrm_state_alloc.
543 * The xfrm_state_alloc call gives a reference, and that
544 * is what we are dropping here.
546 xfrm_state_put(x);
547 err = 0;
550 return err;
552 EXPORT_SYMBOL(__xfrm_state_delete);
554 int xfrm_state_delete(struct xfrm_state *x)
556 int err;
558 spin_lock_bh(&x->lock);
559 err = __xfrm_state_delete(x);
560 spin_unlock_bh(&x->lock);
562 return err;
564 EXPORT_SYMBOL(xfrm_state_delete);
566 #ifdef CONFIG_SECURITY_NETWORK_XFRM
567 static inline int
568 xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audit_info)
570 int i, err = 0;
572 for (i = 0; i <= net->xfrm.state_hmask; i++) {
573 struct hlist_node *entry;
574 struct xfrm_state *x;
576 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
577 if (xfrm_id_proto_match(x->id.proto, proto) &&
578 (err = security_xfrm_state_delete(x)) != 0) {
579 xfrm_audit_state_delete(x, 0,
580 audit_info->loginuid,
581 audit_info->sessionid,
582 audit_info->secid);
583 return err;
588 return err;
590 #else
591 static inline int
592 xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audit_info)
594 return 0;
596 #endif
598 int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
600 int i, err = 0;
602 spin_lock_bh(&xfrm_state_lock);
603 err = xfrm_state_flush_secctx_check(net, proto, audit_info);
604 if (err)
605 goto out;
607 for (i = 0; i <= net->xfrm.state_hmask; i++) {
608 struct hlist_node *entry;
609 struct xfrm_state *x;
610 restart:
611 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
612 if (!xfrm_state_kern(x) &&
613 xfrm_id_proto_match(x->id.proto, proto)) {
614 xfrm_state_hold(x);
615 spin_unlock_bh(&xfrm_state_lock);
617 err = xfrm_state_delete(x);
618 xfrm_audit_state_delete(x, err ? 0 : 1,
619 audit_info->loginuid,
620 audit_info->sessionid,
621 audit_info->secid);
622 xfrm_state_put(x);
624 spin_lock_bh(&xfrm_state_lock);
625 goto restart;
629 err = 0;
631 out:
632 spin_unlock_bh(&xfrm_state_lock);
633 wake_up(&net->xfrm.km_waitq);
634 return err;
636 EXPORT_SYMBOL(xfrm_state_flush);
638 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
640 spin_lock_bh(&xfrm_state_lock);
641 si->sadcnt = init_net.xfrm.state_num;
642 si->sadhcnt = init_net.xfrm.state_hmask;
643 si->sadhmcnt = xfrm_state_hashmax;
644 spin_unlock_bh(&xfrm_state_lock);
646 EXPORT_SYMBOL(xfrm_sad_getinfo);
648 static int
649 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
650 struct xfrm_tmpl *tmpl,
651 xfrm_address_t *daddr, xfrm_address_t *saddr,
652 unsigned short family)
654 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
655 if (!afinfo)
656 return -1;
657 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
658 xfrm_state_put_afinfo(afinfo);
659 return 0;
662 static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
664 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
665 struct xfrm_state *x;
666 struct hlist_node *entry;
668 hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) {
669 if (x->props.family != family ||
670 x->id.spi != spi ||
671 x->id.proto != proto)
672 continue;
674 switch (family) {
675 case AF_INET:
676 if (x->id.daddr.a4 != daddr->a4)
677 continue;
678 break;
679 case AF_INET6:
680 if (!ipv6_addr_equal((struct in6_addr *)daddr,
681 (struct in6_addr *)
682 x->id.daddr.a6))
683 continue;
684 break;
687 xfrm_state_hold(x);
688 return x;
691 return NULL;
694 static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
696 unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
697 struct xfrm_state *x;
698 struct hlist_node *entry;
700 hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) {
701 if (x->props.family != family ||
702 x->id.proto != proto)
703 continue;
705 switch (family) {
706 case AF_INET:
707 if (x->id.daddr.a4 != daddr->a4 ||
708 x->props.saddr.a4 != saddr->a4)
709 continue;
710 break;
711 case AF_INET6:
712 if (!ipv6_addr_equal((struct in6_addr *)daddr,
713 (struct in6_addr *)
714 x->id.daddr.a6) ||
715 !ipv6_addr_equal((struct in6_addr *)saddr,
716 (struct in6_addr *)
717 x->props.saddr.a6))
718 continue;
719 break;
722 xfrm_state_hold(x);
723 return x;
726 return NULL;
729 static inline struct xfrm_state *
730 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
732 struct net *net = xs_net(x);
734 if (use_spi)
735 return __xfrm_state_lookup(net, &x->id.daddr, x->id.spi,
736 x->id.proto, family);
737 else
738 return __xfrm_state_lookup_byaddr(net, &x->id.daddr,
739 &x->props.saddr,
740 x->id.proto, family);
743 static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
745 if (have_hash_collision &&
746 (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
747 net->xfrm.state_num > net->xfrm.state_hmask)
748 schedule_work(&net->xfrm.state_hash_work);
751 struct xfrm_state *
752 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
753 struct flowi *fl, struct xfrm_tmpl *tmpl,
754 struct xfrm_policy *pol, int *err,
755 unsigned short family)
757 struct net *net = xp_net(pol);
758 unsigned int h;
759 struct hlist_node *entry;
760 struct xfrm_state *x, *x0, *to_put;
761 int acquire_in_progress = 0;
762 int error = 0;
763 struct xfrm_state *best = NULL;
765 to_put = NULL;
767 spin_lock_bh(&xfrm_state_lock);
768 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family);
769 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
770 if (x->props.family == family &&
771 x->props.reqid == tmpl->reqid &&
772 !(x->props.flags & XFRM_STATE_WILDRECV) &&
773 xfrm_state_addr_check(x, daddr, saddr, family) &&
774 tmpl->mode == x->props.mode &&
775 tmpl->id.proto == x->id.proto &&
776 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
777 /* Resolution logic:
778 1. There is a valid state with matching selector.
779 Done.
780 2. Valid state with inappropriate selector. Skip.
782 Entering area of "sysdeps".
784 3. If state is not valid, selector is temporary,
785 it selects only session which triggered
786 previous resolution. Key manager will do
787 something to install a state with proper
788 selector.
790 if (x->km.state == XFRM_STATE_VALID) {
791 if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
792 !security_xfrm_state_pol_flow_match(x, pol, fl))
793 continue;
794 if (!best ||
795 best->km.dying > x->km.dying ||
796 (best->km.dying == x->km.dying &&
797 best->curlft.add_time < x->curlft.add_time))
798 best = x;
799 } else if (x->km.state == XFRM_STATE_ACQ) {
800 acquire_in_progress = 1;
801 } else if (x->km.state == XFRM_STATE_ERROR ||
802 x->km.state == XFRM_STATE_EXPIRED) {
803 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
804 security_xfrm_state_pol_flow_match(x, pol, fl))
805 error = -ESRCH;
810 x = best;
811 if (!x && !error && !acquire_in_progress) {
812 if (tmpl->id.spi &&
813 (x0 = __xfrm_state_lookup(net, daddr, tmpl->id.spi,
814 tmpl->id.proto, family)) != NULL) {
815 to_put = x0;
816 error = -EEXIST;
817 goto out;
819 x = xfrm_state_alloc(net);
820 if (x == NULL) {
821 error = -ENOMEM;
822 goto out;
824 /* Initialize temporary selector matching only
825 * to current session. */
826 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
828 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
829 if (error) {
830 x->km.state = XFRM_STATE_DEAD;
831 to_put = x;
832 x = NULL;
833 goto out;
836 if (km_query(x, tmpl, pol) == 0) {
837 x->km.state = XFRM_STATE_ACQ;
838 list_add(&x->km.all, &net->xfrm.state_all);
839 hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
840 h = xfrm_src_hash(net, daddr, saddr, family);
841 hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
842 if (x->id.spi) {
843 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family);
844 hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
846 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
847 x->timer.expires = jiffies + net->xfrm.sysctl_acq_expires*HZ;
848 add_timer(&x->timer);
849 net->xfrm.state_num++;
850 xfrm_hash_grow_check(net, x->bydst.next != NULL);
851 } else {
852 x->km.state = XFRM_STATE_DEAD;
853 to_put = x;
854 x = NULL;
855 error = -ESRCH;
858 out:
859 if (x)
860 xfrm_state_hold(x);
861 else
862 *err = acquire_in_progress ? -EAGAIN : error;
863 spin_unlock_bh(&xfrm_state_lock);
864 if (to_put)
865 xfrm_state_put(to_put);
866 return x;
869 struct xfrm_state *
870 xfrm_stateonly_find(struct net *net,
871 xfrm_address_t *daddr, xfrm_address_t *saddr,
872 unsigned short family, u8 mode, u8 proto, u32 reqid)
874 unsigned int h;
875 struct xfrm_state *rx = NULL, *x = NULL;
876 struct hlist_node *entry;
878 spin_lock(&xfrm_state_lock);
879 h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
880 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
881 if (x->props.family == family &&
882 x->props.reqid == reqid &&
883 !(x->props.flags & XFRM_STATE_WILDRECV) &&
884 xfrm_state_addr_check(x, daddr, saddr, family) &&
885 mode == x->props.mode &&
886 proto == x->id.proto &&
887 x->km.state == XFRM_STATE_VALID) {
888 rx = x;
889 break;
893 if (rx)
894 xfrm_state_hold(rx);
895 spin_unlock(&xfrm_state_lock);
898 return rx;
900 EXPORT_SYMBOL(xfrm_stateonly_find);
902 static void __xfrm_state_insert(struct xfrm_state *x)
904 struct net *net = xs_net(x);
905 unsigned int h;
907 x->genid = ++xfrm_state_genid;
909 list_add(&x->km.all, &net->xfrm.state_all);
911 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
912 x->props.reqid, x->props.family);
913 hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
915 h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
916 hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
918 if (x->id.spi) {
919 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
920 x->props.family);
922 hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
925 mod_timer(&x->timer, jiffies + HZ);
926 if (x->replay_maxage)
927 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
929 wake_up(&net->xfrm.km_waitq);
931 net->xfrm.state_num++;
933 xfrm_hash_grow_check(net, x->bydst.next != NULL);
936 /* xfrm_state_lock is held */
937 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
939 struct net *net = xs_net(xnew);
940 unsigned short family = xnew->props.family;
941 u32 reqid = xnew->props.reqid;
942 struct xfrm_state *x;
943 struct hlist_node *entry;
944 unsigned int h;
946 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
947 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
948 if (x->props.family == family &&
949 x->props.reqid == reqid &&
950 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
951 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
952 x->genid = xfrm_state_genid;
956 void xfrm_state_insert(struct xfrm_state *x)
958 spin_lock_bh(&xfrm_state_lock);
959 __xfrm_state_bump_genids(x);
960 __xfrm_state_insert(x);
961 spin_unlock_bh(&xfrm_state_lock);
963 EXPORT_SYMBOL(xfrm_state_insert);
965 /* xfrm_state_lock is held */
966 static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
968 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
969 struct hlist_node *entry;
970 struct xfrm_state *x;
972 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
973 if (x->props.reqid != reqid ||
974 x->props.mode != mode ||
975 x->props.family != family ||
976 x->km.state != XFRM_STATE_ACQ ||
977 x->id.spi != 0 ||
978 x->id.proto != proto)
979 continue;
981 switch (family) {
982 case AF_INET:
983 if (x->id.daddr.a4 != daddr->a4 ||
984 x->props.saddr.a4 != saddr->a4)
985 continue;
986 break;
987 case AF_INET6:
988 if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
989 (struct in6_addr *)daddr) ||
990 !ipv6_addr_equal((struct in6_addr *)
991 x->props.saddr.a6,
992 (struct in6_addr *)saddr))
993 continue;
994 break;
997 xfrm_state_hold(x);
998 return x;
1001 if (!create)
1002 return NULL;
1004 x = xfrm_state_alloc(net);
1005 if (likely(x)) {
1006 switch (family) {
1007 case AF_INET:
1008 x->sel.daddr.a4 = daddr->a4;
1009 x->sel.saddr.a4 = saddr->a4;
1010 x->sel.prefixlen_d = 32;
1011 x->sel.prefixlen_s = 32;
1012 x->props.saddr.a4 = saddr->a4;
1013 x->id.daddr.a4 = daddr->a4;
1014 break;
1016 case AF_INET6:
1017 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
1018 (struct in6_addr *)daddr);
1019 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
1020 (struct in6_addr *)saddr);
1021 x->sel.prefixlen_d = 128;
1022 x->sel.prefixlen_s = 128;
1023 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
1024 (struct in6_addr *)saddr);
1025 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
1026 (struct in6_addr *)daddr);
1027 break;
1030 x->km.state = XFRM_STATE_ACQ;
1031 x->id.proto = proto;
1032 x->props.family = family;
1033 x->props.mode = mode;
1034 x->props.reqid = reqid;
1035 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1036 xfrm_state_hold(x);
1037 x->timer.expires = jiffies + net->xfrm.sysctl_acq_expires*HZ;
1038 add_timer(&x->timer);
1039 list_add(&x->km.all, &net->xfrm.state_all);
1040 hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
1041 h = xfrm_src_hash(net, daddr, saddr, family);
1042 hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
1044 net->xfrm.state_num++;
1046 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1049 return x;
1052 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq);
1054 int xfrm_state_add(struct xfrm_state *x)
1056 struct net *net = xs_net(x);
1057 struct xfrm_state *x1, *to_put;
1058 int family;
1059 int err;
1060 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1062 family = x->props.family;
1064 to_put = NULL;
1066 spin_lock_bh(&xfrm_state_lock);
1068 x1 = __xfrm_state_locate(x, use_spi, family);
1069 if (x1) {
1070 to_put = x1;
1071 x1 = NULL;
1072 err = -EEXIST;
1073 goto out;
1076 if (use_spi && x->km.seq) {
1077 x1 = __xfrm_find_acq_byseq(net, x->km.seq);
1078 if (x1 && ((x1->id.proto != x->id.proto) ||
1079 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1080 to_put = x1;
1081 x1 = NULL;
1085 if (use_spi && !x1)
1086 x1 = __find_acq_core(net, family, x->props.mode, x->props.reqid,
1087 x->id.proto,
1088 &x->id.daddr, &x->props.saddr, 0);
1090 __xfrm_state_bump_genids(x);
1091 __xfrm_state_insert(x);
1092 err = 0;
1094 out:
1095 spin_unlock_bh(&xfrm_state_lock);
1097 if (x1) {
1098 xfrm_state_delete(x1);
1099 xfrm_state_put(x1);
1102 if (to_put)
1103 xfrm_state_put(to_put);
1105 return err;
1107 EXPORT_SYMBOL(xfrm_state_add);
1109 #ifdef CONFIG_XFRM_MIGRATE
1110 static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1112 struct net *net = xs_net(orig);
1113 int err = -ENOMEM;
1114 struct xfrm_state *x = xfrm_state_alloc(net);
1115 if (!x)
1116 goto error;
1118 memcpy(&x->id, &orig->id, sizeof(x->id));
1119 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1120 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1121 x->props.mode = orig->props.mode;
1122 x->props.replay_window = orig->props.replay_window;
1123 x->props.reqid = orig->props.reqid;
1124 x->props.family = orig->props.family;
1125 x->props.saddr = orig->props.saddr;
1127 if (orig->aalg) {
1128 x->aalg = xfrm_algo_clone(orig->aalg);
1129 if (!x->aalg)
1130 goto error;
1132 x->props.aalgo = orig->props.aalgo;
1134 if (orig->ealg) {
1135 x->ealg = xfrm_algo_clone(orig->ealg);
1136 if (!x->ealg)
1137 goto error;
1139 x->props.ealgo = orig->props.ealgo;
1141 if (orig->calg) {
1142 x->calg = xfrm_algo_clone(orig->calg);
1143 if (!x->calg)
1144 goto error;
1146 x->props.calgo = orig->props.calgo;
1148 if (orig->encap) {
1149 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1150 if (!x->encap)
1151 goto error;
1154 if (orig->coaddr) {
1155 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1156 GFP_KERNEL);
1157 if (!x->coaddr)
1158 goto error;
1161 err = xfrm_init_state(x);
1162 if (err)
1163 goto error;
1165 x->props.flags = orig->props.flags;
1167 x->curlft.add_time = orig->curlft.add_time;
1168 x->km.state = orig->km.state;
1169 x->km.seq = orig->km.seq;
1171 return x;
1173 error:
1174 if (errp)
1175 *errp = err;
1176 if (x) {
1177 kfree(x->aalg);
1178 kfree(x->ealg);
1179 kfree(x->calg);
1180 kfree(x->encap);
1181 kfree(x->coaddr);
1183 kfree(x);
1184 return NULL;
1187 /* xfrm_state_lock is held */
1188 struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1190 unsigned int h;
1191 struct xfrm_state *x;
1192 struct hlist_node *entry;
1194 if (m->reqid) {
1195 h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr,
1196 m->reqid, m->old_family);
1197 hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
1198 if (x->props.mode != m->mode ||
1199 x->id.proto != m->proto)
1200 continue;
1201 if (m->reqid && x->props.reqid != m->reqid)
1202 continue;
1203 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1204 m->old_family) ||
1205 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1206 m->old_family))
1207 continue;
1208 xfrm_state_hold(x);
1209 return x;
1211 } else {
1212 h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr,
1213 m->old_family);
1214 hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) {
1215 if (x->props.mode != m->mode ||
1216 x->id.proto != m->proto)
1217 continue;
1218 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1219 m->old_family) ||
1220 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1221 m->old_family))
1222 continue;
1223 xfrm_state_hold(x);
1224 return x;
1228 return NULL;
1230 EXPORT_SYMBOL(xfrm_migrate_state_find);
1232 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1233 struct xfrm_migrate *m)
1235 struct xfrm_state *xc;
1236 int err;
1238 xc = xfrm_state_clone(x, &err);
1239 if (!xc)
1240 return NULL;
1242 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1243 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1245 /* add state */
1246 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1247 /* a care is needed when the destination address of the
1248 state is to be updated as it is a part of triplet */
1249 xfrm_state_insert(xc);
1250 } else {
1251 if ((err = xfrm_state_add(xc)) < 0)
1252 goto error;
1255 return xc;
1256 error:
1257 kfree(xc);
1258 return NULL;
1260 EXPORT_SYMBOL(xfrm_state_migrate);
1261 #endif
1263 int xfrm_state_update(struct xfrm_state *x)
1265 struct xfrm_state *x1, *to_put;
1266 int err;
1267 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1269 to_put = NULL;
1271 spin_lock_bh(&xfrm_state_lock);
1272 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1274 err = -ESRCH;
1275 if (!x1)
1276 goto out;
1278 if (xfrm_state_kern(x1)) {
1279 to_put = x1;
1280 err = -EEXIST;
1281 goto out;
1284 if (x1->km.state == XFRM_STATE_ACQ) {
1285 __xfrm_state_insert(x);
1286 x = NULL;
1288 err = 0;
1290 out:
1291 spin_unlock_bh(&xfrm_state_lock);
1293 if (to_put)
1294 xfrm_state_put(to_put);
1296 if (err)
1297 return err;
1299 if (!x) {
1300 xfrm_state_delete(x1);
1301 xfrm_state_put(x1);
1302 return 0;
1305 err = -EINVAL;
1306 spin_lock_bh(&x1->lock);
1307 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1308 if (x->encap && x1->encap)
1309 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1310 if (x->coaddr && x1->coaddr) {
1311 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1313 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1314 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1315 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1316 x1->km.dying = 0;
1318 mod_timer(&x1->timer, jiffies + HZ);
1319 if (x1->curlft.use_time)
1320 xfrm_state_check_expire(x1);
1322 err = 0;
1324 spin_unlock_bh(&x1->lock);
1326 xfrm_state_put(x1);
1328 return err;
1330 EXPORT_SYMBOL(xfrm_state_update);
1332 int xfrm_state_check_expire(struct xfrm_state *x)
1334 if (!x->curlft.use_time)
1335 x->curlft.use_time = get_seconds();
1337 if (x->km.state != XFRM_STATE_VALID)
1338 return -EINVAL;
1340 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1341 x->curlft.packets >= x->lft.hard_packet_limit) {
1342 x->km.state = XFRM_STATE_EXPIRED;
1343 mod_timer(&x->timer, jiffies);
1344 return -EINVAL;
1347 if (!x->km.dying &&
1348 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1349 x->curlft.packets >= x->lft.soft_packet_limit)) {
1350 x->km.dying = 1;
1351 km_state_expired(x, 0, 0);
1353 return 0;
1355 EXPORT_SYMBOL(xfrm_state_check_expire);
1357 struct xfrm_state *
1358 xfrm_state_lookup(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto,
1359 unsigned short family)
1361 struct xfrm_state *x;
1363 spin_lock_bh(&xfrm_state_lock);
1364 x = __xfrm_state_lookup(net, daddr, spi, proto, family);
1365 spin_unlock_bh(&xfrm_state_lock);
1366 return x;
1368 EXPORT_SYMBOL(xfrm_state_lookup);
1370 struct xfrm_state *
1371 xfrm_state_lookup_byaddr(struct net *net,
1372 xfrm_address_t *daddr, xfrm_address_t *saddr,
1373 u8 proto, unsigned short family)
1375 struct xfrm_state *x;
1377 spin_lock_bh(&xfrm_state_lock);
1378 x = __xfrm_state_lookup_byaddr(net, daddr, saddr, proto, family);
1379 spin_unlock_bh(&xfrm_state_lock);
1380 return x;
1382 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1384 struct xfrm_state *
1385 xfrm_find_acq(struct net *net, u8 mode, u32 reqid, u8 proto,
1386 xfrm_address_t *daddr, xfrm_address_t *saddr,
1387 int create, unsigned short family)
1389 struct xfrm_state *x;
1391 spin_lock_bh(&xfrm_state_lock);
1392 x = __find_acq_core(net, family, mode, reqid, proto, daddr, saddr, create);
1393 spin_unlock_bh(&xfrm_state_lock);
1395 return x;
1397 EXPORT_SYMBOL(xfrm_find_acq);
1399 #ifdef CONFIG_XFRM_SUB_POLICY
1401 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1402 unsigned short family)
1404 int err = 0;
1405 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1406 if (!afinfo)
1407 return -EAFNOSUPPORT;
1409 spin_lock_bh(&xfrm_state_lock);
1410 if (afinfo->tmpl_sort)
1411 err = afinfo->tmpl_sort(dst, src, n);
1412 spin_unlock_bh(&xfrm_state_lock);
1413 xfrm_state_put_afinfo(afinfo);
1414 return err;
1416 EXPORT_SYMBOL(xfrm_tmpl_sort);
1419 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1420 unsigned short family)
1422 int err = 0;
1423 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1424 if (!afinfo)
1425 return -EAFNOSUPPORT;
1427 spin_lock_bh(&xfrm_state_lock);
1428 if (afinfo->state_sort)
1429 err = afinfo->state_sort(dst, src, n);
1430 spin_unlock_bh(&xfrm_state_lock);
1431 xfrm_state_put_afinfo(afinfo);
1432 return err;
1434 EXPORT_SYMBOL(xfrm_state_sort);
1435 #endif
1437 /* Silly enough, but I'm lazy to build resolution list */
1439 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq)
1441 int i;
1443 for (i = 0; i <= net->xfrm.state_hmask; i++) {
1444 struct hlist_node *entry;
1445 struct xfrm_state *x;
1447 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
1448 if (x->km.seq == seq &&
1449 x->km.state == XFRM_STATE_ACQ) {
1450 xfrm_state_hold(x);
1451 return x;
1455 return NULL;
1458 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 seq)
1460 struct xfrm_state *x;
1462 spin_lock_bh(&xfrm_state_lock);
1463 x = __xfrm_find_acq_byseq(net, seq);
1464 spin_unlock_bh(&xfrm_state_lock);
1465 return x;
1467 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1469 u32 xfrm_get_acqseq(void)
1471 u32 res;
1472 static u32 acqseq;
1473 static DEFINE_SPINLOCK(acqseq_lock);
1475 spin_lock_bh(&acqseq_lock);
1476 res = (++acqseq ? : ++acqseq);
1477 spin_unlock_bh(&acqseq_lock);
1478 return res;
1480 EXPORT_SYMBOL(xfrm_get_acqseq);
1482 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1484 struct net *net = xs_net(x);
1485 unsigned int h;
1486 struct xfrm_state *x0;
1487 int err = -ENOENT;
1488 __be32 minspi = htonl(low);
1489 __be32 maxspi = htonl(high);
1491 spin_lock_bh(&x->lock);
1492 if (x->km.state == XFRM_STATE_DEAD)
1493 goto unlock;
1495 err = 0;
1496 if (x->id.spi)
1497 goto unlock;
1499 err = -ENOENT;
1501 if (minspi == maxspi) {
1502 x0 = xfrm_state_lookup(net, &x->id.daddr, minspi, x->id.proto, x->props.family);
1503 if (x0) {
1504 xfrm_state_put(x0);
1505 goto unlock;
1507 x->id.spi = minspi;
1508 } else {
1509 u32 spi = 0;
1510 for (h=0; h<high-low+1; h++) {
1511 spi = low + net_random()%(high-low+1);
1512 x0 = xfrm_state_lookup(net, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1513 if (x0 == NULL) {
1514 x->id.spi = htonl(spi);
1515 break;
1517 xfrm_state_put(x0);
1520 if (x->id.spi) {
1521 spin_lock_bh(&xfrm_state_lock);
1522 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1523 hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
1524 spin_unlock_bh(&xfrm_state_lock);
1526 err = 0;
1529 unlock:
1530 spin_unlock_bh(&x->lock);
1532 return err;
1534 EXPORT_SYMBOL(xfrm_alloc_spi);
1536 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1537 int (*func)(struct xfrm_state *, int, void*),
1538 void *data)
1540 struct xfrm_state *state;
1541 struct xfrm_state_walk *x;
1542 int err = 0;
1544 if (walk->seq != 0 && list_empty(&walk->all))
1545 return 0;
1547 spin_lock_bh(&xfrm_state_lock);
1548 if (list_empty(&walk->all))
1549 x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
1550 else
1551 x = list_entry(&walk->all, struct xfrm_state_walk, all);
1552 list_for_each_entry_from(x, &net->xfrm.state_all, all) {
1553 if (x->state == XFRM_STATE_DEAD)
1554 continue;
1555 state = container_of(x, struct xfrm_state, km);
1556 if (!xfrm_id_proto_match(state->id.proto, walk->proto))
1557 continue;
1558 err = func(state, walk->seq, data);
1559 if (err) {
1560 list_move_tail(&walk->all, &x->all);
1561 goto out;
1563 walk->seq++;
1565 if (walk->seq == 0) {
1566 err = -ENOENT;
1567 goto out;
1569 list_del_init(&walk->all);
1570 out:
1571 spin_unlock_bh(&xfrm_state_lock);
1572 return err;
1574 EXPORT_SYMBOL(xfrm_state_walk);
1576 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
1578 INIT_LIST_HEAD(&walk->all);
1579 walk->proto = proto;
1580 walk->state = XFRM_STATE_DEAD;
1581 walk->seq = 0;
1583 EXPORT_SYMBOL(xfrm_state_walk_init);
1585 void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1587 if (list_empty(&walk->all))
1588 return;
1590 spin_lock_bh(&xfrm_state_lock);
1591 list_del(&walk->all);
1592 spin_lock_bh(&xfrm_state_lock);
1594 EXPORT_SYMBOL(xfrm_state_walk_done);
1597 void xfrm_replay_notify(struct xfrm_state *x, int event)
1599 struct km_event c;
1600 /* we send notify messages in case
1601 * 1. we updated on of the sequence numbers, and the seqno difference
1602 * is at least x->replay_maxdiff, in this case we also update the
1603 * timeout of our timer function
1604 * 2. if x->replay_maxage has elapsed since last update,
1605 * and there were changes
1607 * The state structure must be locked!
1610 switch (event) {
1611 case XFRM_REPLAY_UPDATE:
1612 if (x->replay_maxdiff &&
1613 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
1614 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
1615 if (x->xflags & XFRM_TIME_DEFER)
1616 event = XFRM_REPLAY_TIMEOUT;
1617 else
1618 return;
1621 break;
1623 case XFRM_REPLAY_TIMEOUT:
1624 if ((x->replay.seq == x->preplay.seq) &&
1625 (x->replay.bitmap == x->preplay.bitmap) &&
1626 (x->replay.oseq == x->preplay.oseq)) {
1627 x->xflags |= XFRM_TIME_DEFER;
1628 return;
1631 break;
1634 memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
1635 c.event = XFRM_MSG_NEWAE;
1636 c.data.aevent = event;
1637 km_state_notify(x, &c);
1639 if (x->replay_maxage &&
1640 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
1641 x->xflags &= ~XFRM_TIME_DEFER;
1644 static void xfrm_replay_timer_handler(unsigned long data)
1646 struct xfrm_state *x = (struct xfrm_state*)data;
1648 spin_lock(&x->lock);
1650 if (x->km.state == XFRM_STATE_VALID) {
1651 if (xfrm_aevent_is_on(xs_net(x)))
1652 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1653 else
1654 x->xflags |= XFRM_TIME_DEFER;
1657 spin_unlock(&x->lock);
1660 int xfrm_replay_check(struct xfrm_state *x,
1661 struct sk_buff *skb, __be32 net_seq)
1663 u32 diff;
1664 u32 seq = ntohl(net_seq);
1666 if (unlikely(seq == 0))
1667 goto err;
1669 if (likely(seq > x->replay.seq))
1670 return 0;
1672 diff = x->replay.seq - seq;
1673 if (diff >= min_t(unsigned int, x->props.replay_window,
1674 sizeof(x->replay.bitmap) * 8)) {
1675 x->stats.replay_window++;
1676 goto err;
1679 if (x->replay.bitmap & (1U << diff)) {
1680 x->stats.replay++;
1681 goto err;
1683 return 0;
1685 err:
1686 xfrm_audit_state_replay(x, skb, net_seq);
1687 return -EINVAL;
1690 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1692 u32 diff;
1693 u32 seq = ntohl(net_seq);
1695 if (seq > x->replay.seq) {
1696 diff = seq - x->replay.seq;
1697 if (diff < x->props.replay_window)
1698 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1699 else
1700 x->replay.bitmap = 1;
1701 x->replay.seq = seq;
1702 } else {
1703 diff = x->replay.seq - seq;
1704 x->replay.bitmap |= (1U << diff);
1707 if (xfrm_aevent_is_on(xs_net(x)))
1708 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1711 static LIST_HEAD(xfrm_km_list);
1712 static DEFINE_RWLOCK(xfrm_km_lock);
1714 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1716 struct xfrm_mgr *km;
1718 read_lock(&xfrm_km_lock);
1719 list_for_each_entry(km, &xfrm_km_list, list)
1720 if (km->notify_policy)
1721 km->notify_policy(xp, dir, c);
1722 read_unlock(&xfrm_km_lock);
1725 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1727 struct xfrm_mgr *km;
1728 read_lock(&xfrm_km_lock);
1729 list_for_each_entry(km, &xfrm_km_list, list)
1730 if (km->notify)
1731 km->notify(x, c);
1732 read_unlock(&xfrm_km_lock);
1735 EXPORT_SYMBOL(km_policy_notify);
1736 EXPORT_SYMBOL(km_state_notify);
1738 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1740 struct net *net = xs_net(x);
1741 struct km_event c;
1743 c.data.hard = hard;
1744 c.pid = pid;
1745 c.event = XFRM_MSG_EXPIRE;
1746 km_state_notify(x, &c);
1748 if (hard)
1749 wake_up(&net->xfrm.km_waitq);
1752 EXPORT_SYMBOL(km_state_expired);
1754 * We send to all registered managers regardless of failure
1755 * We are happy with one success
1757 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1759 int err = -EINVAL, acqret;
1760 struct xfrm_mgr *km;
1762 read_lock(&xfrm_km_lock);
1763 list_for_each_entry(km, &xfrm_km_list, list) {
1764 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1765 if (!acqret)
1766 err = acqret;
1768 read_unlock(&xfrm_km_lock);
1769 return err;
1771 EXPORT_SYMBOL(km_query);
1773 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1775 int err = -EINVAL;
1776 struct xfrm_mgr *km;
1778 read_lock(&xfrm_km_lock);
1779 list_for_each_entry(km, &xfrm_km_list, list) {
1780 if (km->new_mapping)
1781 err = km->new_mapping(x, ipaddr, sport);
1782 if (!err)
1783 break;
1785 read_unlock(&xfrm_km_lock);
1786 return err;
1788 EXPORT_SYMBOL(km_new_mapping);
1790 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1792 struct net *net = xp_net(pol);
1793 struct km_event c;
1795 c.data.hard = hard;
1796 c.pid = pid;
1797 c.event = XFRM_MSG_POLEXPIRE;
1798 km_policy_notify(pol, dir, &c);
1800 if (hard)
1801 wake_up(&net->xfrm.km_waitq);
1803 EXPORT_SYMBOL(km_policy_expired);
#ifdef CONFIG_XFRM_MIGRATE
/* Forward a MIGRATE request to all key managers; succeeds if any manager
 * accepted it (same all-managers convention as km_query above).
 */
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate,
	       struct xfrm_kmaddress *k)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate, k);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif
1828 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1830 int err = -EINVAL;
1831 int ret;
1832 struct xfrm_mgr *km;
1834 read_lock(&xfrm_km_lock);
1835 list_for_each_entry(km, &xfrm_km_list, list) {
1836 if (km->report) {
1837 ret = km->report(net, proto, sel, addr);
1838 if (!ret)
1839 err = ret;
1842 read_unlock(&xfrm_km_lock);
1843 return err;
1845 EXPORT_SYMBOL(km_report);
1847 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1849 int err;
1850 u8 *data;
1851 struct xfrm_mgr *km;
1852 struct xfrm_policy *pol = NULL;
1854 if (optlen <= 0 || optlen > PAGE_SIZE)
1855 return -EMSGSIZE;
1857 data = kmalloc(optlen, GFP_KERNEL);
1858 if (!data)
1859 return -ENOMEM;
1861 err = -EFAULT;
1862 if (copy_from_user(data, optval, optlen))
1863 goto out;
1865 err = -EINVAL;
1866 read_lock(&xfrm_km_lock);
1867 list_for_each_entry(km, &xfrm_km_list, list) {
1868 pol = km->compile_policy(sk, optname, data,
1869 optlen, &err);
1870 if (err >= 0)
1871 break;
1873 read_unlock(&xfrm_km_lock);
1875 if (err >= 0) {
1876 xfrm_sk_policy_insert(sk, err, pol);
1877 xfrm_pol_put(pol);
1878 err = 0;
1881 out:
1882 kfree(data);
1883 return err;
1885 EXPORT_SYMBOL(xfrm_user_policy);
1887 int xfrm_register_km(struct xfrm_mgr *km)
1889 write_lock_bh(&xfrm_km_lock);
1890 list_add_tail(&km->list, &xfrm_km_list);
1891 write_unlock_bh(&xfrm_km_lock);
1892 return 0;
1894 EXPORT_SYMBOL(xfrm_register_km);
1896 int xfrm_unregister_km(struct xfrm_mgr *km)
1898 write_lock_bh(&xfrm_km_lock);
1899 list_del(&km->list);
1900 write_unlock_bh(&xfrm_km_lock);
1901 return 0;
1903 EXPORT_SYMBOL(xfrm_unregister_km);
1905 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1907 int err = 0;
1908 if (unlikely(afinfo == NULL))
1909 return -EINVAL;
1910 if (unlikely(afinfo->family >= NPROTO))
1911 return -EAFNOSUPPORT;
1912 write_lock_bh(&xfrm_state_afinfo_lock);
1913 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1914 err = -ENOBUFS;
1915 else
1916 xfrm_state_afinfo[afinfo->family] = afinfo;
1917 write_unlock_bh(&xfrm_state_afinfo_lock);
1918 return err;
1920 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1922 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1924 int err = 0;
1925 if (unlikely(afinfo == NULL))
1926 return -EINVAL;
1927 if (unlikely(afinfo->family >= NPROTO))
1928 return -EAFNOSUPPORT;
1929 write_lock_bh(&xfrm_state_afinfo_lock);
1930 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1931 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1932 err = -EINVAL;
1933 else
1934 xfrm_state_afinfo[afinfo->family] = NULL;
1936 write_unlock_bh(&xfrm_state_afinfo_lock);
1937 return err;
1939 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1941 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1943 struct xfrm_state_afinfo *afinfo;
1944 if (unlikely(family >= NPROTO))
1945 return NULL;
1946 read_lock(&xfrm_state_afinfo_lock);
1947 afinfo = xfrm_state_afinfo[family];
1948 if (unlikely(!afinfo))
1949 read_unlock(&xfrm_state_afinfo_lock);
1950 return afinfo;
1953 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1954 __releases(xfrm_state_afinfo_lock)
1956 read_unlock(&xfrm_state_afinfo_lock);
1959 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1960 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1962 if (x->tunnel) {
1963 struct xfrm_state *t = x->tunnel;
1965 if (atomic_read(&t->tunnel_users) == 2)
1966 xfrm_state_delete(t);
1967 atomic_dec(&t->tunnel_users);
1968 xfrm_state_put(t);
1969 x->tunnel = NULL;
1972 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1974 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1976 int res;
1978 spin_lock_bh(&x->lock);
1979 if (x->km.state == XFRM_STATE_VALID &&
1980 x->type && x->type->get_mtu)
1981 res = x->type->get_mtu(x, mtu);
1982 else
1983 res = mtu - x->props.header_len;
1984 spin_unlock_bh(&x->lock);
1985 return res;
1988 int xfrm_init_state(struct xfrm_state *x)
1990 struct xfrm_state_afinfo *afinfo;
1991 struct xfrm_mode *inner_mode;
1992 int family = x->props.family;
1993 int err;
1995 err = -EAFNOSUPPORT;
1996 afinfo = xfrm_state_get_afinfo(family);
1997 if (!afinfo)
1998 goto error;
2000 err = 0;
2001 if (afinfo->init_flags)
2002 err = afinfo->init_flags(x);
2004 xfrm_state_put_afinfo(afinfo);
2006 if (err)
2007 goto error;
2009 err = -EPROTONOSUPPORT;
2011 if (x->sel.family != AF_UNSPEC) {
2012 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2013 if (inner_mode == NULL)
2014 goto error;
2016 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2017 family != x->sel.family) {
2018 xfrm_put_mode(inner_mode);
2019 goto error;
2022 x->inner_mode = inner_mode;
2023 } else {
2024 struct xfrm_mode *inner_mode_iaf;
2025 int iafamily = AF_INET;
2027 inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
2028 if (inner_mode == NULL)
2029 goto error;
2031 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
2032 xfrm_put_mode(inner_mode);
2033 goto error;
2035 x->inner_mode = inner_mode;
2037 if (x->props.family == AF_INET)
2038 iafamily = AF_INET6;
2040 inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
2041 if (inner_mode_iaf) {
2042 if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
2043 x->inner_mode_iaf = inner_mode_iaf;
2044 else
2045 xfrm_put_mode(inner_mode_iaf);
2049 x->type = xfrm_get_type(x->id.proto, family);
2050 if (x->type == NULL)
2051 goto error;
2053 err = x->type->init_state(x);
2054 if (err)
2055 goto error;
2057 x->outer_mode = xfrm_get_mode(x->props.mode, family);
2058 if (x->outer_mode == NULL)
2059 goto error;
2061 x->km.state = XFRM_STATE_VALID;
2063 error:
2064 return err;
2067 EXPORT_SYMBOL(xfrm_init_state);
2069 int __net_init xfrm_state_init(struct net *net)
2071 unsigned int sz;
2073 INIT_LIST_HEAD(&net->xfrm.state_all);
2075 sz = sizeof(struct hlist_head) * 8;
2077 net->xfrm.state_bydst = xfrm_hash_alloc(sz);
2078 if (!net->xfrm.state_bydst)
2079 goto out_bydst;
2080 net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
2081 if (!net->xfrm.state_bysrc)
2082 goto out_bysrc;
2083 net->xfrm.state_byspi = xfrm_hash_alloc(sz);
2084 if (!net->xfrm.state_byspi)
2085 goto out_byspi;
2086 net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2088 net->xfrm.state_num = 0;
2089 INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
2090 INIT_HLIST_HEAD(&net->xfrm.state_gc_list);
2091 INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task);
2092 init_waitqueue_head(&net->xfrm.km_waitq);
2093 return 0;
2095 out_byspi:
2096 xfrm_hash_free(net->xfrm.state_bysrc, sz);
2097 out_bysrc:
2098 xfrm_hash_free(net->xfrm.state_bydst, sz);
2099 out_bydst:
2100 return -ENOMEM;
2103 void xfrm_state_fini(struct net *net)
2105 struct xfrm_audit audit_info;
2106 unsigned int sz;
2108 flush_work(&net->xfrm.state_hash_work);
2109 audit_info.loginuid = -1;
2110 audit_info.sessionid = -1;
2111 audit_info.secid = 0;
2112 xfrm_state_flush(net, IPSEC_PROTO_ANY, &audit_info);
2113 flush_work(&net->xfrm.state_gc_work);
2115 WARN_ON(!list_empty(&net->xfrm.state_all));
2117 sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
2118 WARN_ON(!hlist_empty(net->xfrm.state_byspi));
2119 xfrm_hash_free(net->xfrm.state_byspi, sz);
2120 WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
2121 xfrm_hash_free(net->xfrm.state_bysrc, sz);
2122 WARN_ON(!hlist_empty(net->xfrm.state_bydst));
2123 xfrm_hash_free(net->xfrm.state_bydst, sz);
2126 #ifdef CONFIG_AUDITSYSCALL
2127 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2128 struct audit_buffer *audit_buf)
2130 struct xfrm_sec_ctx *ctx = x->security;
2131 u32 spi = ntohl(x->id.spi);
2133 if (ctx)
2134 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2135 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2137 switch(x->props.family) {
2138 case AF_INET:
2139 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2140 &x->props.saddr.a4, &x->id.daddr.a4);
2141 break;
2142 case AF_INET6:
2143 audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
2144 x->props.saddr.a6, x->id.daddr.a6);
2145 break;
2148 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2151 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2152 struct audit_buffer *audit_buf)
2154 struct iphdr *iph4;
2155 struct ipv6hdr *iph6;
2157 switch (family) {
2158 case AF_INET:
2159 iph4 = ip_hdr(skb);
2160 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2161 &iph4->saddr, &iph4->daddr);
2162 break;
2163 case AF_INET6:
2164 iph6 = ipv6_hdr(skb);
2165 audit_log_format(audit_buf,
2166 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
2167 &iph6->saddr,&iph6->daddr,
2168 iph6->flow_lbl[0] & 0x0f,
2169 iph6->flow_lbl[1],
2170 iph6->flow_lbl[2]);
2171 break;
2175 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2176 uid_t auid, u32 sessionid, u32 secid)
2178 struct audit_buffer *audit_buf;
2180 audit_buf = xfrm_audit_start("SAD-add");
2181 if (audit_buf == NULL)
2182 return;
2183 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2184 xfrm_audit_helper_sainfo(x, audit_buf);
2185 audit_log_format(audit_buf, " res=%u", result);
2186 audit_log_end(audit_buf);
2188 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2190 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2191 uid_t auid, u32 sessionid, u32 secid)
2193 struct audit_buffer *audit_buf;
2195 audit_buf = xfrm_audit_start("SAD-delete");
2196 if (audit_buf == NULL)
2197 return;
2198 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2199 xfrm_audit_helper_sainfo(x, audit_buf);
2200 audit_log_format(audit_buf, " res=%u", result);
2201 audit_log_end(audit_buf);
2203 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2205 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2206 struct sk_buff *skb)
2208 struct audit_buffer *audit_buf;
2209 u32 spi;
2211 audit_buf = xfrm_audit_start("SA-replay-overflow");
2212 if (audit_buf == NULL)
2213 return;
2214 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2215 /* don't record the sequence number because it's inherent in this kind
2216 * of audit message */
2217 spi = ntohl(x->id.spi);
2218 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2219 audit_log_end(audit_buf);
2221 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2223 static void xfrm_audit_state_replay(struct xfrm_state *x,
2224 struct sk_buff *skb, __be32 net_seq)
2226 struct audit_buffer *audit_buf;
2227 u32 spi;
2229 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2230 if (audit_buf == NULL)
2231 return;
2232 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2233 spi = ntohl(x->id.spi);
2234 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2235 spi, spi, ntohl(net_seq));
2236 audit_log_end(audit_buf);
2239 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2241 struct audit_buffer *audit_buf;
2243 audit_buf = xfrm_audit_start("SA-notfound");
2244 if (audit_buf == NULL)
2245 return;
2246 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2247 audit_log_end(audit_buf);
2249 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2251 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2252 __be32 net_spi, __be32 net_seq)
2254 struct audit_buffer *audit_buf;
2255 u32 spi;
2257 audit_buf = xfrm_audit_start("SA-notfound");
2258 if (audit_buf == NULL)
2259 return;
2260 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2261 spi = ntohl(net_spi);
2262 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2263 spi, spi, ntohl(net_seq));
2264 audit_log_end(audit_buf);
2266 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2268 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2269 struct sk_buff *skb, u8 proto)
2271 struct audit_buffer *audit_buf;
2272 __be32 net_spi;
2273 __be32 net_seq;
2275 audit_buf = xfrm_audit_start("SA-icv-failure");
2276 if (audit_buf == NULL)
2277 return;
2278 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2279 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2280 u32 spi = ntohl(net_spi);
2281 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2282 spi, spi, ntohl(net_seq));
2284 audit_log_end(audit_buf);
2286 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2287 #endif /* CONFIG_AUDITSYSCALL */