[linux-2.6-xlnx.git] / net / xfrm / xfrm_policy.c
blob 5ce74a385525c9a5ef036428670663d1d924e063
1 /*
2 * xfrm_policy.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * Kazunori MIYAZAWA @USAGI
10 * YOSHIFUJI Hideaki
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
13 *
14 */
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/audit.h>
28 #include <net/dst.h>
29 #include <net/xfrm.h>
30 #include <net/ip.h>
31 #ifdef CONFIG_XFRM_STATISTICS
32 #include <net/snmp.h>
33 #endif
35 #include "xfrm_hash.h"
37 DEFINE_MUTEX(xfrm_cfg_mutex);
38 EXPORT_SYMBOL(xfrm_cfg_mutex);
40 static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
41 static struct dst_entry *xfrm_policy_sk_bundles;
42 static DEFINE_RWLOCK(xfrm_policy_lock);
44 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
45 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
47 static struct kmem_cache *xfrm_dst_cache __read_mostly;
49 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
50 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
51 static void xfrm_init_pmtu(struct dst_entry *dst);
52 static int stale_bundle(struct dst_entry *dst);
53 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
56 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
57 int dir);
59 static inline int
60 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
62 const struct flowi4 *fl4 = &fl->u.ip4;
64 return addr_match(&fl4->daddr, &sel->daddr, sel->prefixlen_d) &&
65 addr_match(&fl4->saddr, &sel->saddr, sel->prefixlen_s) &&
66 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
67 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
68 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
69 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
72 static inline int
73 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
75 const struct flowi6 *fl6 = &fl->u.ip6;
77 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
78 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
79 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
80 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
81 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
82 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
85 int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
86 unsigned short family)
88 switch (family) {
89 case AF_INET:
90 return __xfrm4_selector_match(sel, fl);
91 case AF_INET6:
92 return __xfrm6_selector_match(sel, fl);
94 return 0;
97 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
98 const xfrm_address_t *saddr,
99 const xfrm_address_t *daddr,
100 int family)
102 struct xfrm_policy_afinfo *afinfo;
103 struct dst_entry *dst;
105 afinfo = xfrm_policy_get_afinfo(family);
106 if (unlikely(afinfo == NULL))
107 return ERR_PTR(-EAFNOSUPPORT);
109 dst = afinfo->dst_lookup(net, tos, saddr, daddr);
111 xfrm_policy_put_afinfo(afinfo);
113 return dst;
116 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
117 xfrm_address_t *prev_saddr,
118 xfrm_address_t *prev_daddr,
119 int family)
121 struct net *net = xs_net(x);
122 xfrm_address_t *saddr = &x->props.saddr;
123 xfrm_address_t *daddr = &x->id.daddr;
124 struct dst_entry *dst;
126 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
127 saddr = x->coaddr;
128 daddr = prev_daddr;
130 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
131 saddr = prev_saddr;
132 daddr = x->coaddr;
135 dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
137 if (!IS_ERR(dst)) {
138 if (prev_saddr != saddr)
139 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
140 if (prev_daddr != daddr)
141 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
144 return dst;
147 static inline unsigned long make_jiffies(long secs)
149 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
150 return MAX_SCHEDULE_TIMEOUT-1;
151 else
152 return secs*HZ;
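/* Per-policy lifetime timer: check the soft and hard add/use expiry
 * times, warn key managers via km_policy_expired() on soft expiry,
 * and delete the policy once a hard limit is reached.
 */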
155 static void xfrm_policy_timer(unsigned long data)
157 struct xfrm_policy *xp = (struct xfrm_policy*)data;
158 unsigned long now = get_seconds();
159 long next = LONG_MAX;
160 int warn = 0;
161 int dir;
163 read_lock(&xp->lock);
165 if (unlikely(xp->walk.dead))
166 goto out;
168 dir = xfrm_policy_id2dir(xp->index);
170 if (xp->lft.hard_add_expires_seconds) {
171 long tmo = xp->lft.hard_add_expires_seconds +
172 xp->curlft.add_time - now;
173 if (tmo <= 0)
174 goto expired;
175 if (tmo < next)
176 next = tmo;
178 if (xp->lft.hard_use_expires_seconds) {
179 long tmo = xp->lft.hard_use_expires_seconds +
180 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
181 if (tmo <= 0)
182 goto expired;
183 if (tmo < next)
184 next = tmo;
186 if (xp->lft.soft_add_expires_seconds) {
187 long tmo = xp->lft.soft_add_expires_seconds +
188 xp->curlft.add_time - now;
189 if (tmo <= 0) {
190 warn = 1;
191 tmo = XFRM_KM_TIMEOUT;
193 if (tmo < next)
194 next = tmo;
196 if (xp->lft.soft_use_expires_seconds) {
197 long tmo = xp->lft.soft_use_expires_seconds +
198 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
199 if (tmo <= 0) {
200 warn = 1;
201 tmo = XFRM_KM_TIMEOUT;
203 if (tmo < next)
204 next = tmo;
207 if (warn)
208 km_policy_expired(xp, dir, 0, 0);
209 if (next != LONG_MAX &&
210 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
211 xfrm_pol_hold(xp);
213 out:
214 read_unlock(&xp->lock);
215 xfrm_pol_put(xp);
216 return;
218 expired:
219 read_unlock(&xp->lock);
220 if (!xfrm_policy_delete(xp, dir))
221 km_policy_expired(xp, dir, 1, 0);
222 xfrm_pol_put(xp);
225 static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
227 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
229 if (unlikely(pol->walk.dead))
230 flo = NULL;
231 else
232 xfrm_pol_hold(pol);
234 return flo;
237 static int xfrm_policy_flo_check(struct flow_cache_object *flo)
239 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
241 return !pol->walk.dead;
244 static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
246 xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
249 static const struct flow_cache_ops xfrm_policy_fc_ops = {
250 .get = xfrm_policy_flo_get,
251 .check = xfrm_policy_flo_check,
252 .delete = xfrm_policy_flo_delete,
255 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
256 * SPD calls.
257 */
259 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
261 struct xfrm_policy *policy;
263 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
265 if (policy) {
266 write_pnet(&policy->xp_net, net);
267 INIT_LIST_HEAD(&policy->walk.all);
268 INIT_HLIST_NODE(&policy->bydst);
269 INIT_HLIST_NODE(&policy->byidx);
270 rwlock_init(&policy->lock);
271 atomic_set(&policy->refcnt, 1);
272 setup_timer(&policy->timer, xfrm_policy_timer,
273 (unsigned long)policy);
274 policy->flo.ops = &xfrm_policy_fc_ops;
276 return policy;
278 EXPORT_SYMBOL(xfrm_policy_alloc);
280 /* Destroy xfrm_policy: descendant resources must have been released by this moment. */
282 void xfrm_policy_destroy(struct xfrm_policy *policy)
284 BUG_ON(!policy->walk.dead);
286 if (del_timer(&policy->timer))
287 BUG();
289 security_xfrm_policy_free(policy->security);
290 kfree(policy);
292 EXPORT_SYMBOL(xfrm_policy_destroy);
294 /* Rule must be locked. Release descendant resources, announce
295 * entry dead. The rule must already be unlinked from lists at this point.
296 */
298 static void xfrm_policy_kill(struct xfrm_policy *policy)
300 policy->walk.dead = 1;
302 atomic_inc(&policy->genid);
304 if (del_timer(&policy->timer))
305 xfrm_pol_put(policy);
307 xfrm_pol_put(policy);
310 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
312 static inline unsigned int idx_hash(struct net *net, u32 index)
314 return __idx_hash(index, net->xfrm.policy_idx_hmask);
317 static struct hlist_head *policy_hash_bysel(struct net *net,
318 const struct xfrm_selector *sel,
319 unsigned short family, int dir)
321 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
322 unsigned int hash = __sel_hash(sel, family, hmask);
324 return (hash == hmask + 1 ?
325 &net->xfrm.policy_inexact[dir] :
326 net->xfrm.policy_bydst[dir].table + hash);
329 static struct hlist_head *policy_hash_direct(struct net *net,
330 const xfrm_address_t *daddr,
331 const xfrm_address_t *saddr,
332 unsigned short family, int dir)
334 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
335 unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
337 return net->xfrm.policy_bydst[dir].table + hash;
340 static void xfrm_dst_hash_transfer(struct hlist_head *list,
341 struct hlist_head *ndsttable,
342 unsigned int nhashmask)
344 struct hlist_node *entry, *tmp, *entry0 = NULL;
345 struct xfrm_policy *pol;
346 unsigned int h0 = 0;
348 redo:
349 hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
350 unsigned int h;
352 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
353 pol->family, nhashmask);
354 if (!entry0) {
355 hlist_del(entry);
356 hlist_add_head(&pol->bydst, ndsttable+h);
357 h0 = h;
358 } else {
359 if (h != h0)
360 continue;
361 hlist_del(entry);
362 hlist_add_after(entry0, &pol->bydst);
364 entry0 = entry;
366 if (!hlist_empty(list)) {
367 entry0 = NULL;
368 goto redo;
372 static void xfrm_idx_hash_transfer(struct hlist_head *list,
373 struct hlist_head *nidxtable,
374 unsigned int nhashmask)
376 struct hlist_node *entry, *tmp;
377 struct xfrm_policy *pol;
379 hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
380 unsigned int h;
382 h = __idx_hash(pol->index, nhashmask);
383 hlist_add_head(&pol->byidx, nidxtable+h);
387 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
389 return ((old_hmask + 1) << 1) - 1;
392 static void xfrm_bydst_resize(struct net *net, int dir)
394 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
395 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
396 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
397 struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
398 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
399 int i;
401 if (!ndst)
402 return;
404 write_lock_bh(&xfrm_policy_lock);
406 for (i = hmask; i >= 0; i--)
407 xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
409 net->xfrm.policy_bydst[dir].table = ndst;
410 net->xfrm.policy_bydst[dir].hmask = nhashmask;
412 write_unlock_bh(&xfrm_policy_lock);
414 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
417 static void xfrm_byidx_resize(struct net *net, int total)
419 unsigned int hmask = net->xfrm.policy_idx_hmask;
420 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
421 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
422 struct hlist_head *oidx = net->xfrm.policy_byidx;
423 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
424 int i;
426 if (!nidx)
427 return;
429 write_lock_bh(&xfrm_policy_lock);
431 for (i = hmask; i >= 0; i--)
432 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
434 net->xfrm.policy_byidx = nidx;
435 net->xfrm.policy_idx_hmask = nhashmask;
437 write_unlock_bh(&xfrm_policy_lock);
439 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
442 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
444 unsigned int cnt = net->xfrm.policy_count[dir];
445 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
447 if (total)
448 *total += cnt;
450 if ((hmask + 1) < xfrm_policy_hashmax &&
451 cnt > hmask)
452 return 1;
454 return 0;
457 static inline int xfrm_byidx_should_resize(struct net *net, int total)
459 unsigned int hmask = net->xfrm.policy_idx_hmask;
461 if ((hmask + 1) < xfrm_policy_hashmax &&
462 total > hmask)
463 return 1;
465 return 0;
468 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
470 read_lock_bh(&xfrm_policy_lock);
471 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
472 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
473 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
474 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
475 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
476 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
477 si->spdhcnt = net->xfrm.policy_idx_hmask;
478 si->spdhmcnt = xfrm_policy_hashmax;
479 read_unlock_bh(&xfrm_policy_lock);
481 EXPORT_SYMBOL(xfrm_spd_getinfo);
483 static DEFINE_MUTEX(hash_resize_mutex);
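/* Deferred work that grows the per-direction "bydst" hash tables and
 * the "byidx" table when the policy count outgrows the current mask.
 */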
484 static void xfrm_hash_resize(struct work_struct *work)
486 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
487 int dir, total;
489 mutex_lock(&hash_resize_mutex);
491 total = 0;
492 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
493 if (xfrm_bydst_should_resize(net, dir, &total))
494 xfrm_bydst_resize(net, dir);
496 if (xfrm_byidx_should_resize(net, total))
497 xfrm_byidx_resize(net, total);
499 mutex_unlock(&hash_resize_mutex);
502 /* Generate new index... KAME seems to generate them ordered by cost
503 * of an absolute unpredictability of ordering of rules. This will not pass. */
504 static u32 xfrm_gen_index(struct net *net, int dir)
506 static u32 idx_generator;
508 for (;;) {
509 struct hlist_node *entry;
510 struct hlist_head *list;
511 struct xfrm_policy *p;
512 u32 idx;
513 int found;
515 idx = (idx_generator | dir);
516 idx_generator += 8;
517 if (idx == 0)
518 idx = 8;
519 list = net->xfrm.policy_byidx + idx_hash(net, idx);
520 found = 0;
521 hlist_for_each_entry(p, entry, list, byidx) {
522 if (p->index == idx) {
523 found = 1;
524 break;
527 if (!found)
528 return idx;
532 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
534 u32 *p1 = (u32 *) s1;
535 u32 *p2 = (u32 *) s2;
536 int len = sizeof(struct xfrm_selector) / sizeof(u32);
537 int i;
539 for (i = 0; i < len; i++) {
540 if (p1[i] != p2[i])
541 return 1;
544 return 0;
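/* Insert a policy into the SPD for the given direction: keep the bydst
 * chain ordered by priority, replace an existing entry with the same
 * selector, mark and security context, and schedule a hash resize if
 * the table is getting full.
 */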
547 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
549 struct net *net = xp_net(policy);
550 struct xfrm_policy *pol;
551 struct xfrm_policy *delpol;
552 struct hlist_head *chain;
553 struct hlist_node *entry, *newpos;
554 u32 mark = policy->mark.v & policy->mark.m;
556 write_lock_bh(&xfrm_policy_lock);
557 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
558 delpol = NULL;
559 newpos = NULL;
560 hlist_for_each_entry(pol, entry, chain, bydst) {
561 if (pol->type == policy->type &&
562 !selector_cmp(&pol->selector, &policy->selector) &&
563 (mark & pol->mark.m) == pol->mark.v &&
564 xfrm_sec_ctx_match(pol->security, policy->security) &&
565 !WARN_ON(delpol)) {
566 if (excl) {
567 write_unlock_bh(&xfrm_policy_lock);
568 return -EEXIST;
570 delpol = pol;
571 if (policy->priority > pol->priority)
572 continue;
573 } else if (policy->priority >= pol->priority) {
574 newpos = &pol->bydst;
575 continue;
577 if (delpol)
578 break;
580 if (newpos)
581 hlist_add_after(newpos, &policy->bydst);
582 else
583 hlist_add_head(&policy->bydst, chain);
584 xfrm_pol_hold(policy);
585 net->xfrm.policy_count[dir]++;
586 atomic_inc(&flow_cache_genid);
587 if (delpol)
588 __xfrm_policy_unlink(delpol, dir);
589 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
590 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
591 policy->curlft.add_time = get_seconds();
592 policy->curlft.use_time = 0;
593 if (!mod_timer(&policy->timer, jiffies + HZ))
594 xfrm_pol_hold(policy);
595 list_add(&policy->walk.all, &net->xfrm.policy_all);
596 write_unlock_bh(&xfrm_policy_lock);
598 if (delpol)
599 xfrm_policy_kill(delpol);
600 else if (xfrm_bydst_should_resize(net, dir, NULL))
601 schedule_work(&net->xfrm.policy_hash_work);
603 return 0;
605 EXPORT_SYMBOL(xfrm_policy_insert);
607 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
608 int dir, struct xfrm_selector *sel,
609 struct xfrm_sec_ctx *ctx, int delete,
610 int *err)
612 struct xfrm_policy *pol, *ret;
613 struct hlist_head *chain;
614 struct hlist_node *entry;
616 *err = 0;
617 write_lock_bh(&xfrm_policy_lock);
618 chain = policy_hash_bysel(net, sel, sel->family, dir);
619 ret = NULL;
620 hlist_for_each_entry(pol, entry, chain, bydst) {
621 if (pol->type == type &&
622 (mark & pol->mark.m) == pol->mark.v &&
623 !selector_cmp(sel, &pol->selector) &&
624 xfrm_sec_ctx_match(ctx, pol->security)) {
625 xfrm_pol_hold(pol);
626 if (delete) {
627 *err = security_xfrm_policy_delete(
628 pol->security);
629 if (*err) {
630 write_unlock_bh(&xfrm_policy_lock);
631 return pol;
633 __xfrm_policy_unlink(pol, dir);
635 ret = pol;
636 break;
639 write_unlock_bh(&xfrm_policy_lock);
641 if (ret && delete)
642 xfrm_policy_kill(ret);
643 return ret;
645 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
647 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
648 int dir, u32 id, int delete, int *err)
650 struct xfrm_policy *pol, *ret;
651 struct hlist_head *chain;
652 struct hlist_node *entry;
654 *err = -ENOENT;
655 if (xfrm_policy_id2dir(id) != dir)
656 return NULL;
658 *err = 0;
659 write_lock_bh(&xfrm_policy_lock);
660 chain = net->xfrm.policy_byidx + idx_hash(net, id);
661 ret = NULL;
662 hlist_for_each_entry(pol, entry, chain, byidx) {
663 if (pol->type == type && pol->index == id &&
664 (mark & pol->mark.m) == pol->mark.v) {
665 xfrm_pol_hold(pol);
666 if (delete) {
667 *err = security_xfrm_policy_delete(
668 pol->security);
669 if (*err) {
670 write_unlock_bh(&xfrm_policy_lock);
671 return pol;
673 __xfrm_policy_unlink(pol, dir);
675 ret = pol;
676 break;
679 write_unlock_bh(&xfrm_policy_lock);
681 if (ret && delete)
682 xfrm_policy_kill(ret);
683 return ret;
685 EXPORT_SYMBOL(xfrm_policy_byid);
687 #ifdef CONFIG_SECURITY_NETWORK_XFRM
688 static inline int
689 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
691 int dir, err = 0;
693 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
694 struct xfrm_policy *pol;
695 struct hlist_node *entry;
696 int i;
698 hlist_for_each_entry(pol, entry,
699 &net->xfrm.policy_inexact[dir], bydst) {
700 if (pol->type != type)
701 continue;
702 err = security_xfrm_policy_delete(pol->security);
703 if (err) {
704 xfrm_audit_policy_delete(pol, 0,
705 audit_info->loginuid,
706 audit_info->sessionid,
707 audit_info->secid);
708 return err;
711 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
712 hlist_for_each_entry(pol, entry,
713 net->xfrm.policy_bydst[dir].table + i,
714 bydst) {
715 if (pol->type != type)
716 continue;
717 err = security_xfrm_policy_delete(
718 pol->security);
719 if (err) {
720 xfrm_audit_policy_delete(pol, 0,
721 audit_info->loginuid,
722 audit_info->sessionid,
723 audit_info->secid);
724 return err;
729 return err;
731 #else
732 static inline int
733 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
735 return 0;
737 #endif
739 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
741 int dir, err = 0, cnt = 0;
743 write_lock_bh(&xfrm_policy_lock);
745 err = xfrm_policy_flush_secctx_check(net, type, audit_info);
746 if (err)
747 goto out;
749 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
750 struct xfrm_policy *pol;
751 struct hlist_node *entry;
752 int i;
754 again1:
755 hlist_for_each_entry(pol, entry,
756 &net->xfrm.policy_inexact[dir], bydst) {
757 if (pol->type != type)
758 continue;
759 __xfrm_policy_unlink(pol, dir);
760 write_unlock_bh(&xfrm_policy_lock);
761 cnt++;
763 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
764 audit_info->sessionid,
765 audit_info->secid);
767 xfrm_policy_kill(pol);
769 write_lock_bh(&xfrm_policy_lock);
770 goto again1;
773 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
774 again2:
775 hlist_for_each_entry(pol, entry,
776 net->xfrm.policy_bydst[dir].table + i,
777 bydst) {
778 if (pol->type != type)
779 continue;
780 __xfrm_policy_unlink(pol, dir);
781 write_unlock_bh(&xfrm_policy_lock);
782 cnt++;
784 xfrm_audit_policy_delete(pol, 1,
785 audit_info->loginuid,
786 audit_info->sessionid,
787 audit_info->secid);
788 xfrm_policy_kill(pol);
790 write_lock_bh(&xfrm_policy_lock);
791 goto again2;
796 if (!cnt)
797 err = -ESRCH;
798 out:
799 write_unlock_bh(&xfrm_policy_lock);
800 return err;
802 EXPORT_SYMBOL(xfrm_policy_flush);
804 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
805 int (*func)(struct xfrm_policy *, int, int, void*),
806 void *data)
808 struct xfrm_policy *pol;
809 struct xfrm_policy_walk_entry *x;
810 int error = 0;
812 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
813 walk->type != XFRM_POLICY_TYPE_ANY)
814 return -EINVAL;
816 if (list_empty(&walk->walk.all) && walk->seq != 0)
817 return 0;
819 write_lock_bh(&xfrm_policy_lock);
820 if (list_empty(&walk->walk.all))
821 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
822 else
823 x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
824 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
825 if (x->dead)
826 continue;
827 pol = container_of(x, struct xfrm_policy, walk);
828 if (walk->type != XFRM_POLICY_TYPE_ANY &&
829 walk->type != pol->type)
830 continue;
831 error = func(pol, xfrm_policy_id2dir(pol->index),
832 walk->seq, data);
833 if (error) {
834 list_move_tail(&walk->walk.all, &x->all);
835 goto out;
837 walk->seq++;
839 if (walk->seq == 0) {
840 error = -ENOENT;
841 goto out;
843 list_del_init(&walk->walk.all);
844 out:
845 write_unlock_bh(&xfrm_policy_lock);
846 return error;
848 EXPORT_SYMBOL(xfrm_policy_walk);
850 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
852 INIT_LIST_HEAD(&walk->walk.all);
853 walk->walk.dead = 1;
854 walk->type = type;
855 walk->seq = 0;
857 EXPORT_SYMBOL(xfrm_policy_walk_init);
859 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
861 if (list_empty(&walk->walk.all))
862 return;
864 write_lock_bh(&xfrm_policy_lock);
865 list_del(&walk->walk.all);
866 write_unlock_bh(&xfrm_policy_lock);
868 EXPORT_SYMBOL(xfrm_policy_walk_done);
870 /*
871 * Find policy to apply to this flow.
872 *
873 * Returns 0 if policy found, else an -errno.
874 */
875 static int xfrm_policy_match(const struct xfrm_policy *pol,
876 const struct flowi *fl,
877 u8 type, u16 family, int dir)
879 const struct xfrm_selector *sel = &pol->selector;
880 int match, ret = -ESRCH;
882 if (pol->family != family ||
883 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
884 pol->type != type)
885 return ret;
887 match = xfrm_selector_match(sel, fl, family);
888 if (match)
889 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
890 dir);
892 return ret;
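/* Find the best-priority policy of the given type matching the flow:
 * scan the exact (hashed by addresses) chain first, then the inexact
 * chain, and return the match with a reference held.
 */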
895 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
896 const struct flowi *fl,
897 u16 family, u8 dir)
899 int err;
900 struct xfrm_policy *pol, *ret;
901 const xfrm_address_t *daddr, *saddr;
902 struct hlist_node *entry;
903 struct hlist_head *chain;
904 u32 priority = ~0U;
906 daddr = xfrm_flowi_daddr(fl, family);
907 saddr = xfrm_flowi_saddr(fl, family);
908 if (unlikely(!daddr || !saddr))
909 return NULL;
911 read_lock_bh(&xfrm_policy_lock);
912 chain = policy_hash_direct(net, daddr, saddr, family, dir);
913 ret = NULL;
914 hlist_for_each_entry(pol, entry, chain, bydst) {
915 err = xfrm_policy_match(pol, fl, type, family, dir);
916 if (err) {
917 if (err == -ESRCH)
918 continue;
919 else {
920 ret = ERR_PTR(err);
921 goto fail;
923 } else {
924 ret = pol;
925 priority = ret->priority;
926 break;
929 chain = &net->xfrm.policy_inexact[dir];
930 hlist_for_each_entry(pol, entry, chain, bydst) {
931 err = xfrm_policy_match(pol, fl, type, family, dir);
932 if (err) {
933 if (err == -ESRCH)
934 continue;
935 else {
936 ret = ERR_PTR(err);
937 goto fail;
939 } else if (pol->priority < priority) {
940 ret = pol;
941 break;
944 if (ret)
945 xfrm_pol_hold(ret);
946 fail:
947 read_unlock_bh(&xfrm_policy_lock);
949 return ret;
952 static struct xfrm_policy *
953 __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
955 #ifdef CONFIG_XFRM_SUB_POLICY
956 struct xfrm_policy *pol;
958 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
959 if (pol != NULL)
960 return pol;
961 #endif
962 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
965 static struct flow_cache_object *
966 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
967 u8 dir, struct flow_cache_object *old_obj, void *ctx)
969 struct xfrm_policy *pol;
971 if (old_obj)
972 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
974 pol = __xfrm_policy_lookup(net, fl, family, dir);
975 if (IS_ERR_OR_NULL(pol))
976 return ERR_CAST(pol);
978 /* Resolver returns two references:
979 * one for cache and one for caller of flow_cache_lookup() */
980 xfrm_pol_hold(pol);
982 return &pol->flo;
985 static inline int policy_to_flow_dir(int dir)
987 if (XFRM_POLICY_IN == FLOW_DIR_IN &&
988 XFRM_POLICY_OUT == FLOW_DIR_OUT &&
989 XFRM_POLICY_FWD == FLOW_DIR_FWD)
990 return dir;
991 switch (dir) {
992 default:
993 case XFRM_POLICY_IN:
994 return FLOW_DIR_IN;
995 case XFRM_POLICY_OUT:
996 return FLOW_DIR_OUT;
997 case XFRM_POLICY_FWD:
998 return FLOW_DIR_FWD;
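/* Look up the per-socket policy for this direction and validate it
 * against the flow's selector, mark and security context.
 */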
1002 static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
1003 const struct flowi *fl)
1005 struct xfrm_policy *pol;
1007 read_lock_bh(&xfrm_policy_lock);
1008 if ((pol = sk->sk_policy[dir]) != NULL) {
1009 int match = xfrm_selector_match(&pol->selector, fl,
1010 sk->sk_family);
1011 int err = 0;
1013 if (match) {
1014 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1015 pol = NULL;
1016 goto out;
1018 err = security_xfrm_policy_lookup(pol->security,
1019 fl->flowi_secid,
1020 policy_to_flow_dir(dir));
1021 if (!err)
1022 xfrm_pol_hold(pol);
1023 else if (err == -ESRCH)
1024 pol = NULL;
1025 else
1026 pol = ERR_PTR(err);
1027 } else
1028 pol = NULL;
1030 out:
1031 read_unlock_bh(&xfrm_policy_lock);
1032 return pol;
1035 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1037 struct net *net = xp_net(pol);
1038 struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
1039 pol->family, dir);
1041 list_add(&pol->walk.all, &net->xfrm.policy_all);
1042 hlist_add_head(&pol->bydst, chain);
1043 hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
1044 net->xfrm.policy_count[dir]++;
1045 xfrm_pol_hold(pol);
1047 if (xfrm_bydst_should_resize(net, dir, NULL))
1048 schedule_work(&net->xfrm.policy_hash_work);
1051 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1052 int dir)
1054 struct net *net = xp_net(pol);
1056 if (hlist_unhashed(&pol->bydst))
1057 return NULL;
1059 hlist_del(&pol->bydst);
1060 hlist_del(&pol->byidx);
1061 list_del(&pol->walk.all);
1062 net->xfrm.policy_count[dir]--;
1064 return pol;
1067 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1069 write_lock_bh(&xfrm_policy_lock);
1070 pol = __xfrm_policy_unlink(pol, dir);
1071 write_unlock_bh(&xfrm_policy_lock);
1072 if (pol) {
1073 xfrm_policy_kill(pol);
1074 return 0;
1076 return -ENOENT;
1078 EXPORT_SYMBOL(xfrm_policy_delete);
1080 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1082 struct net *net = xp_net(pol);
1083 struct xfrm_policy *old_pol;
1085 #ifdef CONFIG_XFRM_SUB_POLICY
1086 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1087 return -EINVAL;
1088 #endif
1090 write_lock_bh(&xfrm_policy_lock);
1091 old_pol = sk->sk_policy[dir];
1092 sk->sk_policy[dir] = pol;
1093 if (pol) {
1094 pol->curlft.add_time = get_seconds();
1095 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
1096 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1098 if (old_pol)
1099 /* Unlinking always succeeds. This is the only function
1100 * allowed to delete or replace a socket policy.
1101 */
1102 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1103 write_unlock_bh(&xfrm_policy_lock);
1105 if (old_pol) {
1106 xfrm_policy_kill(old_pol);
1108 return 0;
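/* Duplicate a socket policy (selector, lifetimes, templates and
 * security context) and link the copy as a per-socket policy for the
 * given direction.
 */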
1111 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1113 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1115 if (newp) {
1116 newp->selector = old->selector;
1117 if (security_xfrm_policy_clone(old->security,
1118 &newp->security)) {
1119 kfree(newp);
1120 return NULL; /* ENOMEM */
1122 newp->lft = old->lft;
1123 newp->curlft = old->curlft;
1124 newp->mark = old->mark;
1125 newp->action = old->action;
1126 newp->flags = old->flags;
1127 newp->xfrm_nr = old->xfrm_nr;
1128 newp->index = old->index;
1129 newp->type = old->type;
1130 memcpy(newp->xfrm_vec, old->xfrm_vec,
1131 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1132 write_lock_bh(&xfrm_policy_lock);
1133 __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
1134 write_unlock_bh(&xfrm_policy_lock);
1135 xfrm_pol_put(newp);
1137 return newp;
1140 int __xfrm_sk_clone_policy(struct sock *sk)
1142 struct xfrm_policy *p0 = sk->sk_policy[0],
1143 *p1 = sk->sk_policy[1];
1145 sk->sk_policy[0] = sk->sk_policy[1] = NULL;
1146 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
1147 return -ENOMEM;
1148 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
1149 return -ENOMEM;
1150 return 0;
1153 static int
1154 xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
1155 unsigned short family)
1157 int err;
1158 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1160 if (unlikely(afinfo == NULL))
1161 return -EINVAL;
1162 err = afinfo->get_saddr(net, local, remote);
1163 xfrm_policy_put_afinfo(afinfo);
1164 return err;
1167 /* Resolve list of templates for the flow, given policy. */
1169 static int
1170 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1171 struct xfrm_state **xfrm, unsigned short family)
1173 struct net *net = xp_net(policy);
1174 int nx;
1175 int i, error;
1176 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1177 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1178 xfrm_address_t tmp;
1180 for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
1181 struct xfrm_state *x;
1182 xfrm_address_t *remote = daddr;
1183 xfrm_address_t *local = saddr;
1184 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1186 if (tmpl->mode == XFRM_MODE_TUNNEL ||
1187 tmpl->mode == XFRM_MODE_BEET) {
1188 remote = &tmpl->id.daddr;
1189 local = &tmpl->saddr;
1190 if (xfrm_addr_any(local, tmpl->encap_family)) {
1191 error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
1192 if (error)
1193 goto fail;
1194 local = &tmp;
1198 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1200 if (x && x->km.state == XFRM_STATE_VALID) {
1201 xfrm[nx++] = x;
1202 daddr = remote;
1203 saddr = local;
1204 continue;
1206 if (x) {
1207 error = (x->km.state == XFRM_STATE_ERROR ?
1208 -EINVAL : -EAGAIN);
1209 xfrm_state_put(x);
1211 else if (error == -ESRCH)
1212 error = -EAGAIN;
1214 if (!tmpl->optional)
1215 goto fail;
1217 return nx;
1219 fail:
1220 for (nx--; nx>=0; nx--)
1221 xfrm_state_put(xfrm[nx]);
1222 return error;
1225 static int
1226 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1227 struct xfrm_state **xfrm, unsigned short family)
1229 struct xfrm_state *tp[XFRM_MAX_DEPTH];
1230 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1231 int cnx = 0;
1232 int error;
1233 int ret;
1234 int i;
1236 for (i = 0; i < npols; i++) {
1237 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1238 error = -ENOBUFS;
1239 goto fail;
1242 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1243 if (ret < 0) {
1244 error = ret;
1245 goto fail;
1246 } else
1247 cnx += ret;
1250 /* found states are sorted for outbound processing */
1251 if (npols > 1)
1252 xfrm_state_sort(xfrm, tpp, cnx, family);
1254 return cnx;
1256 fail:
1257 for (cnx--; cnx>=0; cnx--)
1258 xfrm_state_put(tpp[cnx]);
1259 return error;
1263 /* Check that the bundle accepts the flow and its components are
1264 * still valid.
1265 */
1267 static inline int xfrm_get_tos(const struct flowi *fl, int family)
1269 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1270 int tos;
1272 if (!afinfo)
1273 return -EINVAL;
1275 tos = afinfo->get_tos(fl);
1277 xfrm_policy_put_afinfo(afinfo);
1279 return tos;
1282 static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1284 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1285 struct dst_entry *dst = &xdst->u.dst;
1287 if (xdst->route == NULL) {
1288 /* Dummy bundle - if it has xfrms we were not
1289 * able to build bundle as template resolution failed.
1290 * It means we need to try again resolving. */
1291 if (xdst->num_xfrms > 0)
1292 return NULL;
1293 } else {
1294 /* Real bundle */
1295 if (stale_bundle(dst))
1296 return NULL;
1299 dst_hold(dst);
1300 return flo;
1303 static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1305 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1306 struct dst_entry *dst = &xdst->u.dst;
1308 if (!xdst->route)
1309 return 0;
1310 if (stale_bundle(dst))
1311 return 0;
1313 return 1;
1316 static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1318 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1319 struct dst_entry *dst = &xdst->u.dst;
1321 dst_free(dst);
1324 static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1325 .get = xfrm_bundle_flo_get,
1326 .check = xfrm_bundle_flo_check,
1327 .delete = xfrm_bundle_flo_delete,
1330 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1332 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1333 struct dst_ops *dst_ops;
1334 struct xfrm_dst *xdst;
1336 if (!afinfo)
1337 return ERR_PTR(-EINVAL);
1339 switch (family) {
1340 case AF_INET:
1341 dst_ops = &net->xfrm.xfrm4_dst_ops;
1342 break;
1343 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1344 case AF_INET6:
1345 dst_ops = &net->xfrm.xfrm6_dst_ops;
1346 break;
1347 #endif
1348 default:
1349 BUG();
1351 xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);
1352 memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry));
1353 xfrm_policy_put_afinfo(afinfo);
1355 if (likely(xdst))
1356 xdst->flo.ops = &xfrm_bundle_fc_ops;
1357 else
1358 xdst = ERR_PTR(-ENOBUFS);
1360 return xdst;
1363 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1364 int nfheader_len)
1366 struct xfrm_policy_afinfo *afinfo =
1367 xfrm_policy_get_afinfo(dst->ops->family);
1368 int err;
1370 if (!afinfo)
1371 return -EINVAL;
1373 err = afinfo->init_path(path, dst, nfheader_len);
1375 xfrm_policy_put_afinfo(afinfo);
1377 return err;
1380 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1381 const struct flowi *fl)
1383 struct xfrm_policy_afinfo *afinfo =
1384 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1385 int err;
1387 if (!afinfo)
1388 return -EINVAL;
1390 err = afinfo->fill_dst(xdst, dev, fl);
1392 xfrm_policy_put_afinfo(afinfo);
1394 return err;
1398 /* Allocate a chain of dst_entry's, attach known xfrm's, calculate
1399 * all the metrics... In short, bundle a bundle.
1400 */
1402 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1403 struct xfrm_state **xfrm, int nx,
1404 const struct flowi *fl,
1405 struct dst_entry *dst)
1407 struct net *net = xp_net(policy);
1408 unsigned long now = jiffies;
1409 struct net_device *dev;
1410 struct xfrm_mode *inner_mode;
1411 struct dst_entry *dst_prev = NULL;
1412 struct dst_entry *dst0 = NULL;
1413 int i = 0;
1414 int err;
1415 int header_len = 0;
1416 int nfheader_len = 0;
1417 int trailer_len = 0;
1418 int tos;
1419 int family = policy->selector.family;
1420 xfrm_address_t saddr, daddr;
1422 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1424 tos = xfrm_get_tos(fl, family);
1425 err = tos;
1426 if (tos < 0)
1427 goto put_states;
1429 dst_hold(dst);
1431 for (; i < nx; i++) {
1432 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1433 struct dst_entry *dst1 = &xdst->u.dst;
1435 err = PTR_ERR(xdst);
1436 if (IS_ERR(xdst)) {
1437 dst_release(dst);
1438 goto put_states;
1441 if (xfrm[i]->sel.family == AF_UNSPEC) {
1442 inner_mode = xfrm_ip2inner_mode(xfrm[i],
1443 xfrm_af2proto(family));
1444 if (!inner_mode) {
1445 err = -EAFNOSUPPORT;
1446 dst_release(dst);
1447 goto put_states;
1449 } else
1450 inner_mode = xfrm[i]->inner_mode;
1452 if (!dst_prev)
1453 dst0 = dst1;
1454 else {
1455 dst_prev->child = dst_clone(dst1);
1456 dst1->flags |= DST_NOHASH;
1459 xdst->route = dst;
1460 dst_copy_metrics(dst1, dst);
1462 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1463 family = xfrm[i]->props.family;
1464 dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
1465 family);
1466 err = PTR_ERR(dst);
1467 if (IS_ERR(dst))
1468 goto put_states;
1469 } else
1470 dst_hold(dst);
1472 dst1->xfrm = xfrm[i];
1473 xdst->xfrm_genid = xfrm[i]->genid;
1475 dst1->obsolete = -1;
1476 dst1->flags |= DST_HOST;
1477 dst1->lastuse = now;
1479 dst1->input = dst_discard;
1480 dst1->output = inner_mode->afinfo->output;
1482 dst1->next = dst_prev;
1483 dst_prev = dst1;
1485 header_len += xfrm[i]->props.header_len;
1486 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1487 nfheader_len += xfrm[i]->props.header_len;
1488 trailer_len += xfrm[i]->props.trailer_len;
1491 dst_prev->child = dst;
1492 dst0->path = dst;
1494 err = -ENODEV;
1495 dev = dst->dev;
1496 if (!dev)
1497 goto free_dst;
1499 /* Copy neighbour for reachability confirmation */
1500 dst0->neighbour = neigh_clone(dst->neighbour);
1502 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1503 xfrm_init_pmtu(dst_prev);
1505 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1506 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1508 err = xfrm_fill_dst(xdst, dev, fl);
1509 if (err)
1510 goto free_dst;
1512 dst_prev->header_len = header_len;
1513 dst_prev->trailer_len = trailer_len;
1514 header_len -= xdst->u.dst.xfrm->props.header_len;
1515 trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1518 out:
1519 return dst0;
1521 put_states:
1522 for (; i < nx; i++)
1523 xfrm_state_put(xfrm[i]);
1524 free_dst:
1525 if (dst0)
1526 dst_free(dst0);
1527 dst0 = ERR_PTR(err);
1528 goto out;
1531 static int inline
1532 xfrm_dst_alloc_copy(void **target, const void *src, int size)
1534 if (!*target) {
1535 *target = kmalloc(size, GFP_ATOMIC);
1536 if (!*target)
1537 return -ENOMEM;
1539 memcpy(*target, src, size);
1540 return 0;
1543 static int inline
1544 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
1546 #ifdef CONFIG_XFRM_SUB_POLICY
1547 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1548 return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1549 sel, sizeof(*sel));
1550 #else
1551 return 0;
1552 #endif
1555 static int inline
1556 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
1558 #ifdef CONFIG_XFRM_SUB_POLICY
1559 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1560 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1561 #else
1562 return 0;
1563 #endif
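/* Expand the looked-up policy set: with CONFIG_XFRM_SUB_POLICY a
 * matching sub-policy is paired with the corresponding main policy.
 * *num_xfrms returns the total template count, or -1 if any policy
 * blocks the flow.
 */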
1566 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1567 struct xfrm_policy **pols,
1568 int *num_pols, int *num_xfrms)
1570 int i;
1572 if (*num_pols == 0 || !pols[0]) {
1573 *num_pols = 0;
1574 *num_xfrms = 0;
1575 return 0;
1577 if (IS_ERR(pols[0]))
1578 return PTR_ERR(pols[0]);
1580 *num_xfrms = pols[0]->xfrm_nr;
1582 #ifdef CONFIG_XFRM_SUB_POLICY
1583 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1584 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1585 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1586 XFRM_POLICY_TYPE_MAIN,
1587 fl, family,
1588 XFRM_POLICY_OUT);
1589 if (pols[1]) {
1590 if (IS_ERR(pols[1])) {
1591 xfrm_pols_put(pols, *num_pols);
1592 return PTR_ERR(pols[1]);
1594 (*num_pols) ++;
1595 (*num_xfrms) += pols[1]->xfrm_nr;
1598 #endif
1599 for (i = 0; i < *num_pols; i++) {
1600 if (pols[i]->action != XFRM_POLICY_ALLOW) {
1601 *num_xfrms = -1;
1602 break;
1606 return 0;
1610 static struct xfrm_dst *
1611 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1612 const struct flowi *fl, u16 family,
1613 struct dst_entry *dst_orig)
1615 struct net *net = xp_net(pols[0]);
1616 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1617 struct dst_entry *dst;
1618 struct xfrm_dst *xdst;
1619 int err;
1621 /* Try to instantiate a bundle */
1622 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1623 if (err <= 0) {
1624 if (err != 0 && err != -EAGAIN)
1625 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1626 return ERR_PTR(err);
1629 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1630 if (IS_ERR(dst)) {
1631 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1632 return ERR_CAST(dst);
1635 xdst = (struct xfrm_dst *)dst;
1636 xdst->num_xfrms = err;
1637 if (num_pols > 1)
1638 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1639 else
1640 err = xfrm_dst_update_origin(dst, fl);
1641 if (unlikely(err)) {
1642 dst_free(dst);
1643 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1644 return ERR_PTR(err);
1647 xdst->num_pols = num_pols;
1648 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
1649 xdst->policy_genid = atomic_read(&pols[0]->genid);
1651 return xdst;
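/* Flow-cache resolver for output bundles: reuse the cached bundle if
 * its policies are still alive, otherwise look the policies up again
 * and build a new bundle, or a dummy one while templates cannot yet
 * be resolved.
 */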
1654 static struct flow_cache_object *
1655 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
1656 struct flow_cache_object *oldflo, void *ctx)
1658 struct dst_entry *dst_orig = (struct dst_entry *)ctx;
1659 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1660 struct xfrm_dst *xdst, *new_xdst;
1661 int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
1663 /* Check if the policies from old bundle are usable */
1664 xdst = NULL;
1665 if (oldflo) {
1666 xdst = container_of(oldflo, struct xfrm_dst, flo);
1667 num_pols = xdst->num_pols;
1668 num_xfrms = xdst->num_xfrms;
1669 pol_dead = 0;
1670 for (i = 0; i < num_pols; i++) {
1671 pols[i] = xdst->pols[i];
1672 pol_dead |= pols[i]->walk.dead;
1674 if (pol_dead) {
1675 dst_free(&xdst->u.dst);
1676 xdst = NULL;
1677 num_pols = 0;
1678 num_xfrms = 0;
1679 oldflo = NULL;
1683 /* Resolve policies to use if we couldn't get them from
1684 * previous cache entry */
1685 if (xdst == NULL) {
1686 num_pols = 1;
1687 pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
1688 err = xfrm_expand_policies(fl, family, pols,
1689 &num_pols, &num_xfrms);
1690 if (err < 0)
1691 goto inc_error;
1692 if (num_pols == 0)
1693 return NULL;
1694 if (num_xfrms <= 0)
1695 goto make_dummy_bundle;
1698 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
1699 if (IS_ERR(new_xdst)) {
1700 err = PTR_ERR(new_xdst);
1701 if (err != -EAGAIN)
1702 goto error;
1703 if (oldflo == NULL)
1704 goto make_dummy_bundle;
1705 dst_hold(&xdst->u.dst);
1706 return oldflo;
1707 } else if (new_xdst == NULL) {
1708 num_xfrms = 0;
1709 if (oldflo == NULL)
1710 goto make_dummy_bundle;
1711 xdst->num_xfrms = 0;
1712 dst_hold(&xdst->u.dst);
1713 return oldflo;
1716 /* Kill the previous bundle */
1717 if (xdst) {
1718 /* The policies were stolen for newly generated bundle */
1719 xdst->num_pols = 0;
1720 dst_free(&xdst->u.dst);
1723 /* The flow cache does not hold a reference, it dst_free()'s,
1724 * but we do need to return one reference for the original caller */
1725 dst_hold(&new_xdst->u.dst);
1726 return &new_xdst->flo;
1728 make_dummy_bundle:
1729 /* We found policies, but there are no bundles to instantiate:
1730 * either the policy blocks, it has no transformations, or
1731 * we could not build the template (no xfrm_states). */
1732 xdst = xfrm_alloc_dst(net, family);
1733 if (IS_ERR(xdst)) {
1734 xfrm_pols_put(pols, num_pols);
1735 return ERR_CAST(xdst);
1737 xdst->num_pols = num_pols;
1738 xdst->num_xfrms = num_xfrms;
1739 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
1741 dst_hold(&xdst->u.dst);
1742 return &xdst->flo;
1744 inc_error:
1745 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1746 error:
1747 if (xdst != NULL)
1748 dst_free(&xdst->u.dst);
1749 else
1750 xfrm_pols_put(pols, num_pols);
1751 return ERR_PTR(err);
1754 static struct dst_entry *make_blackhole(struct net *net, u16 family,
1755 struct dst_entry *dst_orig)
1757 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1758 struct dst_entry *ret;
1760 if (!afinfo) {
1761 dst_release(dst_orig);
1762 ret = ERR_PTR(-EINVAL);
1763 } else {
1764 ret = afinfo->blackhole_route(net, dst_orig);
1766 xfrm_policy_put_afinfo(afinfo);
1768 return ret;
1771 /* Main function: finds/creates a bundle for given flow.
1772 *
1773 * At the moment we eat a raw IP route. Mostly to speed up lookups
1774 * on interfaces with disabled IPsec.
1775 */
1776 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
1777 const struct flowi *fl,
1778 struct sock *sk, int flags)
1780 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1781 struct flow_cache_object *flo;
1782 struct xfrm_dst *xdst;
1783 struct dst_entry *dst, *route;
1784 u16 family = dst_orig->ops->family;
1785 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
1786 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
1788 restart:
1789 dst = NULL;
1790 xdst = NULL;
1791 route = NULL;
1793 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
1794 num_pols = 1;
1795 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
1796 err = xfrm_expand_policies(fl, family, pols,
1797 &num_pols, &num_xfrms);
1798 if (err < 0)
1799 goto dropdst;
1801 if (num_pols) {
1802 if (num_xfrms <= 0) {
1803 drop_pols = num_pols;
1804 goto no_transform;
1807 xdst = xfrm_resolve_and_create_bundle(
1808 pols, num_pols, fl,
1809 family, dst_orig);
1810 if (IS_ERR(xdst)) {
1811 xfrm_pols_put(pols, num_pols);
1812 err = PTR_ERR(xdst);
1813 goto dropdst;
1814 } else if (xdst == NULL) {
1815 num_xfrms = 0;
1816 drop_pols = num_pols;
1817 goto no_transform;
1820 dst_hold(&xdst->u.dst);
1822 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
1823 xdst->u.dst.next = xfrm_policy_sk_bundles;
1824 xfrm_policy_sk_bundles = &xdst->u.dst;
1825 spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
1827 route = xdst->route;
1831 if (xdst == NULL) {
1832 /* To accelerate a bit... */
1833 if ((dst_orig->flags & DST_NOXFRM) ||
1834 !net->xfrm.policy_count[XFRM_POLICY_OUT])
1835 goto nopol;
1837 flo = flow_cache_lookup(net, fl, family, dir,
1838 xfrm_bundle_lookup, dst_orig);
1839 if (flo == NULL)
1840 goto nopol;
1841 if (IS_ERR(flo)) {
1842 err = PTR_ERR(flo);
1843 goto dropdst;
1845 xdst = container_of(flo, struct xfrm_dst, flo);
1847 num_pols = xdst->num_pols;
1848 num_xfrms = xdst->num_xfrms;
1849 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols);
1850 route = xdst->route;
1853 dst = &xdst->u.dst;
1854 if (route == NULL && num_xfrms > 0) {
1855 /* The only case when xfrm_bundle_lookup() returns a
1856 * bundle with null route, is when the template could
1857 * not be resolved. It means policies are there, but
1858 * bundle could not be created, since we don't yet
1859 * have the xfrm_state's. We need to wait for KM to
1860 * negotiate new SA's or bail out with error.*/
1861 if (net->xfrm.sysctl_larval_drop) {
1862 /* EREMOTE tells the caller to generate
1863 * a one-shot blackhole route. */
1864 dst_release(dst);
1865 xfrm_pols_put(pols, drop_pols);
1866 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1868 return make_blackhole(net, family, dst_orig);
1870 if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
1871 DECLARE_WAITQUEUE(wait, current);
1873 add_wait_queue(&net->xfrm.km_waitq, &wait);
1874 set_current_state(TASK_INTERRUPTIBLE);
1875 schedule();
1876 set_current_state(TASK_RUNNING);
1877 remove_wait_queue(&net->xfrm.km_waitq, &wait);
1879 if (!signal_pending(current)) {
1880 dst_release(dst);
1881 goto restart;
1884 err = -ERESTART;
1885 } else
1886 err = -EAGAIN;
1888 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1889 goto error;
1892 no_transform:
1893 if (num_pols == 0)
1894 goto nopol;
1896 if ((flags & XFRM_LOOKUP_ICMP) &&
1897 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
1898 err = -ENOENT;
1899 goto error;
1902 for (i = 0; i < num_pols; i++)
1903 pols[i]->curlft.use_time = get_seconds();
1905 if (num_xfrms < 0) {
1906 /* Prohibit the flow */
1907 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
1908 err = -EPERM;
1909 goto error;
1910 } else if (num_xfrms > 0) {
1911 /* Flow transformed */
1912 dst_release(dst_orig);
1913 } else {
1914 /* Flow passes untransformed */
1915 dst_release(dst);
1916 dst = dst_orig;
1919 xfrm_pols_put(pols, drop_pols);
1920 return dst;
1922 nopol:
1923 if (!(flags & XFRM_LOOKUP_ICMP)) {
1924 dst = dst_orig;
1925 goto ok;
1927 err = -ENOENT;
1928 error:
1929 dst_release(dst);
1930 dropdst:
1931 dst_release(dst_orig);
1932 xfrm_pols_put(pols, drop_pols);
1933 return ERR_PTR(err);
1935 EXPORT_SYMBOL(xfrm_lookup);
1937 static inline int
1938 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
1940 struct xfrm_state *x;
1942 if (!skb->sp || idx < 0 || idx >= skb->sp->len)
1943 return 0;
1944 x = skb->sp->xvec[idx];
1945 if (!x->type->reject)
1946 return 0;
1947 return x->type->reject(x, skb, fl);
1950 /* When the skb is transformed back to its "native" form, we have to
1951 * check policy restrictions. At the moment we do this in a maximally
1952 * stupid way. Shame on me. :-) Of course, connected sockets must
1953 * have their policy cached.
1954 */
1956 static inline int
1957 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
1958 unsigned short family)
1960 if (xfrm_state_kern(x))
1961 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
1962 return x->id.proto == tmpl->id.proto &&
1963 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
1964 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
1965 x->props.mode == tmpl->mode &&
1966 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
1967 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
1968 !(x->props.mode != XFRM_MODE_TRANSPORT &&
1969 xfrm_state_addr_cmp(tmpl, x, family));
1972 /*
1973 * 0 or more than 0 is returned when validation succeeds (either bypass
1974 * because of optional transport mode, or the next index of the matched
1975 * secpath state with the template).
1976 * -1 is returned when no matching template is found.
1977 * Otherwise "-2 - errored_index" is returned.
1978 */
1979 static inline int
1980 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
1981 unsigned short family)
1983 int idx = start;
1985 if (tmpl->optional) {
1986 if (tmpl->mode == XFRM_MODE_TRANSPORT)
1987 return start;
1988 } else
1989 start = -1;
1990 for (; idx < sp->len; idx++) {
1991 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
1992 return ++idx;
1993 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
1994 if (start == -1)
1995 start = -2-idx;
1996 break;
1999 return start;
2002 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2003 unsigned int family, int reverse)
2005 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2006 int err;
2008 if (unlikely(afinfo == NULL))
2009 return -EAFNOSUPPORT;
2011 afinfo->decode_session(skb, fl, reverse);
2012 err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2013 xfrm_policy_put_afinfo(afinfo);
2014 return err;
2016 EXPORT_SYMBOL(__xfrm_decode_session);
2018 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2020 for (; k < sp->len; k++) {
2021 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2022 *idxp = k;
2023 return 1;
2027 return 0;
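/* Inbound policy check: decode the flow, verify the SAs actually used
 * against their selectors, find the applicable socket or SPD policy,
 * and match the packet's secpath against the policy's templates.
 */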
2030 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2031 unsigned short family)
2033 struct net *net = dev_net(skb->dev);
2034 struct xfrm_policy *pol;
2035 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2036 int npols = 0;
2037 int xfrm_nr;
2038 int pi;
2039 int reverse;
2040 struct flowi fl;
2041 u8 fl_dir;
2042 int xerr_idx = -1;
2044 reverse = dir & ~XFRM_POLICY_MASK;
2045 dir &= XFRM_POLICY_MASK;
2046 fl_dir = policy_to_flow_dir(dir);
2048 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2049 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2050 return 0;
2053 nf_nat_decode_session(skb, &fl, family);
2055 /* First, check used SA against their selectors. */
2056 if (skb->sp) {
2057 int i;
2059 for (i=skb->sp->len-1; i>=0; i--) {
2060 struct xfrm_state *x = skb->sp->xvec[i];
2061 if (!xfrm_selector_match(&x->sel, &fl, family)) {
2062 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2063 return 0;
2068 pol = NULL;
2069 if (sk && sk->sk_policy[dir]) {
2070 pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2071 if (IS_ERR(pol)) {
2072 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2073 return 0;
2077 if (!pol) {
2078 struct flow_cache_object *flo;
2080 flo = flow_cache_lookup(net, &fl, family, fl_dir,
2081 xfrm_policy_lookup, NULL);
2082 if (IS_ERR_OR_NULL(flo))
2083 pol = ERR_CAST(flo);
2084 else
2085 pol = container_of(flo, struct xfrm_policy, flo);
2088 if (IS_ERR(pol)) {
2089 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2090 return 0;
2093 if (!pol) {
2094 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2095 xfrm_secpath_reject(xerr_idx, skb, &fl);
2096 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2097 return 0;
2099 return 1;
2102 pol->curlft.use_time = get_seconds();
2104 pols[0] = pol;
2105 npols ++;
2106 #ifdef CONFIG_XFRM_SUB_POLICY
2107 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2108 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2109 &fl, family,
2110 XFRM_POLICY_IN);
2111 if (pols[1]) {
2112 if (IS_ERR(pols[1])) {
2113 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2114 return 0;
2116 pols[1]->curlft.use_time = get_seconds();
2117 npols ++;
2120 #endif
2122 if (pol->action == XFRM_POLICY_ALLOW) {
2123 struct sec_path *sp;
2124 static struct sec_path dummy;
2125 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2126 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2127 struct xfrm_tmpl **tpp = tp;
2128 int ti = 0;
2129 int i, k;
2131 if ((sp = skb->sp) == NULL)
2132 sp = &dummy;
2134 for (pi = 0; pi < npols; pi++) {
2135 if (pols[pi] != pol &&
2136 pols[pi]->action != XFRM_POLICY_ALLOW) {
2137 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2138 goto reject;
2140 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2141 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2142 goto reject_error;
2144 for (i = 0; i < pols[pi]->xfrm_nr; i++)
2145 tpp[ti++] = &pols[pi]->xfrm_vec[i];
2147 xfrm_nr = ti;
2148 if (npols > 1) {
2149 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
2150 tpp = stp;
2153 /* For each tunnel xfrm, find the first matching tmpl.
2154 * For each tmpl before that, find corresponding xfrm.
2155 * Order is _important_. Later we will implement
2156 * some barriers, but at the moment barriers
2157 * are implied between each two transformations.
2158 */
2159 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2160 k = xfrm_policy_ok(tpp[i], sp, k, family);
2161 if (k < 0) {
2162 if (k < -1)
2163 /* "-2 - errored_index" returned */
2164 xerr_idx = -(2+k);
2165 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2166 goto reject;
2170 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2171 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2172 goto reject;
2175 xfrm_pols_put(pols, npols);
2176 return 1;
2178 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2180 reject:
2181 xfrm_secpath_reject(xerr_idx, skb, &fl);
2182 reject_error:
2183 xfrm_pols_put(pols, npols);
2184 return 0;
2186 EXPORT_SYMBOL(__xfrm_policy_check);
2188 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2190 struct net *net = dev_net(skb->dev);
2191 struct flowi fl;
2192 struct dst_entry *dst;
2193 int res = 1;
2195 if (xfrm_decode_session(skb, &fl, family) < 0) {
2196 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2197 return 0;
2200 skb_dst_force(skb);
2202 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
2203 if (IS_ERR(dst)) {
2204 res = 0;
2205 dst = NULL;
2207 skb_dst_set(skb, dst);
2208 return res;
2210 EXPORT_SYMBOL(__xfrm_route_forward);
2212 /* Optimize later using cookies and generation ids. */
2214 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2216 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2217 * to "-1" to force all XFRM destinations to get validated by
2218 * dst_ops->check on every use. We do this because when a
2219 * normal route referenced by an XFRM dst is obsoleted we do
2220 * not go looking around for all parent referencing XFRM dsts
2221 * so that we can invalidate them. It is just too much work.
2222 * Instead we make the checks here on every use. For example:
2224 * XFRM dst A --> IPv4 dst X
2226 * X is the "xdst->route" of A (X is also the "dst->path" of A
2227 * in this example). If X is marked obsolete, "A" will not
2228 * notice. That's what we are validating here via the
2229 * stale_bundle() check.
2231 * When a policy's bundle is pruned, we dst_free() the XFRM
2232 * dst which causes its ->obsolete field to be set to a
2233 * positive non-zero integer. If an XFRM dst has been pruned
2234 * like this, we want to force a new route lookup.
2235 */
2236 if (dst->obsolete < 0 && !stale_bundle(dst))
2237 return dst;
2239 return NULL;
2240 }
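/* A bundle is stale when any transform state or cached route beneath it is
 * no longer valid; see xfrm_bundle_ok() below for the full set of checks.
 */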
2242 static int stale_bundle(struct dst_entry *dst)
2243 {
2244 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2245 }
2247 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2248 {
2249 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2250 dst->dev = dev_net(dev)->loopback_dev;
2251 dev_hold(dst->dev);
2252 dev_put(dev);
2253 }
2254 }
2255 EXPORT_SYMBOL(xfrm_dst_ifdown);
2257 static void xfrm_link_failure(struct sk_buff *skb)
2258 {
2259 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
2260 }
2262 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2263 {
2264 if (dst) {
2265 if (dst->obsolete) {
2266 dst_release(dst);
2267 dst = NULL;
2268 }
2269 }
2270 return dst;
2271 }
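/* __xfrm_garbage_collect - flush the flow cache and release every per-socket
 * policy bundle queued on xfrm_policy_sk_bundles.  Installed as the
 * per-family garbage_collect hook and also run on NETDEV_DOWN (see
 * xfrm_dev_event() below).
 */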
2273 static void __xfrm_garbage_collect(struct net *net)
2274 {
2275 struct dst_entry *head, *next;
2277 flow_cache_flush();
2279 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
2280 head = xfrm_policy_sk_bundles;
2281 xfrm_policy_sk_bundles = NULL;
2282 spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
2284 while (head) {
2285 next = head->next;
2286 dst_free(head);
2287 head = next;
2288 }
2289 }
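/* xfrm_init_pmtu - walk a freshly built bundle along the ->next chain,
 * caching each level's child and route MTUs, and set the RTAX_MTU metric to
 * the smaller of the state-adjusted child MTU and the cached route MTU.
 */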
2291 static void xfrm_init_pmtu(struct dst_entry *dst)
2292 {
2293 do {
2294 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2295 u32 pmtu, route_mtu_cached;
2297 pmtu = dst_mtu(dst->child);
2298 xdst->child_mtu_cached = pmtu;
2300 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2302 route_mtu_cached = dst_mtu(xdst->route);
2303 xdst->route_mtu_cached = route_mtu_cached;
2305 if (pmtu > route_mtu_cached)
2306 pmtu = route_mtu_cached;
2308 dst_metric_set(dst, RTAX_MTU, pmtu);
2309 } while ((dst = dst->next));
2310 }
2312 /* Check that the bundle accepts the flow and its components are
2313 * still valid.
2314 */
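/* Besides validating each level, xfrm_bundle_ok() refreshes the cached child
 * and route MTUs: if any level changed, the second loop starts at the deepest
 * change (tracked in 'last') and re-derives RTAX_MTU for each dst along the
 * ->next chain until it reaches 'first'.
 */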
2316 static int xfrm_bundle_ok(struct xfrm_dst *first)
2317 {
2318 struct dst_entry *dst = &first->u.dst;
2319 struct xfrm_dst *last;
2320 u32 mtu;
2322 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2323 (dst->dev && !netif_running(dst->dev)))
2324 return 0;
2326 last = NULL;
2328 do {
2329 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2331 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2332 return 0;
2333 if (xdst->xfrm_genid != dst->xfrm->genid)
2334 return 0;
2335 if (xdst->num_pols > 0 &&
2336 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2337 return 0;
2339 mtu = dst_mtu(dst->child);
2340 if (xdst->child_mtu_cached != mtu) {
2341 last = xdst;
2342 xdst->child_mtu_cached = mtu;
2343 }
2345 if (!dst_check(xdst->route, xdst->route_cookie))
2346 return 0;
2347 mtu = dst_mtu(xdst->route);
2348 if (xdst->route_mtu_cached != mtu) {
2349 last = xdst;
2350 xdst->route_mtu_cached = mtu;
2351 }
2353 dst = dst->child;
2354 } while (dst->xfrm);
2356 if (likely(!last))
2357 return 1;
2359 mtu = last->child_mtu_cached;
2360 for (;;) {
2361 dst = &last->u.dst;
2363 mtu = xfrm_state_mtu(dst->xfrm, mtu);
2364 if (mtu > last->route_mtu_cached)
2365 mtu = last->route_mtu_cached;
2366 dst_metric_set(dst, RTAX_MTU, mtu);
2368 if (last == first)
2369 break;
2371 last = (struct xfrm_dst *)last->u.dst.next;
2372 last->child_mtu_cached = mtu;
2373 }
2375 return 1;
2376 }
2378 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2379 {
2380 return dst_metric_advmss(dst->path);
2381 }
2383 static unsigned int xfrm_default_mtu(const struct dst_entry *dst)
2384 {
2385 return dst_mtu(dst->path);
2386 }
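/* xfrm_policy_register_afinfo - register the per-family policy hooks.  Fills
 * in any dst_ops callbacks the caller left NULL with the XFRM defaults above,
 * publishes the afinfo under xfrm_policy_afinfo_lock, and then copies the
 * resulting dst_ops into every existing namespace under the rtnl lock.
 */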
2388 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2389 {
2390 struct net *net;
2391 int err = 0;
2392 if (unlikely(afinfo == NULL))
2393 return -EINVAL;
2394 if (unlikely(afinfo->family >= NPROTO))
2395 return -EAFNOSUPPORT;
2396 write_lock_bh(&xfrm_policy_afinfo_lock);
2397 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2398 err = -ENOBUFS;
2399 else {
2400 struct dst_ops *dst_ops = afinfo->dst_ops;
2401 if (likely(dst_ops->kmem_cachep == NULL))
2402 dst_ops->kmem_cachep = xfrm_dst_cache;
2403 if (likely(dst_ops->check == NULL))
2404 dst_ops->check = xfrm_dst_check;
2405 if (likely(dst_ops->default_advmss == NULL))
2406 dst_ops->default_advmss = xfrm_default_advmss;
2407 if (likely(dst_ops->default_mtu == NULL))
2408 dst_ops->default_mtu = xfrm_default_mtu;
2409 if (likely(dst_ops->negative_advice == NULL))
2410 dst_ops->negative_advice = xfrm_negative_advice;
2411 if (likely(dst_ops->link_failure == NULL))
2412 dst_ops->link_failure = xfrm_link_failure;
2413 if (likely(afinfo->garbage_collect == NULL))
2414 afinfo->garbage_collect = __xfrm_garbage_collect;
2415 xfrm_policy_afinfo[afinfo->family] = afinfo;
2416 }
2417 write_unlock_bh(&xfrm_policy_afinfo_lock);
2419 rtnl_lock();
2420 for_each_net(net) {
2421 struct dst_ops *xfrm_dst_ops;
2423 switch (afinfo->family) {
2424 case AF_INET:
2425 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2426 break;
2427 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2428 case AF_INET6:
2429 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2430 break;
2431 #endif
2432 default:
2433 BUG();
2434 }
2435 *xfrm_dst_ops = *afinfo->dst_ops;
2436 }
2437 rtnl_unlock();
2439 return err;
2440 }
2441 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2443 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2444 {
2445 int err = 0;
2446 if (unlikely(afinfo == NULL))
2447 return -EINVAL;
2448 if (unlikely(afinfo->family >= NPROTO))
2449 return -EAFNOSUPPORT;
2450 write_lock_bh(&xfrm_policy_afinfo_lock);
2451 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2452 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2453 err = -EINVAL;
2454 else {
2455 struct dst_ops *dst_ops = afinfo->dst_ops;
2456 xfrm_policy_afinfo[afinfo->family] = NULL;
2457 dst_ops->kmem_cachep = NULL;
2458 dst_ops->check = NULL;
2459 dst_ops->negative_advice = NULL;
2460 dst_ops->link_failure = NULL;
2461 afinfo->garbage_collect = NULL;
2462 }
2463 }
2464 write_unlock_bh(&xfrm_policy_afinfo_lock);
2465 return err;
2466 }
2467 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2469 static void __net_init xfrm_dst_ops_init(struct net *net)
2470 {
2471 struct xfrm_policy_afinfo *afinfo;
2473 read_lock_bh(&xfrm_policy_afinfo_lock);
2474 afinfo = xfrm_policy_afinfo[AF_INET];
2475 if (afinfo)
2476 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2477 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2478 afinfo = xfrm_policy_afinfo[AF_INET6];
2479 if (afinfo)
2480 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2481 #endif
2482 read_unlock_bh(&xfrm_policy_afinfo_lock);
2483 }
2485 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
2486 {
2487 struct xfrm_policy_afinfo *afinfo;
2488 if (unlikely(family >= NPROTO))
2489 return NULL;
2490 read_lock(&xfrm_policy_afinfo_lock);
2491 afinfo = xfrm_policy_afinfo[family];
2492 if (unlikely(!afinfo))
2493 read_unlock(&xfrm_policy_afinfo_lock);
2494 return afinfo;
2495 }
2497 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
2498 {
2499 read_unlock(&xfrm_policy_afinfo_lock);
2500 }
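/* xfrm_dev_event - NETDEV_DOWN notifier: garbage-collect bundles in the
 * device's namespace so nothing keeps a reference to the departing device.
 */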
2502 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2503 {
2504 struct net_device *dev = ptr;
2506 switch (event) {
2507 case NETDEV_DOWN:
2508 __xfrm_garbage_collect(dev_net(dev));
2509 }
2510 return NOTIFY_DONE;
2511 }
2513 static struct notifier_block xfrm_dev_notifier = {
2514 .notifier_call = xfrm_dev_event,
2515 };
2517 #ifdef CONFIG_XFRM_STATISTICS
2518 static int __net_init xfrm_statistics_init(struct net *net)
2519 {
2520 int rv;
2522 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2523 sizeof(struct linux_xfrm_mib),
2524 __alignof__(struct linux_xfrm_mib)) < 0)
2525 return -ENOMEM;
2526 rv = xfrm_proc_init(net);
2527 if (rv < 0)
2528 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2529 return rv;
2530 }
2532 static void xfrm_statistics_fini(struct net *net)
2533 {
2534 xfrm_proc_fini(net);
2535 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2536 }
2537 #else
2538 static int __net_init xfrm_statistics_init(struct net *net)
2539 {
2540 return 0;
2541 }
2543 static void xfrm_statistics_fini(struct net *net)
2544 {
2545 }
2546 #endif
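/* xfrm_policy_init - per-namespace policy setup: allocate the by-index hash
 * and the per-direction by-destination hashes (8 buckets each to start; the
 * policy_hash_work resize worker can grow them later), initialise the inexact
 * lists and, for the initial namespace only, create the xfrm_dst slab cache
 * and register the device notifier.
 */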
2548 static int __net_init xfrm_policy_init(struct net *net)
2549 {
2550 unsigned int hmask, sz;
2551 int dir;
2553 if (net_eq(net, &init_net))
2554 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2555 sizeof(struct xfrm_dst),
2556 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2557 NULL);
2559 hmask = 8 - 1;
2560 sz = (hmask+1) * sizeof(struct hlist_head);
2562 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2563 if (!net->xfrm.policy_byidx)
2564 goto out_byidx;
2565 net->xfrm.policy_idx_hmask = hmask;
2567 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2568 struct xfrm_policy_hash *htab;
2570 net->xfrm.policy_count[dir] = 0;
2571 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2573 htab = &net->xfrm.policy_bydst[dir];
2574 htab->table = xfrm_hash_alloc(sz);
2575 if (!htab->table)
2576 goto out_bydst;
2577 htab->hmask = hmask;
2578 }
2580 INIT_LIST_HEAD(&net->xfrm.policy_all);
2581 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2582 if (net_eq(net, &init_net))
2583 register_netdevice_notifier(&xfrm_dev_notifier);
2584 return 0;
2586 out_bydst:
2587 for (dir--; dir >= 0; dir--) {
2588 struct xfrm_policy_hash *htab;
2590 htab = &net->xfrm.policy_bydst[dir];
2591 xfrm_hash_free(htab->table, sz);
2592 }
2593 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2594 out_byidx:
2595 return -ENOMEM;
2596 }
2598 static void xfrm_policy_fini(struct net *net)
2599 {
2600 struct xfrm_audit audit_info;
2601 unsigned int sz;
2602 int dir;
2604 flush_work(&net->xfrm.policy_hash_work);
2605 #ifdef CONFIG_XFRM_SUB_POLICY
2606 audit_info.loginuid = -1;
2607 audit_info.sessionid = -1;
2608 audit_info.secid = 0;
2609 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
2610 #endif
2611 audit_info.loginuid = -1;
2612 audit_info.sessionid = -1;
2613 audit_info.secid = 0;
2614 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2616 WARN_ON(!list_empty(&net->xfrm.policy_all));
2618 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2619 struct xfrm_policy_hash *htab;
2621 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2623 htab = &net->xfrm.policy_bydst[dir];
2624 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2625 WARN_ON(!hlist_empty(htab->table));
2626 xfrm_hash_free(htab->table, sz);
2627 }
2629 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2630 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2631 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2632 }
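/* Per-namespace bring-up order is statistics -> state -> policy -> dst_ops ->
 * sysctl; the error labels below unwind the same steps in reverse.
 */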
2634 static int __net_init xfrm_net_init(struct net *net)
2635 {
2636 int rv;
2638 rv = xfrm_statistics_init(net);
2639 if (rv < 0)
2640 goto out_statistics;
2641 rv = xfrm_state_init(net);
2642 if (rv < 0)
2643 goto out_state;
2644 rv = xfrm_policy_init(net);
2645 if (rv < 0)
2646 goto out_policy;
2647 xfrm_dst_ops_init(net);
2648 rv = xfrm_sysctl_init(net);
2649 if (rv < 0)
2650 goto out_sysctl;
2651 return 0;
2653 out_sysctl:
2654 xfrm_policy_fini(net);
2655 out_policy:
2656 xfrm_state_fini(net);
2657 out_state:
2658 xfrm_statistics_fini(net);
2659 out_statistics:
2660 return rv;
2661 }
2663 static void __net_exit xfrm_net_exit(struct net *net)
2664 {
2665 xfrm_sysctl_fini(net);
2666 xfrm_policy_fini(net);
2667 xfrm_state_fini(net);
2668 xfrm_statistics_fini(net);
2669 }
2671 static struct pernet_operations __net_initdata xfrm_net_ops = {
2672 .init = xfrm_net_init,
2673 .exit = xfrm_net_exit,
2674 };
2676 void __init xfrm_init(void)
2677 {
2678 register_pernet_subsys(&xfrm_net_ops);
2679 xfrm_input_init();
2680 }
2682 #ifdef CONFIG_AUDITSYSCALL
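/* xfrm_audit_common_policyinfo - append the policy's security context (if
 * any) and its selector addresses to an audit record; prefix lengths are
 * logged only when they are narrower than a full host address.
 */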
2683 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2684 struct audit_buffer *audit_buf)
2685 {
2686 struct xfrm_sec_ctx *ctx = xp->security;
2687 struct xfrm_selector *sel = &xp->selector;
2689 if (ctx)
2690 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2691 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2693 switch(sel->family) {
2694 case AF_INET:
2695 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2696 if (sel->prefixlen_s != 32)
2697 audit_log_format(audit_buf, " src_prefixlen=%d",
2698 sel->prefixlen_s);
2699 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
2700 if (sel->prefixlen_d != 32)
2701 audit_log_format(audit_buf, " dst_prefixlen=%d",
2702 sel->prefixlen_d);
2703 break;
2704 case AF_INET6:
2705 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
2706 if (sel->prefixlen_s != 128)
2707 audit_log_format(audit_buf, " src_prefixlen=%d",
2708 sel->prefixlen_s);
2709 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
2710 if (sel->prefixlen_d != 128)
2711 audit_log_format(audit_buf, " dst_prefixlen=%d",
2712 sel->prefixlen_d);
2713 break;
2714 }
2715 }
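/* The two helpers below emit "SPD-add" and "SPD-delete" audit records with
 * the caller's credentials, the operation result, and the policy details
 * gathered by xfrm_audit_common_policyinfo().
 */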
2717 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
2718 uid_t auid, u32 sessionid, u32 secid)
2719 {
2720 struct audit_buffer *audit_buf;
2722 audit_buf = xfrm_audit_start("SPD-add");
2723 if (audit_buf == NULL)
2724 return;
2725 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2726 audit_log_format(audit_buf, " res=%u", result);
2727 xfrm_audit_common_policyinfo(xp, audit_buf);
2728 audit_log_end(audit_buf);
2729 }
2730 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
2732 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
2733 uid_t auid, u32 sessionid, u32 secid)
2734 {
2735 struct audit_buffer *audit_buf;
2737 audit_buf = xfrm_audit_start("SPD-delete");
2738 if (audit_buf == NULL)
2739 return;
2740 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2741 audit_log_format(audit_buf, " res=%u", result);
2742 xfrm_audit_common_policyinfo(xp, audit_buf);
2743 audit_log_end(audit_buf);
2744 }
2745 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
2746 #endif
2748 #ifdef CONFIG_XFRM_MIGRATE
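/* Endpoint migration (used by mobility protocols such as Mobile IPv6 and
 * MOBIKE): xfrm_migrate() below locates the matching policy, clones each
 * affected state with the new addresses, rewrites the policy templates,
 * deletes the old states and finally notifies key managers via km_migrate().
 */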
2749 static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
2750 const struct xfrm_selector *sel_tgt)
2751 {
2752 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
2753 if (sel_tgt->family == sel_cmp->family &&
2754 xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
2755 sel_cmp->family) == 0 &&
2756 xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
2757 sel_cmp->family) == 0 &&
2758 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
2759 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
2760 return 1;
2761 }
2762 } else {
2763 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
2764 return 1;
2765 }
2766 }
2767 return 0;
2768 }
2770 static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel,
2771 u8 dir, u8 type)
2772 {
2773 struct xfrm_policy *pol, *ret = NULL;
2774 struct hlist_node *entry;
2775 struct hlist_head *chain;
2776 u32 priority = ~0U;
2778 read_lock_bh(&xfrm_policy_lock);
2779 chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
2780 hlist_for_each_entry(pol, entry, chain, bydst) {
2781 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2782 pol->type == type) {
2783 ret = pol;
2784 priority = ret->priority;
2785 break;
2786 }
2787 }
2788 chain = &init_net.xfrm.policy_inexact[dir];
2789 hlist_for_each_entry(pol, entry, chain, bydst) {
2790 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2791 pol->type == type &&
2792 pol->priority < priority) {
2793 ret = pol;
2794 break;
2795 }
2796 }
2798 if (ret)
2799 xfrm_pol_hold(ret);
2801 read_unlock_bh(&xfrm_policy_lock);
2803 return ret;
2804 }
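/* migrate_tmpl_match - a migrate entry matches a template when mode, protocol
 * and (if the entry specifies one) reqid agree; tunnel and BEET mode also
 * require the old endpoint addresses to match, while transport mode carries
 * no addresses to compare.
 */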
2806 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
2807 {
2808 int match = 0;
2810 if (t->mode == m->mode && t->id.proto == m->proto &&
2811 (m->reqid == 0 || t->reqid == m->reqid)) {
2812 switch (t->mode) {
2813 case XFRM_MODE_TUNNEL:
2814 case XFRM_MODE_BEET:
2815 if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
2816 m->old_family) == 0 &&
2817 xfrm_addr_cmp(&t->saddr, &m->old_saddr,
2818 m->old_family) == 0) {
2819 match = 1;
2820 }
2821 break;
2822 case XFRM_MODE_TRANSPORT:
2823 /* in transport mode the template does not store
2824 any IP addresses, so we only compare mode and
2825 protocol */
2826 match = 1;
2827 break;
2828 default:
2829 break;
2830 }
2831 }
2832 return match;
2833 }
2835 /* update endpoint address(es) of template(s) */
2836 static int xfrm_policy_migrate(struct xfrm_policy *pol,
2837 struct xfrm_migrate *m, int num_migrate)
2838 {
2839 struct xfrm_migrate *mp;
2840 int i, j, n = 0;
2842 write_lock_bh(&pol->lock);
2843 if (unlikely(pol->walk.dead)) {
2844 /* target policy has been deleted */
2845 write_unlock_bh(&pol->lock);
2846 return -ENOENT;
2847 }
2849 for (i = 0; i < pol->xfrm_nr; i++) {
2850 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
2851 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
2852 continue;
2853 n++;
2854 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
2855 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
2856 continue;
2857 /* update endpoints */
2858 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
2859 sizeof(pol->xfrm_vec[i].id.daddr));
2860 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
2861 sizeof(pol->xfrm_vec[i].saddr));
2862 pol->xfrm_vec[i].encap_family = mp->new_family;
2863 /* flush bundles */
2864 atomic_inc(&pol->genid);
2865 }
2866 }
2868 write_unlock_bh(&pol->lock);
2870 if (!n)
2871 return -ENODATA;
2873 return 0;
2874 }
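/* xfrm_migrate_check - sanity-check a migration list: it must contain between
 * 1 and XFRM_MAX_DEPTH entries, every entry must actually change its endpoint
 * to a non-wildcard address, and no two entries may describe the same old
 * endpoint/protocol/mode/reqid tuple.
 */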
2876 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
2877 {
2878 int i, j;
2880 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
2881 return -EINVAL;
2883 for (i = 0; i < num_migrate; i++) {
2884 if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
2885 m[i].old_family) == 0) &&
2886 (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
2887 m[i].old_family) == 0))
2888 return -EINVAL;
2889 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
2890 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
2891 return -EINVAL;
2893 /* check if there is any duplicated entry */
2894 for (j = i + 1; j < num_migrate; j++) {
2895 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
2896 sizeof(m[i].old_daddr)) &&
2897 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
2898 sizeof(m[i].old_saddr)) &&
2899 m[i].proto == m[j].proto &&
2900 m[i].mode == m[j].mode &&
2901 m[i].reqid == m[j].reqid &&
2902 m[i].old_family == m[j].old_family)
2903 return -EINVAL;
2904 }
2905 }
2907 return 0;
2908 }
2910 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2911 struct xfrm_migrate *m, int num_migrate,
2912 struct xfrm_kmaddress *k)
2913 {
2914 int i, err, nx_cur = 0, nx_new = 0;
2915 struct xfrm_policy *pol = NULL;
2916 struct xfrm_state *x, *xc;
2917 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
2918 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
2919 struct xfrm_migrate *mp;
2921 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
2922 goto out;
2924 /* Stage 1 - find policy */
2925 if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
2926 err = -ENOENT;
2927 goto out;
2928 }
2930 /* Stage 2 - find and update state(s) */
2931 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
2932 if ((x = xfrm_migrate_state_find(mp))) {
2933 x_cur[nx_cur] = x;
2934 nx_cur++;
2935 if ((xc = xfrm_state_migrate(x, mp))) {
2936 x_new[nx_new] = xc;
2937 nx_new++;
2938 } else {
2939 err = -ENODATA;
2940 goto restore_state;
2941 }
2942 }
2943 }
2945 /* Stage 3 - update policy */
2946 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
2947 goto restore_state;
2949 /* Stage 4 - delete old state(s) */
2950 if (nx_cur) {
2951 xfrm_states_put(x_cur, nx_cur);
2952 xfrm_states_delete(x_cur, nx_cur);
2953 }
2955 /* Stage 5 - announce */
2956 km_migrate(sel, dir, type, m, num_migrate, k);
2958 xfrm_pol_put(pol);
2960 return 0;
2961 out:
2962 return err;
2964 restore_state:
2965 if (pol)
2966 xfrm_pol_put(pol);
2967 if (nx_cur)
2968 xfrm_states_put(x_cur, nx_cur);
2969 if (nx_new)
2970 xfrm_states_delete(x_new, nx_new);
2972 return err;
2973 }
2974 EXPORT_SYMBOL(xfrm_migrate);
2975 #endif