6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * Kazunori MIYAZAWA @USAGI
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/audit.h>
31 #ifdef CONFIG_XFRM_STATISTICS
35 #include "xfrm_hash.h"
37 DEFINE_MUTEX(xfrm_cfg_mutex
);
38 EXPORT_SYMBOL(xfrm_cfg_mutex
);
40 static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock
);
41 static struct dst_entry
*xfrm_policy_sk_bundles
;
42 static DEFINE_RWLOCK(xfrm_policy_lock
);
44 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock
);
45 static struct xfrm_policy_afinfo
*xfrm_policy_afinfo
[NPROTO
];
47 static struct kmem_cache
*xfrm_dst_cache __read_mostly
;
49 static struct xfrm_policy_afinfo
*xfrm_policy_get_afinfo(unsigned short family
);
50 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo
*afinfo
);
51 static void xfrm_init_pmtu(struct dst_entry
*dst
);
52 static int stale_bundle(struct dst_entry
*dst
);
54 static struct xfrm_policy
*__xfrm_policy_unlink(struct xfrm_policy
*pol
,
58 __xfrm4_selector_match(struct xfrm_selector
*sel
, struct flowi
*fl
)
60 return addr_match(&fl
->fl4_dst
, &sel
->daddr
, sel
->prefixlen_d
) &&
61 addr_match(&fl
->fl4_src
, &sel
->saddr
, sel
->prefixlen_s
) &&
62 !((xfrm_flowi_dport(fl
) ^ sel
->dport
) & sel
->dport_mask
) &&
63 !((xfrm_flowi_sport(fl
) ^ sel
->sport
) & sel
->sport_mask
) &&
64 (fl
->proto
== sel
->proto
|| !sel
->proto
) &&
65 (fl
->oif
== sel
->ifindex
|| !sel
->ifindex
);
69 __xfrm6_selector_match(struct xfrm_selector
*sel
, struct flowi
*fl
)
71 return addr_match(&fl
->fl6_dst
, &sel
->daddr
, sel
->prefixlen_d
) &&
72 addr_match(&fl
->fl6_src
, &sel
->saddr
, sel
->prefixlen_s
) &&
73 !((xfrm_flowi_dport(fl
) ^ sel
->dport
) & sel
->dport_mask
) &&
74 !((xfrm_flowi_sport(fl
) ^ sel
->sport
) & sel
->sport_mask
) &&
75 (fl
->proto
== sel
->proto
|| !sel
->proto
) &&
76 (fl
->oif
== sel
->ifindex
|| !sel
->ifindex
);
79 int xfrm_selector_match(struct xfrm_selector
*sel
, struct flowi
*fl
,
80 unsigned short family
)
84 return __xfrm4_selector_match(sel
, fl
);
86 return __xfrm6_selector_match(sel
, fl
);
91 static inline struct dst_entry
*__xfrm_dst_lookup(struct net
*net
, int tos
,
92 xfrm_address_t
*saddr
,
93 xfrm_address_t
*daddr
,
96 struct xfrm_policy_afinfo
*afinfo
;
97 struct dst_entry
*dst
;
99 afinfo
= xfrm_policy_get_afinfo(family
);
100 if (unlikely(afinfo
== NULL
))
101 return ERR_PTR(-EAFNOSUPPORT
);
103 dst
= afinfo
->dst_lookup(net
, tos
, saddr
, daddr
);
105 xfrm_policy_put_afinfo(afinfo
);
110 static inline struct dst_entry
*xfrm_dst_lookup(struct xfrm_state
*x
, int tos
,
111 xfrm_address_t
*prev_saddr
,
112 xfrm_address_t
*prev_daddr
,
115 struct net
*net
= xs_net(x
);
116 xfrm_address_t
*saddr
= &x
->props
.saddr
;
117 xfrm_address_t
*daddr
= &x
->id
.daddr
;
118 struct dst_entry
*dst
;
120 if (x
->type
->flags
& XFRM_TYPE_LOCAL_COADDR
) {
124 if (x
->type
->flags
& XFRM_TYPE_REMOTE_COADDR
) {
129 dst
= __xfrm_dst_lookup(net
, tos
, saddr
, daddr
, family
);
132 if (prev_saddr
!= saddr
)
133 memcpy(prev_saddr
, saddr
, sizeof(*prev_saddr
));
134 if (prev_daddr
!= daddr
)
135 memcpy(prev_daddr
, daddr
, sizeof(*prev_daddr
));
141 static inline unsigned long make_jiffies(long secs
)
143 if (secs
>= (MAX_SCHEDULE_TIMEOUT
-1)/HZ
)
144 return MAX_SCHEDULE_TIMEOUT
-1;
149 static void xfrm_policy_timer(unsigned long data
)
151 struct xfrm_policy
*xp
= (struct xfrm_policy
*)data
;
152 unsigned long now
= get_seconds();
153 long next
= LONG_MAX
;
157 read_lock(&xp
->lock
);
159 if (unlikely(xp
->walk
.dead
))
162 dir
= xfrm_policy_id2dir(xp
->index
);
164 if (xp
->lft
.hard_add_expires_seconds
) {
165 long tmo
= xp
->lft
.hard_add_expires_seconds
+
166 xp
->curlft
.add_time
- now
;
172 if (xp
->lft
.hard_use_expires_seconds
) {
173 long tmo
= xp
->lft
.hard_use_expires_seconds
+
174 (xp
->curlft
.use_time
? : xp
->curlft
.add_time
) - now
;
180 if (xp
->lft
.soft_add_expires_seconds
) {
181 long tmo
= xp
->lft
.soft_add_expires_seconds
+
182 xp
->curlft
.add_time
- now
;
185 tmo
= XFRM_KM_TIMEOUT
;
190 if (xp
->lft
.soft_use_expires_seconds
) {
191 long tmo
= xp
->lft
.soft_use_expires_seconds
+
192 (xp
->curlft
.use_time
? : xp
->curlft
.add_time
) - now
;
195 tmo
= XFRM_KM_TIMEOUT
;
202 km_policy_expired(xp
, dir
, 0, 0);
203 if (next
!= LONG_MAX
&&
204 !mod_timer(&xp
->timer
, jiffies
+ make_jiffies(next
)))
208 read_unlock(&xp
->lock
);
213 read_unlock(&xp
->lock
);
214 if (!xfrm_policy_delete(xp
, dir
))
215 km_policy_expired(xp
, dir
, 1, 0);
219 static struct flow_cache_object
*xfrm_policy_flo_get(struct flow_cache_object
*flo
)
221 struct xfrm_policy
*pol
= container_of(flo
, struct xfrm_policy
, flo
);
223 if (unlikely(pol
->walk
.dead
))
231 static int xfrm_policy_flo_check(struct flow_cache_object
*flo
)
233 struct xfrm_policy
*pol
= container_of(flo
, struct xfrm_policy
, flo
);
235 return !pol
->walk
.dead
;
238 static void xfrm_policy_flo_delete(struct flow_cache_object
*flo
)
240 xfrm_pol_put(container_of(flo
, struct xfrm_policy
, flo
));
243 static const struct flow_cache_ops xfrm_policy_fc_ops
= {
244 .get
= xfrm_policy_flo_get
,
245 .check
= xfrm_policy_flo_check
,
246 .delete = xfrm_policy_flo_delete
,
249 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
253 struct xfrm_policy
*xfrm_policy_alloc(struct net
*net
, gfp_t gfp
)
255 struct xfrm_policy
*policy
;
257 policy
= kzalloc(sizeof(struct xfrm_policy
), gfp
);
260 write_pnet(&policy
->xp_net
, net
);
261 INIT_LIST_HEAD(&policy
->walk
.all
);
262 INIT_HLIST_NODE(&policy
->bydst
);
263 INIT_HLIST_NODE(&policy
->byidx
);
264 rwlock_init(&policy
->lock
);
265 atomic_set(&policy
->refcnt
, 1);
266 setup_timer(&policy
->timer
, xfrm_policy_timer
,
267 (unsigned long)policy
);
268 policy
->flo
.ops
= &xfrm_policy_fc_ops
;
272 EXPORT_SYMBOL(xfrm_policy_alloc
);
274 /* Destroy xfrm_policy: descendant resources must be released to this moment. */
276 void xfrm_policy_destroy(struct xfrm_policy
*policy
)
278 BUG_ON(!policy
->walk
.dead
);
280 if (del_timer(&policy
->timer
))
283 security_xfrm_policy_free(policy
->security
);
286 EXPORT_SYMBOL(xfrm_policy_destroy
);
288 /* Rule must be locked. Release descentant resources, announce
289 * entry dead. The rule must be unlinked from lists to the moment.
292 static void xfrm_policy_kill(struct xfrm_policy
*policy
)
294 policy
->walk
.dead
= 1;
296 atomic_inc(&policy
->genid
);
298 if (del_timer(&policy
->timer
))
299 xfrm_pol_put(policy
);
301 xfrm_pol_put(policy
);
304 static unsigned int xfrm_policy_hashmax __read_mostly
= 1 * 1024 * 1024;
306 static inline unsigned int idx_hash(struct net
*net
, u32 index
)
308 return __idx_hash(index
, net
->xfrm
.policy_idx_hmask
);
311 static struct hlist_head
*policy_hash_bysel(struct net
*net
, struct xfrm_selector
*sel
, unsigned short family
, int dir
)
313 unsigned int hmask
= net
->xfrm
.policy_bydst
[dir
].hmask
;
314 unsigned int hash
= __sel_hash(sel
, family
, hmask
);
316 return (hash
== hmask
+ 1 ?
317 &net
->xfrm
.policy_inexact
[dir
] :
318 net
->xfrm
.policy_bydst
[dir
].table
+ hash
);
321 static struct hlist_head
*policy_hash_direct(struct net
*net
, xfrm_address_t
*daddr
, xfrm_address_t
*saddr
, unsigned short family
, int dir
)
323 unsigned int hmask
= net
->xfrm
.policy_bydst
[dir
].hmask
;
324 unsigned int hash
= __addr_hash(daddr
, saddr
, family
, hmask
);
326 return net
->xfrm
.policy_bydst
[dir
].table
+ hash
;
329 static void xfrm_dst_hash_transfer(struct hlist_head
*list
,
330 struct hlist_head
*ndsttable
,
331 unsigned int nhashmask
)
333 struct hlist_node
*entry
, *tmp
, *entry0
= NULL
;
334 struct xfrm_policy
*pol
;
338 hlist_for_each_entry_safe(pol
, entry
, tmp
, list
, bydst
) {
341 h
= __addr_hash(&pol
->selector
.daddr
, &pol
->selector
.saddr
,
342 pol
->family
, nhashmask
);
345 hlist_add_head(&pol
->bydst
, ndsttable
+h
);
351 hlist_add_after(entry0
, &pol
->bydst
);
355 if (!hlist_empty(list
)) {
361 static void xfrm_idx_hash_transfer(struct hlist_head
*list
,
362 struct hlist_head
*nidxtable
,
363 unsigned int nhashmask
)
365 struct hlist_node
*entry
, *tmp
;
366 struct xfrm_policy
*pol
;
368 hlist_for_each_entry_safe(pol
, entry
, tmp
, list
, byidx
) {
371 h
= __idx_hash(pol
->index
, nhashmask
);
372 hlist_add_head(&pol
->byidx
, nidxtable
+h
);
376 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask
)
378 return ((old_hmask
+ 1) << 1) - 1;
381 static void xfrm_bydst_resize(struct net
*net
, int dir
)
383 unsigned int hmask
= net
->xfrm
.policy_bydst
[dir
].hmask
;
384 unsigned int nhashmask
= xfrm_new_hash_mask(hmask
);
385 unsigned int nsize
= (nhashmask
+ 1) * sizeof(struct hlist_head
);
386 struct hlist_head
*odst
= net
->xfrm
.policy_bydst
[dir
].table
;
387 struct hlist_head
*ndst
= xfrm_hash_alloc(nsize
);
393 write_lock_bh(&xfrm_policy_lock
);
395 for (i
= hmask
; i
>= 0; i
--)
396 xfrm_dst_hash_transfer(odst
+ i
, ndst
, nhashmask
);
398 net
->xfrm
.policy_bydst
[dir
].table
= ndst
;
399 net
->xfrm
.policy_bydst
[dir
].hmask
= nhashmask
;
401 write_unlock_bh(&xfrm_policy_lock
);
403 xfrm_hash_free(odst
, (hmask
+ 1) * sizeof(struct hlist_head
));
406 static void xfrm_byidx_resize(struct net
*net
, int total
)
408 unsigned int hmask
= net
->xfrm
.policy_idx_hmask
;
409 unsigned int nhashmask
= xfrm_new_hash_mask(hmask
);
410 unsigned int nsize
= (nhashmask
+ 1) * sizeof(struct hlist_head
);
411 struct hlist_head
*oidx
= net
->xfrm
.policy_byidx
;
412 struct hlist_head
*nidx
= xfrm_hash_alloc(nsize
);
418 write_lock_bh(&xfrm_policy_lock
);
420 for (i
= hmask
; i
>= 0; i
--)
421 xfrm_idx_hash_transfer(oidx
+ i
, nidx
, nhashmask
);
423 net
->xfrm
.policy_byidx
= nidx
;
424 net
->xfrm
.policy_idx_hmask
= nhashmask
;
426 write_unlock_bh(&xfrm_policy_lock
);
428 xfrm_hash_free(oidx
, (hmask
+ 1) * sizeof(struct hlist_head
));
431 static inline int xfrm_bydst_should_resize(struct net
*net
, int dir
, int *total
)
433 unsigned int cnt
= net
->xfrm
.policy_count
[dir
];
434 unsigned int hmask
= net
->xfrm
.policy_bydst
[dir
].hmask
;
439 if ((hmask
+ 1) < xfrm_policy_hashmax
&&
446 static inline int xfrm_byidx_should_resize(struct net
*net
, int total
)
448 unsigned int hmask
= net
->xfrm
.policy_idx_hmask
;
450 if ((hmask
+ 1) < xfrm_policy_hashmax
&&
457 void xfrm_spd_getinfo(struct net
*net
, struct xfrmk_spdinfo
*si
)
459 read_lock_bh(&xfrm_policy_lock
);
460 si
->incnt
= net
->xfrm
.policy_count
[XFRM_POLICY_IN
];
461 si
->outcnt
= net
->xfrm
.policy_count
[XFRM_POLICY_OUT
];
462 si
->fwdcnt
= net
->xfrm
.policy_count
[XFRM_POLICY_FWD
];
463 si
->inscnt
= net
->xfrm
.policy_count
[XFRM_POLICY_IN
+XFRM_POLICY_MAX
];
464 si
->outscnt
= net
->xfrm
.policy_count
[XFRM_POLICY_OUT
+XFRM_POLICY_MAX
];
465 si
->fwdscnt
= net
->xfrm
.policy_count
[XFRM_POLICY_FWD
+XFRM_POLICY_MAX
];
466 si
->spdhcnt
= net
->xfrm
.policy_idx_hmask
;
467 si
->spdhmcnt
= xfrm_policy_hashmax
;
468 read_unlock_bh(&xfrm_policy_lock
);
470 EXPORT_SYMBOL(xfrm_spd_getinfo
);
472 static DEFINE_MUTEX(hash_resize_mutex
);
473 static void xfrm_hash_resize(struct work_struct
*work
)
475 struct net
*net
= container_of(work
, struct net
, xfrm
.policy_hash_work
);
478 mutex_lock(&hash_resize_mutex
);
481 for (dir
= 0; dir
< XFRM_POLICY_MAX
* 2; dir
++) {
482 if (xfrm_bydst_should_resize(net
, dir
, &total
))
483 xfrm_bydst_resize(net
, dir
);
485 if (xfrm_byidx_should_resize(net
, total
))
486 xfrm_byidx_resize(net
, total
);
488 mutex_unlock(&hash_resize_mutex
);
491 /* Generate new index... KAME seems to generate them ordered by cost
492 * of an absolute inpredictability of ordering of rules. This will not pass. */
493 static u32
xfrm_gen_index(struct net
*net
, int dir
)
495 static u32 idx_generator
;
498 struct hlist_node
*entry
;
499 struct hlist_head
*list
;
500 struct xfrm_policy
*p
;
504 idx
= (idx_generator
| dir
);
508 list
= net
->xfrm
.policy_byidx
+ idx_hash(net
, idx
);
510 hlist_for_each_entry(p
, entry
, list
, byidx
) {
511 if (p
->index
== idx
) {
521 static inline int selector_cmp(struct xfrm_selector
*s1
, struct xfrm_selector
*s2
)
523 u32
*p1
= (u32
*) s1
;
524 u32
*p2
= (u32
*) s2
;
525 int len
= sizeof(struct xfrm_selector
) / sizeof(u32
);
528 for (i
= 0; i
< len
; i
++) {
536 int xfrm_policy_insert(int dir
, struct xfrm_policy
*policy
, int excl
)
538 struct net
*net
= xp_net(policy
);
539 struct xfrm_policy
*pol
;
540 struct xfrm_policy
*delpol
;
541 struct hlist_head
*chain
;
542 struct hlist_node
*entry
, *newpos
;
543 u32 mark
= policy
->mark
.v
& policy
->mark
.m
;
545 write_lock_bh(&xfrm_policy_lock
);
546 chain
= policy_hash_bysel(net
, &policy
->selector
, policy
->family
, dir
);
549 hlist_for_each_entry(pol
, entry
, chain
, bydst
) {
550 if (pol
->type
== policy
->type
&&
551 !selector_cmp(&pol
->selector
, &policy
->selector
) &&
552 (mark
& pol
->mark
.m
) == pol
->mark
.v
&&
553 xfrm_sec_ctx_match(pol
->security
, policy
->security
) &&
556 write_unlock_bh(&xfrm_policy_lock
);
560 if (policy
->priority
> pol
->priority
)
562 } else if (policy
->priority
>= pol
->priority
) {
563 newpos
= &pol
->bydst
;
570 hlist_add_after(newpos
, &policy
->bydst
);
572 hlist_add_head(&policy
->bydst
, chain
);
573 xfrm_pol_hold(policy
);
574 net
->xfrm
.policy_count
[dir
]++;
575 atomic_inc(&flow_cache_genid
);
577 __xfrm_policy_unlink(delpol
, dir
);
578 policy
->index
= delpol
? delpol
->index
: xfrm_gen_index(net
, dir
);
579 hlist_add_head(&policy
->byidx
, net
->xfrm
.policy_byidx
+idx_hash(net
, policy
->index
));
580 policy
->curlft
.add_time
= get_seconds();
581 policy
->curlft
.use_time
= 0;
582 if (!mod_timer(&policy
->timer
, jiffies
+ HZ
))
583 xfrm_pol_hold(policy
);
584 list_add(&policy
->walk
.all
, &net
->xfrm
.policy_all
);
585 write_unlock_bh(&xfrm_policy_lock
);
588 xfrm_policy_kill(delpol
);
589 else if (xfrm_bydst_should_resize(net
, dir
, NULL
))
590 schedule_work(&net
->xfrm
.policy_hash_work
);
594 EXPORT_SYMBOL(xfrm_policy_insert
);
596 struct xfrm_policy
*xfrm_policy_bysel_ctx(struct net
*net
, u32 mark
, u8 type
,
597 int dir
, struct xfrm_selector
*sel
,
598 struct xfrm_sec_ctx
*ctx
, int delete,
601 struct xfrm_policy
*pol
, *ret
;
602 struct hlist_head
*chain
;
603 struct hlist_node
*entry
;
606 write_lock_bh(&xfrm_policy_lock
);
607 chain
= policy_hash_bysel(net
, sel
, sel
->family
, dir
);
609 hlist_for_each_entry(pol
, entry
, chain
, bydst
) {
610 if (pol
->type
== type
&&
611 (mark
& pol
->mark
.m
) == pol
->mark
.v
&&
612 !selector_cmp(sel
, &pol
->selector
) &&
613 xfrm_sec_ctx_match(ctx
, pol
->security
)) {
616 *err
= security_xfrm_policy_delete(
619 write_unlock_bh(&xfrm_policy_lock
);
622 __xfrm_policy_unlink(pol
, dir
);
628 write_unlock_bh(&xfrm_policy_lock
);
631 xfrm_policy_kill(ret
);
634 EXPORT_SYMBOL(xfrm_policy_bysel_ctx
);
636 struct xfrm_policy
*xfrm_policy_byid(struct net
*net
, u32 mark
, u8 type
,
637 int dir
, u32 id
, int delete, int *err
)
639 struct xfrm_policy
*pol
, *ret
;
640 struct hlist_head
*chain
;
641 struct hlist_node
*entry
;
644 if (xfrm_policy_id2dir(id
) != dir
)
648 write_lock_bh(&xfrm_policy_lock
);
649 chain
= net
->xfrm
.policy_byidx
+ idx_hash(net
, id
);
651 hlist_for_each_entry(pol
, entry
, chain
, byidx
) {
652 if (pol
->type
== type
&& pol
->index
== id
&&
653 (mark
& pol
->mark
.m
) == pol
->mark
.v
) {
656 *err
= security_xfrm_policy_delete(
659 write_unlock_bh(&xfrm_policy_lock
);
662 __xfrm_policy_unlink(pol
, dir
);
668 write_unlock_bh(&xfrm_policy_lock
);
671 xfrm_policy_kill(ret
);
674 EXPORT_SYMBOL(xfrm_policy_byid
);
676 #ifdef CONFIG_SECURITY_NETWORK_XFRM
678 xfrm_policy_flush_secctx_check(struct net
*net
, u8 type
, struct xfrm_audit
*audit_info
)
682 for (dir
= 0; dir
< XFRM_POLICY_MAX
; dir
++) {
683 struct xfrm_policy
*pol
;
684 struct hlist_node
*entry
;
687 hlist_for_each_entry(pol
, entry
,
688 &net
->xfrm
.policy_inexact
[dir
], bydst
) {
689 if (pol
->type
!= type
)
691 err
= security_xfrm_policy_delete(pol
->security
);
693 xfrm_audit_policy_delete(pol
, 0,
694 audit_info
->loginuid
,
695 audit_info
->sessionid
,
700 for (i
= net
->xfrm
.policy_bydst
[dir
].hmask
; i
>= 0; i
--) {
701 hlist_for_each_entry(pol
, entry
,
702 net
->xfrm
.policy_bydst
[dir
].table
+ i
,
704 if (pol
->type
!= type
)
706 err
= security_xfrm_policy_delete(
709 xfrm_audit_policy_delete(pol
, 0,
710 audit_info
->loginuid
,
711 audit_info
->sessionid
,
722 xfrm_policy_flush_secctx_check(struct net
*net
, u8 type
, struct xfrm_audit
*audit_info
)
728 int xfrm_policy_flush(struct net
*net
, u8 type
, struct xfrm_audit
*audit_info
)
730 int dir
, err
= 0, cnt
= 0;
732 write_lock_bh(&xfrm_policy_lock
);
734 err
= xfrm_policy_flush_secctx_check(net
, type
, audit_info
);
738 for (dir
= 0; dir
< XFRM_POLICY_MAX
; dir
++) {
739 struct xfrm_policy
*pol
;
740 struct hlist_node
*entry
;
744 hlist_for_each_entry(pol
, entry
,
745 &net
->xfrm
.policy_inexact
[dir
], bydst
) {
746 if (pol
->type
!= type
)
748 __xfrm_policy_unlink(pol
, dir
);
749 write_unlock_bh(&xfrm_policy_lock
);
752 xfrm_audit_policy_delete(pol
, 1, audit_info
->loginuid
,
753 audit_info
->sessionid
,
756 xfrm_policy_kill(pol
);
758 write_lock_bh(&xfrm_policy_lock
);
762 for (i
= net
->xfrm
.policy_bydst
[dir
].hmask
; i
>= 0; i
--) {
764 hlist_for_each_entry(pol
, entry
,
765 net
->xfrm
.policy_bydst
[dir
].table
+ i
,
767 if (pol
->type
!= type
)
769 __xfrm_policy_unlink(pol
, dir
);
770 write_unlock_bh(&xfrm_policy_lock
);
773 xfrm_audit_policy_delete(pol
, 1,
774 audit_info
->loginuid
,
775 audit_info
->sessionid
,
777 xfrm_policy_kill(pol
);
779 write_lock_bh(&xfrm_policy_lock
);
788 write_unlock_bh(&xfrm_policy_lock
);
791 EXPORT_SYMBOL(xfrm_policy_flush
);
793 int xfrm_policy_walk(struct net
*net
, struct xfrm_policy_walk
*walk
,
794 int (*func
)(struct xfrm_policy
*, int, int, void*),
797 struct xfrm_policy
*pol
;
798 struct xfrm_policy_walk_entry
*x
;
801 if (walk
->type
>= XFRM_POLICY_TYPE_MAX
&&
802 walk
->type
!= XFRM_POLICY_TYPE_ANY
)
805 if (list_empty(&walk
->walk
.all
) && walk
->seq
!= 0)
808 write_lock_bh(&xfrm_policy_lock
);
809 if (list_empty(&walk
->walk
.all
))
810 x
= list_first_entry(&net
->xfrm
.policy_all
, struct xfrm_policy_walk_entry
, all
);
812 x
= list_entry(&walk
->walk
.all
, struct xfrm_policy_walk_entry
, all
);
813 list_for_each_entry_from(x
, &net
->xfrm
.policy_all
, all
) {
816 pol
= container_of(x
, struct xfrm_policy
, walk
);
817 if (walk
->type
!= XFRM_POLICY_TYPE_ANY
&&
818 walk
->type
!= pol
->type
)
820 error
= func(pol
, xfrm_policy_id2dir(pol
->index
),
823 list_move_tail(&walk
->walk
.all
, &x
->all
);
828 if (walk
->seq
== 0) {
832 list_del_init(&walk
->walk
.all
);
834 write_unlock_bh(&xfrm_policy_lock
);
837 EXPORT_SYMBOL(xfrm_policy_walk
);
839 void xfrm_policy_walk_init(struct xfrm_policy_walk
*walk
, u8 type
)
841 INIT_LIST_HEAD(&walk
->walk
.all
);
846 EXPORT_SYMBOL(xfrm_policy_walk_init
);
848 void xfrm_policy_walk_done(struct xfrm_policy_walk
*walk
)
850 if (list_empty(&walk
->walk
.all
))
853 write_lock_bh(&xfrm_policy_lock
);
854 list_del(&walk
->walk
.all
);
855 write_unlock_bh(&xfrm_policy_lock
);
857 EXPORT_SYMBOL(xfrm_policy_walk_done
);
860 * Find policy to apply to this flow.
862 * Returns 0 if policy found, else an -errno.
864 static int xfrm_policy_match(struct xfrm_policy
*pol
, struct flowi
*fl
,
865 u8 type
, u16 family
, int dir
)
867 struct xfrm_selector
*sel
= &pol
->selector
;
868 int match
, ret
= -ESRCH
;
870 if (pol
->family
!= family
||
871 (fl
->mark
& pol
->mark
.m
) != pol
->mark
.v
||
875 match
= xfrm_selector_match(sel
, fl
, family
);
877 ret
= security_xfrm_policy_lookup(pol
->security
, fl
->secid
,
883 static struct xfrm_policy
*xfrm_policy_lookup_bytype(struct net
*net
, u8 type
,
888 struct xfrm_policy
*pol
, *ret
;
889 xfrm_address_t
*daddr
, *saddr
;
890 struct hlist_node
*entry
;
891 struct hlist_head
*chain
;
894 daddr
= xfrm_flowi_daddr(fl
, family
);
895 saddr
= xfrm_flowi_saddr(fl
, family
);
896 if (unlikely(!daddr
|| !saddr
))
899 read_lock_bh(&xfrm_policy_lock
);
900 chain
= policy_hash_direct(net
, daddr
, saddr
, family
, dir
);
902 hlist_for_each_entry(pol
, entry
, chain
, bydst
) {
903 err
= xfrm_policy_match(pol
, fl
, type
, family
, dir
);
913 priority
= ret
->priority
;
917 chain
= &net
->xfrm
.policy_inexact
[dir
];
918 hlist_for_each_entry(pol
, entry
, chain
, bydst
) {
919 err
= xfrm_policy_match(pol
, fl
, type
, family
, dir
);
927 } else if (pol
->priority
< priority
) {
935 read_unlock_bh(&xfrm_policy_lock
);
940 static struct xfrm_policy
*
941 __xfrm_policy_lookup(struct net
*net
, struct flowi
*fl
, u16 family
, u8 dir
)
943 #ifdef CONFIG_XFRM_SUB_POLICY
944 struct xfrm_policy
*pol
;
946 pol
= xfrm_policy_lookup_bytype(net
, XFRM_POLICY_TYPE_SUB
, fl
, family
, dir
);
950 return xfrm_policy_lookup_bytype(net
, XFRM_POLICY_TYPE_MAIN
, fl
, family
, dir
);
953 static struct flow_cache_object
*
954 xfrm_policy_lookup(struct net
*net
, struct flowi
*fl
, u16 family
,
955 u8 dir
, struct flow_cache_object
*old_obj
, void *ctx
)
957 struct xfrm_policy
*pol
;
960 xfrm_pol_put(container_of(old_obj
, struct xfrm_policy
, flo
));
962 pol
= __xfrm_policy_lookup(net
, fl
, family
, dir
);
963 if (IS_ERR_OR_NULL(pol
))
964 return ERR_CAST(pol
);
966 /* Resolver returns two references:
967 * one for cache and one for caller of flow_cache_lookup() */
973 static inline int policy_to_flow_dir(int dir
)
975 if (XFRM_POLICY_IN
== FLOW_DIR_IN
&&
976 XFRM_POLICY_OUT
== FLOW_DIR_OUT
&&
977 XFRM_POLICY_FWD
== FLOW_DIR_FWD
)
983 case XFRM_POLICY_OUT
:
985 case XFRM_POLICY_FWD
:
990 static struct xfrm_policy
*xfrm_sk_policy_lookup(struct sock
*sk
, int dir
, struct flowi
*fl
)
992 struct xfrm_policy
*pol
;
994 read_lock_bh(&xfrm_policy_lock
);
995 if ((pol
= sk
->sk_policy
[dir
]) != NULL
) {
996 int match
= xfrm_selector_match(&pol
->selector
, fl
,
1001 if ((sk
->sk_mark
& pol
->mark
.m
) != pol
->mark
.v
) {
1005 err
= security_xfrm_policy_lookup(pol
->security
,
1007 policy_to_flow_dir(dir
));
1010 else if (err
== -ESRCH
)
1018 read_unlock_bh(&xfrm_policy_lock
);
1022 static void __xfrm_policy_link(struct xfrm_policy
*pol
, int dir
)
1024 struct net
*net
= xp_net(pol
);
1025 struct hlist_head
*chain
= policy_hash_bysel(net
, &pol
->selector
,
1028 list_add(&pol
->walk
.all
, &net
->xfrm
.policy_all
);
1029 hlist_add_head(&pol
->bydst
, chain
);
1030 hlist_add_head(&pol
->byidx
, net
->xfrm
.policy_byidx
+idx_hash(net
, pol
->index
));
1031 net
->xfrm
.policy_count
[dir
]++;
1034 if (xfrm_bydst_should_resize(net
, dir
, NULL
))
1035 schedule_work(&net
->xfrm
.policy_hash_work
);
1038 static struct xfrm_policy
*__xfrm_policy_unlink(struct xfrm_policy
*pol
,
1041 struct net
*net
= xp_net(pol
);
1043 if (hlist_unhashed(&pol
->bydst
))
1046 hlist_del(&pol
->bydst
);
1047 hlist_del(&pol
->byidx
);
1048 list_del(&pol
->walk
.all
);
1049 net
->xfrm
.policy_count
[dir
]--;
1054 int xfrm_policy_delete(struct xfrm_policy
*pol
, int dir
)
1056 write_lock_bh(&xfrm_policy_lock
);
1057 pol
= __xfrm_policy_unlink(pol
, dir
);
1058 write_unlock_bh(&xfrm_policy_lock
);
1060 xfrm_policy_kill(pol
);
1065 EXPORT_SYMBOL(xfrm_policy_delete
);
1067 int xfrm_sk_policy_insert(struct sock
*sk
, int dir
, struct xfrm_policy
*pol
)
1069 struct net
*net
= xp_net(pol
);
1070 struct xfrm_policy
*old_pol
;
1072 #ifdef CONFIG_XFRM_SUB_POLICY
1073 if (pol
&& pol
->type
!= XFRM_POLICY_TYPE_MAIN
)
1077 write_lock_bh(&xfrm_policy_lock
);
1078 old_pol
= sk
->sk_policy
[dir
];
1079 sk
->sk_policy
[dir
] = pol
;
1081 pol
->curlft
.add_time
= get_seconds();
1082 pol
->index
= xfrm_gen_index(net
, XFRM_POLICY_MAX
+dir
);
1083 __xfrm_policy_link(pol
, XFRM_POLICY_MAX
+dir
);
1086 /* Unlinking succeeds always. This is the only function
1087 * allowed to delete or replace socket policy.
1089 __xfrm_policy_unlink(old_pol
, XFRM_POLICY_MAX
+dir
);
1090 write_unlock_bh(&xfrm_policy_lock
);
1093 xfrm_policy_kill(old_pol
);
1098 static struct xfrm_policy
*clone_policy(struct xfrm_policy
*old
, int dir
)
1100 struct xfrm_policy
*newp
= xfrm_policy_alloc(xp_net(old
), GFP_ATOMIC
);
1103 newp
->selector
= old
->selector
;
1104 if (security_xfrm_policy_clone(old
->security
,
1107 return NULL
; /* ENOMEM */
1109 newp
->lft
= old
->lft
;
1110 newp
->curlft
= old
->curlft
;
1111 newp
->mark
= old
->mark
;
1112 newp
->action
= old
->action
;
1113 newp
->flags
= old
->flags
;
1114 newp
->xfrm_nr
= old
->xfrm_nr
;
1115 newp
->index
= old
->index
;
1116 newp
->type
= old
->type
;
1117 memcpy(newp
->xfrm_vec
, old
->xfrm_vec
,
1118 newp
->xfrm_nr
*sizeof(struct xfrm_tmpl
));
1119 write_lock_bh(&xfrm_policy_lock
);
1120 __xfrm_policy_link(newp
, XFRM_POLICY_MAX
+dir
);
1121 write_unlock_bh(&xfrm_policy_lock
);
1127 int __xfrm_sk_clone_policy(struct sock
*sk
)
1129 struct xfrm_policy
*p0
= sk
->sk_policy
[0],
1130 *p1
= sk
->sk_policy
[1];
1132 sk
->sk_policy
[0] = sk
->sk_policy
[1] = NULL
;
1133 if (p0
&& (sk
->sk_policy
[0] = clone_policy(p0
, 0)) == NULL
)
1135 if (p1
&& (sk
->sk_policy
[1] = clone_policy(p1
, 1)) == NULL
)
1141 xfrm_get_saddr(struct net
*net
, xfrm_address_t
*local
, xfrm_address_t
*remote
,
1142 unsigned short family
)
1145 struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_get_afinfo(family
);
1147 if (unlikely(afinfo
== NULL
))
1149 err
= afinfo
->get_saddr(net
, local
, remote
);
1150 xfrm_policy_put_afinfo(afinfo
);
1154 /* Resolve list of templates for the flow, given policy. */
1157 xfrm_tmpl_resolve_one(struct xfrm_policy
*policy
, struct flowi
*fl
,
1158 struct xfrm_state
**xfrm
,
1159 unsigned short family
)
1161 struct net
*net
= xp_net(policy
);
1164 xfrm_address_t
*daddr
= xfrm_flowi_daddr(fl
, family
);
1165 xfrm_address_t
*saddr
= xfrm_flowi_saddr(fl
, family
);
1168 for (nx
=0, i
= 0; i
< policy
->xfrm_nr
; i
++) {
1169 struct xfrm_state
*x
;
1170 xfrm_address_t
*remote
= daddr
;
1171 xfrm_address_t
*local
= saddr
;
1172 struct xfrm_tmpl
*tmpl
= &policy
->xfrm_vec
[i
];
1174 if (tmpl
->mode
== XFRM_MODE_TUNNEL
||
1175 tmpl
->mode
== XFRM_MODE_BEET
) {
1176 remote
= &tmpl
->id
.daddr
;
1177 local
= &tmpl
->saddr
;
1178 if (xfrm_addr_any(local
, tmpl
->encap_family
)) {
1179 error
= xfrm_get_saddr(net
, &tmp
, remote
, tmpl
->encap_family
);
1186 x
= xfrm_state_find(remote
, local
, fl
, tmpl
, policy
, &error
, family
);
1188 if (x
&& x
->km
.state
== XFRM_STATE_VALID
) {
1195 error
= (x
->km
.state
== XFRM_STATE_ERROR
?
1199 else if (error
== -ESRCH
)
1202 if (!tmpl
->optional
)
1208 for (nx
--; nx
>=0; nx
--)
1209 xfrm_state_put(xfrm
[nx
]);
1214 xfrm_tmpl_resolve(struct xfrm_policy
**pols
, int npols
, struct flowi
*fl
,
1215 struct xfrm_state
**xfrm
,
1216 unsigned short family
)
1218 struct xfrm_state
*tp
[XFRM_MAX_DEPTH
];
1219 struct xfrm_state
**tpp
= (npols
> 1) ? tp
: xfrm
;
1225 for (i
= 0; i
< npols
; i
++) {
1226 if (cnx
+ pols
[i
]->xfrm_nr
>= XFRM_MAX_DEPTH
) {
1231 ret
= xfrm_tmpl_resolve_one(pols
[i
], fl
, &tpp
[cnx
], family
);
1239 /* found states are sorted for outbound processing */
1241 xfrm_state_sort(xfrm
, tpp
, cnx
, family
);
1246 for (cnx
--; cnx
>=0; cnx
--)
1247 xfrm_state_put(tpp
[cnx
]);
1252 /* Check that the bundle accepts the flow and its components are
1256 static inline int xfrm_get_tos(struct flowi
*fl
, int family
)
1258 struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_get_afinfo(family
);
1264 tos
= afinfo
->get_tos(fl
);
1266 xfrm_policy_put_afinfo(afinfo
);
1271 static struct flow_cache_object
*xfrm_bundle_flo_get(struct flow_cache_object
*flo
)
1273 struct xfrm_dst
*xdst
= container_of(flo
, struct xfrm_dst
, flo
);
1274 struct dst_entry
*dst
= &xdst
->u
.dst
;
1276 if (xdst
->route
== NULL
) {
1277 /* Dummy bundle - if it has xfrms we were not
1278 * able to build bundle as template resolution failed.
1279 * It means we need to try again resolving. */
1280 if (xdst
->num_xfrms
> 0)
1284 if (stale_bundle(dst
))
1292 static int xfrm_bundle_flo_check(struct flow_cache_object
*flo
)
1294 struct xfrm_dst
*xdst
= container_of(flo
, struct xfrm_dst
, flo
);
1295 struct dst_entry
*dst
= &xdst
->u
.dst
;
1299 if (stale_bundle(dst
))
1305 static void xfrm_bundle_flo_delete(struct flow_cache_object
*flo
)
1307 struct xfrm_dst
*xdst
= container_of(flo
, struct xfrm_dst
, flo
);
1308 struct dst_entry
*dst
= &xdst
->u
.dst
;
1313 static const struct flow_cache_ops xfrm_bundle_fc_ops
= {
1314 .get
= xfrm_bundle_flo_get
,
1315 .check
= xfrm_bundle_flo_check
,
1316 .delete = xfrm_bundle_flo_delete
,
1319 static inline struct xfrm_dst
*xfrm_alloc_dst(struct net
*net
, int family
)
1321 struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_get_afinfo(family
);
1322 struct dst_ops
*dst_ops
;
1323 struct xfrm_dst
*xdst
;
1326 return ERR_PTR(-EINVAL
);
1330 dst_ops
= &net
->xfrm
.xfrm4_dst_ops
;
1332 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1334 dst_ops
= &net
->xfrm
.xfrm6_dst_ops
;
1340 xdst
= dst_alloc(dst_ops
) ?: ERR_PTR(-ENOBUFS
);
1341 xfrm_policy_put_afinfo(afinfo
);
1343 xdst
->flo
.ops
= &xfrm_bundle_fc_ops
;
1348 static inline int xfrm_init_path(struct xfrm_dst
*path
, struct dst_entry
*dst
,
1351 struct xfrm_policy_afinfo
*afinfo
=
1352 xfrm_policy_get_afinfo(dst
->ops
->family
);
1358 err
= afinfo
->init_path(path
, dst
, nfheader_len
);
1360 xfrm_policy_put_afinfo(afinfo
);
1365 static inline int xfrm_fill_dst(struct xfrm_dst
*xdst
, struct net_device
*dev
,
1368 struct xfrm_policy_afinfo
*afinfo
=
1369 xfrm_policy_get_afinfo(xdst
->u
.dst
.ops
->family
);
1375 err
= afinfo
->fill_dst(xdst
, dev
, fl
);
1377 xfrm_policy_put_afinfo(afinfo
);
1383 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
1384 * all the metrics... Shortly, bundle a bundle.
1387 static struct dst_entry
*xfrm_bundle_create(struct xfrm_policy
*policy
,
1388 struct xfrm_state
**xfrm
, int nx
,
1390 struct dst_entry
*dst
)
1392 struct net
*net
= xp_net(policy
);
1393 unsigned long now
= jiffies
;
1394 struct net_device
*dev
;
1395 struct dst_entry
*dst_prev
= NULL
;
1396 struct dst_entry
*dst0
= NULL
;
1400 int nfheader_len
= 0;
1401 int trailer_len
= 0;
1403 int family
= policy
->selector
.family
;
1404 xfrm_address_t saddr
, daddr
;
1406 xfrm_flowi_addr_get(fl
, &saddr
, &daddr
, family
);
1408 tos
= xfrm_get_tos(fl
, family
);
1415 for (; i
< nx
; i
++) {
1416 struct xfrm_dst
*xdst
= xfrm_alloc_dst(net
, family
);
1417 struct dst_entry
*dst1
= &xdst
->u
.dst
;
1419 err
= PTR_ERR(xdst
);
1428 dst_prev
->child
= dst_clone(dst1
);
1429 dst1
->flags
|= DST_NOHASH
;
1433 memcpy(&dst1
->metrics
, &dst
->metrics
, sizeof(dst
->metrics
));
1435 if (xfrm
[i
]->props
.mode
!= XFRM_MODE_TRANSPORT
) {
1436 family
= xfrm
[i
]->props
.family
;
1437 dst
= xfrm_dst_lookup(xfrm
[i
], tos
, &saddr
, &daddr
,
1445 dst1
->xfrm
= xfrm
[i
];
1446 xdst
->xfrm_genid
= xfrm
[i
]->genid
;
1448 dst1
->obsolete
= -1;
1449 dst1
->flags
|= DST_HOST
;
1450 dst1
->lastuse
= now
;
1452 dst1
->input
= dst_discard
;
1453 dst1
->output
= xfrm
[i
]->outer_mode
->afinfo
->output
;
1455 dst1
->next
= dst_prev
;
1458 header_len
+= xfrm
[i
]->props
.header_len
;
1459 if (xfrm
[i
]->type
->flags
& XFRM_TYPE_NON_FRAGMENT
)
1460 nfheader_len
+= xfrm
[i
]->props
.header_len
;
1461 trailer_len
+= xfrm
[i
]->props
.trailer_len
;
1464 dst_prev
->child
= dst
;
1472 /* Copy neighbour for reachability confirmation */
1473 dst0
->neighbour
= neigh_clone(dst
->neighbour
);
1475 xfrm_init_path((struct xfrm_dst
*)dst0
, dst
, nfheader_len
);
1476 xfrm_init_pmtu(dst_prev
);
1478 for (dst_prev
= dst0
; dst_prev
!= dst
; dst_prev
= dst_prev
->child
) {
1479 struct xfrm_dst
*xdst
= (struct xfrm_dst
*)dst_prev
;
1481 err
= xfrm_fill_dst(xdst
, dev
, fl
);
1485 dst_prev
->header_len
= header_len
;
1486 dst_prev
->trailer_len
= trailer_len
;
1487 header_len
-= xdst
->u
.dst
.xfrm
->props
.header_len
;
1488 trailer_len
-= xdst
->u
.dst
.xfrm
->props
.trailer_len
;
1496 xfrm_state_put(xfrm
[i
]);
1500 dst0
= ERR_PTR(err
);
1505 xfrm_dst_alloc_copy(void **target
, void *src
, int size
)
1508 *target
= kmalloc(size
, GFP_ATOMIC
);
1512 memcpy(*target
, src
, size
);
1517 xfrm_dst_update_parent(struct dst_entry
*dst
, struct xfrm_selector
*sel
)
1519 #ifdef CONFIG_XFRM_SUB_POLICY
1520 struct xfrm_dst
*xdst
= (struct xfrm_dst
*)dst
;
1521 return xfrm_dst_alloc_copy((void **)&(xdst
->partner
),
1529 xfrm_dst_update_origin(struct dst_entry
*dst
, struct flowi
*fl
)
1531 #ifdef CONFIG_XFRM_SUB_POLICY
1532 struct xfrm_dst
*xdst
= (struct xfrm_dst
*)dst
;
1533 return xfrm_dst_alloc_copy((void **)&(xdst
->origin
), fl
, sizeof(*fl
));
1539 static int xfrm_expand_policies(struct flowi
*fl
, u16 family
,
1540 struct xfrm_policy
**pols
,
1541 int *num_pols
, int *num_xfrms
)
1545 if (*num_pols
== 0 || !pols
[0]) {
1550 if (IS_ERR(pols
[0]))
1551 return PTR_ERR(pols
[0]);
1553 *num_xfrms
= pols
[0]->xfrm_nr
;
1555 #ifdef CONFIG_XFRM_SUB_POLICY
1556 if (pols
[0] && pols
[0]->action
== XFRM_POLICY_ALLOW
&&
1557 pols
[0]->type
!= XFRM_POLICY_TYPE_MAIN
) {
1558 pols
[1] = xfrm_policy_lookup_bytype(xp_net(pols
[0]),
1559 XFRM_POLICY_TYPE_MAIN
,
1563 if (IS_ERR(pols
[1])) {
1564 xfrm_pols_put(pols
, *num_pols
);
1565 return PTR_ERR(pols
[1]);
1568 (*num_xfrms
) += pols
[1]->xfrm_nr
;
1572 for (i
= 0; i
< *num_pols
; i
++) {
1573 if (pols
[i
]->action
!= XFRM_POLICY_ALLOW
) {
1583 static struct xfrm_dst
*
1584 xfrm_resolve_and_create_bundle(struct xfrm_policy
**pols
, int num_pols
,
1585 struct flowi
*fl
, u16 family
,
1586 struct dst_entry
*dst_orig
)
1588 struct net
*net
= xp_net(pols
[0]);
1589 struct xfrm_state
*xfrm
[XFRM_MAX_DEPTH
];
1590 struct dst_entry
*dst
;
1591 struct xfrm_dst
*xdst
;
1594 /* Try to instantiate a bundle */
1595 err
= xfrm_tmpl_resolve(pols
, num_pols
, fl
, xfrm
, family
);
1597 if (err
!= 0 && err
!= -EAGAIN
)
1598 XFRM_INC_STATS(net
, LINUX_MIB_XFRMOUTPOLERROR
);
1599 return ERR_PTR(err
);
1602 dst
= xfrm_bundle_create(pols
[0], xfrm
, err
, fl
, dst_orig
);
1604 XFRM_INC_STATS(net
, LINUX_MIB_XFRMOUTBUNDLEGENERROR
);
1605 return ERR_CAST(dst
);
1608 xdst
= (struct xfrm_dst
*)dst
;
1609 xdst
->num_xfrms
= err
;
1611 err
= xfrm_dst_update_parent(dst
, &pols
[1]->selector
);
1613 err
= xfrm_dst_update_origin(dst
, fl
);
1614 if (unlikely(err
)) {
1616 XFRM_INC_STATS(net
, LINUX_MIB_XFRMOUTBUNDLECHECKERROR
);
1617 return ERR_PTR(err
);
1620 xdst
->num_pols
= num_pols
;
1621 memcpy(xdst
->pols
, pols
, sizeof(struct xfrm_policy
*) * num_pols
);
1622 xdst
->policy_genid
= atomic_read(&pols
[0]->genid
);
1627 static struct flow_cache_object
*
1628 xfrm_bundle_lookup(struct net
*net
, struct flowi
*fl
, u16 family
, u8 dir
,
1629 struct flow_cache_object
*oldflo
, void *ctx
)
1631 struct dst_entry
*dst_orig
= (struct dst_entry
*)ctx
;
1632 struct xfrm_policy
*pols
[XFRM_POLICY_TYPE_MAX
];
1633 struct xfrm_dst
*xdst
, *new_xdst
;
1634 int num_pols
= 0, num_xfrms
= 0, i
, err
, pol_dead
;
1636 /* Check if the policies from old bundle are usable */
1639 xdst
= container_of(oldflo
, struct xfrm_dst
, flo
);
1640 num_pols
= xdst
->num_pols
;
1641 num_xfrms
= xdst
->num_xfrms
;
1643 for (i
= 0; i
< num_pols
; i
++) {
1644 pols
[i
] = xdst
->pols
[i
];
1645 pol_dead
|= pols
[i
]->walk
.dead
;
1648 dst_free(&xdst
->u
.dst
);
1656 /* Resolve policies to use if we couldn't get them from
1657 * previous cache entry */
1660 pols
[0] = __xfrm_policy_lookup(net
, fl
, family
, dir
);
1661 err
= xfrm_expand_policies(fl
, family
, pols
,
1662 &num_pols
, &num_xfrms
);
1668 goto make_dummy_bundle
;
1671 new_xdst
= xfrm_resolve_and_create_bundle(pols
, num_pols
, fl
, family
, dst_orig
);
1672 if (IS_ERR(new_xdst
)) {
1673 err
= PTR_ERR(new_xdst
);
1677 goto make_dummy_bundle
;
1678 dst_hold(&xdst
->u
.dst
);
1680 } else if (new_xdst
== NULL
) {
1683 goto make_dummy_bundle
;
1684 xdst
->num_xfrms
= 0;
1685 dst_hold(&xdst
->u
.dst
);
1689 /* Kill the previous bundle */
1691 /* The policies were stolen for newly generated bundle */
1693 dst_free(&xdst
->u
.dst
);
1696 /* Flow cache does not have reference, it dst_free()'s,
1697 * but we do need to return one reference for original caller */
1698 dst_hold(&new_xdst
->u
.dst
);
1699 return &new_xdst
->flo
;
1702 /* We found policies, but there's no bundles to instantiate:
1703 * either because the policy blocks, has no transformations or
1704 * we could not build template (no xfrm_states).*/
1705 xdst
= xfrm_alloc_dst(net
, family
);
1707 xfrm_pols_put(pols
, num_pols
);
1708 return ERR_CAST(xdst
);
1710 xdst
->num_pols
= num_pols
;
1711 xdst
->num_xfrms
= num_xfrms
;
1712 memcpy(xdst
->pols
, pols
, sizeof(struct xfrm_policy
*) * num_pols
);
1714 dst_hold(&xdst
->u
.dst
);
1718 XFRM_INC_STATS(net
, LINUX_MIB_XFRMOUTPOLERROR
);
1721 dst_free(&xdst
->u
.dst
);
1723 xfrm_pols_put(pols
, num_pols
);
1724 return ERR_PTR(err
);
1727 /* Main function: finds/creates a bundle for given flow.
1729 * At the moment we eat a raw IP route. Mostly to speed up lookups
1730 * on interfaces with disabled IPsec.
1732 int __xfrm_lookup(struct net
*net
, struct dst_entry
**dst_p
, struct flowi
*fl
,
1733 struct sock
*sk
, int flags
)
1735 struct xfrm_policy
*pols
[XFRM_POLICY_TYPE_MAX
];
1736 struct flow_cache_object
*flo
;
1737 struct xfrm_dst
*xdst
;
1738 struct dst_entry
*dst
, *dst_orig
= *dst_p
, *route
;
1739 u16 family
= dst_orig
->ops
->family
;
1740 u8 dir
= policy_to_flow_dir(XFRM_POLICY_OUT
);
1741 int i
, err
, num_pols
, num_xfrms
= 0, drop_pols
= 0;
1748 if (sk
&& sk
->sk_policy
[XFRM_POLICY_OUT
]) {
1750 pols
[0] = xfrm_sk_policy_lookup(sk
, XFRM_POLICY_OUT
, fl
);
1751 err
= xfrm_expand_policies(fl
, family
, pols
,
1752 &num_pols
, &num_xfrms
);
1757 if (num_xfrms
<= 0) {
1758 drop_pols
= num_pols
;
1762 xdst
= xfrm_resolve_and_create_bundle(
1766 xfrm_pols_put(pols
, num_pols
);
1767 err
= PTR_ERR(xdst
);
1769 } else if (xdst
== NULL
) {
1771 drop_pols
= num_pols
;
1775 spin_lock_bh(&xfrm_policy_sk_bundle_lock
);
1776 xdst
->u
.dst
.next
= xfrm_policy_sk_bundles
;
1777 xfrm_policy_sk_bundles
= &xdst
->u
.dst
;
1778 spin_unlock_bh(&xfrm_policy_sk_bundle_lock
);
1780 route
= xdst
->route
;
1785 /* To accelerate a bit... */
1786 if ((dst_orig
->flags
& DST_NOXFRM
) ||
1787 !net
->xfrm
.policy_count
[XFRM_POLICY_OUT
])
1790 flo
= flow_cache_lookup(net
, fl
, family
, dir
,
1791 xfrm_bundle_lookup
, dst_orig
);
1798 xdst
= container_of(flo
, struct xfrm_dst
, flo
);
1800 num_pols
= xdst
->num_pols
;
1801 num_xfrms
= xdst
->num_xfrms
;
1802 memcpy(pols
, xdst
->pols
, sizeof(struct xfrm_policy
*) * num_pols
);
1803 route
= xdst
->route
;
1807 if (route
== NULL
&& num_xfrms
> 0) {
1808 /* The only case when xfrm_bundle_lookup() returns a
1809 * bundle with null route, is when the template could
1810 * not be resolved. It means policies are there, but
1811 * bundle could not be created, since we don't yet
1812 * have the xfrm_state's. We need to wait for KM to
1813 * negotiate new SA's or bail out with error.*/
1814 if (net
->xfrm
.sysctl_larval_drop
) {
1815 /* EREMOTE tells the caller to generate
1816 * a one-shot blackhole route. */
1818 xfrm_pols_put(pols
, drop_pols
);
1819 XFRM_INC_STATS(net
, LINUX_MIB_XFRMOUTNOSTATES
);
1822 if (flags
& XFRM_LOOKUP_WAIT
) {
1823 DECLARE_WAITQUEUE(wait
, current
);
1825 add_wait_queue(&net
->xfrm
.km_waitq
, &wait
);
1826 set_current_state(TASK_INTERRUPTIBLE
);
1828 set_current_state(TASK_RUNNING
);
1829 remove_wait_queue(&net
->xfrm
.km_waitq
, &wait
);
1831 if (!signal_pending(current
)) {
1840 XFRM_INC_STATS(net
, LINUX_MIB_XFRMOUTNOSTATES
);
1848 if ((flags
& XFRM_LOOKUP_ICMP
) &&
1849 !(pols
[0]->flags
& XFRM_POLICY_ICMP
)) {
1854 for (i
= 0; i
< num_pols
; i
++)
1855 pols
[i
]->curlft
.use_time
= get_seconds();
1857 if (num_xfrms
< 0) {
1858 /* Prohibit the flow */
1859 XFRM_INC_STATS(net
, LINUX_MIB_XFRMOUTPOLBLOCK
);
1862 } else if (num_xfrms
> 0) {
1863 /* Flow transformed */
1865 dst_release(dst_orig
);
1867 /* Flow passes untransformed */
1871 xfrm_pols_put(pols
, drop_pols
);
1875 if (!(flags
& XFRM_LOOKUP_ICMP
))
1881 dst_release(dst_orig
);
1883 xfrm_pols_put(pols
, drop_pols
);
1886 EXPORT_SYMBOL(__xfrm_lookup
);
1888 int xfrm_lookup(struct net
*net
, struct dst_entry
**dst_p
, struct flowi
*fl
,
1889 struct sock
*sk
, int flags
)
1891 int err
= __xfrm_lookup(net
, dst_p
, fl
, sk
, flags
);
1893 if (err
== -EREMOTE
) {
1894 dst_release(*dst_p
);
1901 EXPORT_SYMBOL(xfrm_lookup
);
1904 xfrm_secpath_reject(int idx
, struct sk_buff
*skb
, struct flowi
*fl
)
1906 struct xfrm_state
*x
;
1908 if (!skb
->sp
|| idx
< 0 || idx
>= skb
->sp
->len
)
1910 x
= skb
->sp
->xvec
[idx
];
1911 if (!x
->type
->reject
)
1913 return x
->type
->reject(x
, skb
, fl
);
1916 /* When skb is transformed back to its "native" form, we have to
1917 * check policy restrictions. At the moment we make this in maximally
1918 * stupid way. Shame on me. :-) Of course, connected sockets must
1919 * have policy cached at them.
1923 xfrm_state_ok(struct xfrm_tmpl
*tmpl
, struct xfrm_state
*x
,
1924 unsigned short family
)
1926 if (xfrm_state_kern(x
))
1927 return tmpl
->optional
&& !xfrm_state_addr_cmp(tmpl
, x
, tmpl
->encap_family
);
1928 return x
->id
.proto
== tmpl
->id
.proto
&&
1929 (x
->id
.spi
== tmpl
->id
.spi
|| !tmpl
->id
.spi
) &&
1930 (x
->props
.reqid
== tmpl
->reqid
|| !tmpl
->reqid
) &&
1931 x
->props
.mode
== tmpl
->mode
&&
1932 (tmpl
->allalgs
|| (tmpl
->aalgos
& (1<<x
->props
.aalgo
)) ||
1933 !(xfrm_id_proto_match(tmpl
->id
.proto
, IPSEC_PROTO_ANY
))) &&
1934 !(x
->props
.mode
!= XFRM_MODE_TRANSPORT
&&
1935 xfrm_state_addr_cmp(tmpl
, x
, family
));
1939 * 0 or more than 0 is returned when validation is succeeded (either bypass
1940 * because of optional transport mode, or next index of the mathced secpath
1941 * state with the template.
1942 * -1 is returned when no matching template is found.
1943 * Otherwise "-2 - errored_index" is returned.
1946 xfrm_policy_ok(struct xfrm_tmpl
*tmpl
, struct sec_path
*sp
, int start
,
1947 unsigned short family
)
1951 if (tmpl
->optional
) {
1952 if (tmpl
->mode
== XFRM_MODE_TRANSPORT
)
1956 for (; idx
< sp
->len
; idx
++) {
1957 if (xfrm_state_ok(tmpl
, sp
->xvec
[idx
], family
))
1959 if (sp
->xvec
[idx
]->props
.mode
!= XFRM_MODE_TRANSPORT
) {
1968 int __xfrm_decode_session(struct sk_buff
*skb
, struct flowi
*fl
,
1969 unsigned int family
, int reverse
)
1971 struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_get_afinfo(family
);
1974 if (unlikely(afinfo
== NULL
))
1975 return -EAFNOSUPPORT
;
1977 afinfo
->decode_session(skb
, fl
, reverse
);
1978 err
= security_xfrm_decode_session(skb
, &fl
->secid
);
1979 xfrm_policy_put_afinfo(afinfo
);
1982 EXPORT_SYMBOL(__xfrm_decode_session
);
1984 static inline int secpath_has_nontransport(struct sec_path
*sp
, int k
, int *idxp
)
1986 for (; k
< sp
->len
; k
++) {
1987 if (sp
->xvec
[k
]->props
.mode
!= XFRM_MODE_TRANSPORT
) {
1996 int __xfrm_policy_check(struct sock
*sk
, int dir
, struct sk_buff
*skb
,
1997 unsigned short family
)
1999 struct net
*net
= dev_net(skb
->dev
);
2000 struct xfrm_policy
*pol
;
2001 struct xfrm_policy
*pols
[XFRM_POLICY_TYPE_MAX
];
2010 reverse
= dir
& ~XFRM_POLICY_MASK
;
2011 dir
&= XFRM_POLICY_MASK
;
2012 fl_dir
= policy_to_flow_dir(dir
);
2014 if (__xfrm_decode_session(skb
, &fl
, family
, reverse
) < 0) {
2015 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINHDRERROR
);
2019 nf_nat_decode_session(skb
, &fl
, family
);
2021 /* First, check used SA against their selectors. */
2025 for (i
=skb
->sp
->len
-1; i
>=0; i
--) {
2026 struct xfrm_state
*x
= skb
->sp
->xvec
[i
];
2027 if (!xfrm_selector_match(&x
->sel
, &fl
, family
)) {
2028 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINSTATEMISMATCH
);
2035 if (sk
&& sk
->sk_policy
[dir
]) {
2036 pol
= xfrm_sk_policy_lookup(sk
, dir
, &fl
);
2038 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINPOLERROR
);
2044 struct flow_cache_object
*flo
;
2046 flo
= flow_cache_lookup(net
, &fl
, family
, fl_dir
,
2047 xfrm_policy_lookup
, NULL
);
2048 if (IS_ERR_OR_NULL(flo
))
2049 pol
= ERR_CAST(flo
);
2051 pol
= container_of(flo
, struct xfrm_policy
, flo
);
2055 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINPOLERROR
);
2060 if (skb
->sp
&& secpath_has_nontransport(skb
->sp
, 0, &xerr_idx
)) {
2061 xfrm_secpath_reject(xerr_idx
, skb
, &fl
);
2062 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINNOPOLS
);
2068 pol
->curlft
.use_time
= get_seconds();
2072 #ifdef CONFIG_XFRM_SUB_POLICY
2073 if (pols
[0]->type
!= XFRM_POLICY_TYPE_MAIN
) {
2074 pols
[1] = xfrm_policy_lookup_bytype(net
, XFRM_POLICY_TYPE_MAIN
,
2078 if (IS_ERR(pols
[1])) {
2079 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINPOLERROR
);
2082 pols
[1]->curlft
.use_time
= get_seconds();
2088 if (pol
->action
== XFRM_POLICY_ALLOW
) {
2089 struct sec_path
*sp
;
2090 static struct sec_path dummy
;
2091 struct xfrm_tmpl
*tp
[XFRM_MAX_DEPTH
];
2092 struct xfrm_tmpl
*stp
[XFRM_MAX_DEPTH
];
2093 struct xfrm_tmpl
**tpp
= tp
;
2097 if ((sp
= skb
->sp
) == NULL
)
2100 for (pi
= 0; pi
< npols
; pi
++) {
2101 if (pols
[pi
] != pol
&&
2102 pols
[pi
]->action
!= XFRM_POLICY_ALLOW
) {
2103 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINPOLBLOCK
);
2106 if (ti
+ pols
[pi
]->xfrm_nr
>= XFRM_MAX_DEPTH
) {
2107 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINBUFFERERROR
);
2110 for (i
= 0; i
< pols
[pi
]->xfrm_nr
; i
++)
2111 tpp
[ti
++] = &pols
[pi
]->xfrm_vec
[i
];
2115 xfrm_tmpl_sort(stp
, tpp
, xfrm_nr
, family
);
2119 /* For each tunnel xfrm, find the first matching tmpl.
2120 * For each tmpl before that, find corresponding xfrm.
2121 * Order is _important_. Later we will implement
2122 * some barriers, but at the moment barriers
2123 * are implied between each two transformations.
2125 for (i
= xfrm_nr
-1, k
= 0; i
>= 0; i
--) {
2126 k
= xfrm_policy_ok(tpp
[i
], sp
, k
, family
);
2129 /* "-2 - errored_index" returned */
2131 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINTMPLMISMATCH
);
2136 if (secpath_has_nontransport(sp
, k
, &xerr_idx
)) {
2137 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINTMPLMISMATCH
);
2141 xfrm_pols_put(pols
, npols
);
2144 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINPOLBLOCK
);
2147 xfrm_secpath_reject(xerr_idx
, skb
, &fl
);
2149 xfrm_pols_put(pols
, npols
);
2152 EXPORT_SYMBOL(__xfrm_policy_check
);
2154 int __xfrm_route_forward(struct sk_buff
*skb
, unsigned short family
)
2156 struct net
*net
= dev_net(skb
->dev
);
2158 struct dst_entry
*dst
;
2161 if (xfrm_decode_session(skb
, &fl
, family
) < 0) {
2162 XFRM_INC_STATS(net
, LINUX_MIB_XFRMFWDHDRERROR
);
2169 res
= xfrm_lookup(net
, &dst
, &fl
, NULL
, 0) == 0;
2170 skb_dst_set(skb
, dst
);
2173 EXPORT_SYMBOL(__xfrm_route_forward
);
2175 /* Optimize later using cookies and generation ids. */
2177 static struct dst_entry
*xfrm_dst_check(struct dst_entry
*dst
, u32 cookie
)
2179 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2180 * to "-1" to force all XFRM destinations to get validated by
2181 * dst_ops->check on every use. We do this because when a
2182 * normal route referenced by an XFRM dst is obsoleted we do
2183 * not go looking around for all parent referencing XFRM dsts
2184 * so that we can invalidate them. It is just too much work.
2185 * Instead we make the checks here on every use. For example:
2187 * XFRM dst A --> IPv4 dst X
2189 * X is the "xdst->route" of A (X is also the "dst->path" of A
2190 * in this example). If X is marked obsolete, "A" will not
2191 * notice. That's what we are validating here via the
2192 * stale_bundle() check.
2194 * When a policy's bundle is pruned, we dst_free() the XFRM
2195 * dst which causes it's ->obsolete field to be set to a
2196 * positive non-zero integer. If an XFRM dst has been pruned
2197 * like this, we want to force a new route lookup.
2199 if (dst
->obsolete
< 0 && !stale_bundle(dst
))
2205 static int stale_bundle(struct dst_entry
*dst
)
2207 return !xfrm_bundle_ok(NULL
, (struct xfrm_dst
*)dst
, NULL
, AF_UNSPEC
, 0);
2210 void xfrm_dst_ifdown(struct dst_entry
*dst
, struct net_device
*dev
)
2212 while ((dst
= dst
->child
) && dst
->xfrm
&& dst
->dev
== dev
) {
2213 dst
->dev
= dev_net(dev
)->loopback_dev
;
2218 EXPORT_SYMBOL(xfrm_dst_ifdown
);
2220 static void xfrm_link_failure(struct sk_buff
*skb
)
2222 /* Impossible. Such dst must be popped before reaches point of failure. */
2225 static struct dst_entry
*xfrm_negative_advice(struct dst_entry
*dst
)
2228 if (dst
->obsolete
) {
2236 static void __xfrm_garbage_collect(struct net
*net
)
2238 struct dst_entry
*head
, *next
;
2242 spin_lock_bh(&xfrm_policy_sk_bundle_lock
);
2243 head
= xfrm_policy_sk_bundles
;
2244 xfrm_policy_sk_bundles
= NULL
;
2245 spin_unlock_bh(&xfrm_policy_sk_bundle_lock
);
2254 static void xfrm_init_pmtu(struct dst_entry
*dst
)
2257 struct xfrm_dst
*xdst
= (struct xfrm_dst
*)dst
;
2258 u32 pmtu
, route_mtu_cached
;
2260 pmtu
= dst_mtu(dst
->child
);
2261 xdst
->child_mtu_cached
= pmtu
;
2263 pmtu
= xfrm_state_mtu(dst
->xfrm
, pmtu
);
2265 route_mtu_cached
= dst_mtu(xdst
->route
);
2266 xdst
->route_mtu_cached
= route_mtu_cached
;
2268 if (pmtu
> route_mtu_cached
)
2269 pmtu
= route_mtu_cached
;
2271 dst
->metrics
[RTAX_MTU
-1] = pmtu
;
2272 } while ((dst
= dst
->next
));
2275 /* Check that the bundle accepts the flow and its components are
2279 int xfrm_bundle_ok(struct xfrm_policy
*pol
, struct xfrm_dst
*first
,
2280 struct flowi
*fl
, int family
, int strict
)
2282 struct dst_entry
*dst
= &first
->u
.dst
;
2283 struct xfrm_dst
*last
;
2286 if (!dst_check(dst
->path
, ((struct xfrm_dst
*)dst
)->path_cookie
) ||
2287 (dst
->dev
&& !netif_running(dst
->dev
)))
2289 #ifdef CONFIG_XFRM_SUB_POLICY
2291 if (first
->origin
&& !flow_cache_uli_match(first
->origin
, fl
))
2293 if (first
->partner
&&
2294 !xfrm_selector_match(first
->partner
, fl
, family
))
2302 struct xfrm_dst
*xdst
= (struct xfrm_dst
*)dst
;
2304 if (fl
&& !xfrm_selector_match(&dst
->xfrm
->sel
, fl
, family
))
2307 !security_xfrm_state_pol_flow_match(dst
->xfrm
, pol
, fl
))
2309 if (dst
->xfrm
->km
.state
!= XFRM_STATE_VALID
)
2311 if (xdst
->xfrm_genid
!= dst
->xfrm
->genid
)
2313 if (xdst
->num_pols
> 0 &&
2314 xdst
->policy_genid
!= atomic_read(&xdst
->pols
[0]->genid
))
2318 !(dst
->xfrm
->outer_mode
->flags
& XFRM_MODE_FLAG_TUNNEL
) &&
2319 !xfrm_state_addr_flow_check(dst
->xfrm
, fl
, family
))
2322 mtu
= dst_mtu(dst
->child
);
2323 if (xdst
->child_mtu_cached
!= mtu
) {
2325 xdst
->child_mtu_cached
= mtu
;
2328 if (!dst_check(xdst
->route
, xdst
->route_cookie
))
2330 mtu
= dst_mtu(xdst
->route
);
2331 if (xdst
->route_mtu_cached
!= mtu
) {
2333 xdst
->route_mtu_cached
= mtu
;
2337 } while (dst
->xfrm
);
2342 mtu
= last
->child_mtu_cached
;
2346 mtu
= xfrm_state_mtu(dst
->xfrm
, mtu
);
2347 if (mtu
> last
->route_mtu_cached
)
2348 mtu
= last
->route_mtu_cached
;
2349 dst
->metrics
[RTAX_MTU
-1] = mtu
;
2354 last
= (struct xfrm_dst
*)last
->u
.dst
.next
;
2355 last
->child_mtu_cached
= mtu
;
2361 EXPORT_SYMBOL(xfrm_bundle_ok
);
2363 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo
*afinfo
)
2367 if (unlikely(afinfo
== NULL
))
2369 if (unlikely(afinfo
->family
>= NPROTO
))
2370 return -EAFNOSUPPORT
;
2371 write_lock_bh(&xfrm_policy_afinfo_lock
);
2372 if (unlikely(xfrm_policy_afinfo
[afinfo
->family
] != NULL
))
2375 struct dst_ops
*dst_ops
= afinfo
->dst_ops
;
2376 if (likely(dst_ops
->kmem_cachep
== NULL
))
2377 dst_ops
->kmem_cachep
= xfrm_dst_cache
;
2378 if (likely(dst_ops
->check
== NULL
))
2379 dst_ops
->check
= xfrm_dst_check
;
2380 if (likely(dst_ops
->negative_advice
== NULL
))
2381 dst_ops
->negative_advice
= xfrm_negative_advice
;
2382 if (likely(dst_ops
->link_failure
== NULL
))
2383 dst_ops
->link_failure
= xfrm_link_failure
;
2384 if (likely(afinfo
->garbage_collect
== NULL
))
2385 afinfo
->garbage_collect
= __xfrm_garbage_collect
;
2386 xfrm_policy_afinfo
[afinfo
->family
] = afinfo
;
2388 write_unlock_bh(&xfrm_policy_afinfo_lock
);
2392 struct dst_ops
*xfrm_dst_ops
;
2394 switch (afinfo
->family
) {
2396 xfrm_dst_ops
= &net
->xfrm
.xfrm4_dst_ops
;
2398 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2400 xfrm_dst_ops
= &net
->xfrm
.xfrm6_dst_ops
;
2406 *xfrm_dst_ops
= *afinfo
->dst_ops
;
2412 EXPORT_SYMBOL(xfrm_policy_register_afinfo
);
2414 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo
*afinfo
)
2417 if (unlikely(afinfo
== NULL
))
2419 if (unlikely(afinfo
->family
>= NPROTO
))
2420 return -EAFNOSUPPORT
;
2421 write_lock_bh(&xfrm_policy_afinfo_lock
);
2422 if (likely(xfrm_policy_afinfo
[afinfo
->family
] != NULL
)) {
2423 if (unlikely(xfrm_policy_afinfo
[afinfo
->family
] != afinfo
))
2426 struct dst_ops
*dst_ops
= afinfo
->dst_ops
;
2427 xfrm_policy_afinfo
[afinfo
->family
] = NULL
;
2428 dst_ops
->kmem_cachep
= NULL
;
2429 dst_ops
->check
= NULL
;
2430 dst_ops
->negative_advice
= NULL
;
2431 dst_ops
->link_failure
= NULL
;
2432 afinfo
->garbage_collect
= NULL
;
2435 write_unlock_bh(&xfrm_policy_afinfo_lock
);
2438 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo
);
2440 static void __net_init
xfrm_dst_ops_init(struct net
*net
)
2442 struct xfrm_policy_afinfo
*afinfo
;
2444 read_lock_bh(&xfrm_policy_afinfo_lock
);
2445 afinfo
= xfrm_policy_afinfo
[AF_INET
];
2447 net
->xfrm
.xfrm4_dst_ops
= *afinfo
->dst_ops
;
2448 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2449 afinfo
= xfrm_policy_afinfo
[AF_INET6
];
2451 net
->xfrm
.xfrm6_dst_ops
= *afinfo
->dst_ops
;
2453 read_unlock_bh(&xfrm_policy_afinfo_lock
);
2456 static struct xfrm_policy_afinfo
*xfrm_policy_get_afinfo(unsigned short family
)
2458 struct xfrm_policy_afinfo
*afinfo
;
2459 if (unlikely(family
>= NPROTO
))
2461 read_lock(&xfrm_policy_afinfo_lock
);
2462 afinfo
= xfrm_policy_afinfo
[family
];
2463 if (unlikely(!afinfo
))
2464 read_unlock(&xfrm_policy_afinfo_lock
);
2468 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo
*afinfo
)
2470 read_unlock(&xfrm_policy_afinfo_lock
);
2473 static int xfrm_dev_event(struct notifier_block
*this, unsigned long event
, void *ptr
)
2475 struct net_device
*dev
= ptr
;
2479 __xfrm_garbage_collect(dev_net(dev
));
2484 static struct notifier_block xfrm_dev_notifier
= {
2485 .notifier_call
= xfrm_dev_event
,
2488 #ifdef CONFIG_XFRM_STATISTICS
2489 static int __net_init
xfrm_statistics_init(struct net
*net
)
2493 if (snmp_mib_init((void __percpu
**)net
->mib
.xfrm_statistics
,
2494 sizeof(struct linux_xfrm_mib
),
2495 __alignof__(struct linux_xfrm_mib
)) < 0)
2497 rv
= xfrm_proc_init(net
);
2499 snmp_mib_free((void __percpu
**)net
->mib
.xfrm_statistics
);
2503 static void xfrm_statistics_fini(struct net
*net
)
2505 xfrm_proc_fini(net
);
2506 snmp_mib_free((void __percpu
**)net
->mib
.xfrm_statistics
);
2509 static int __net_init
xfrm_statistics_init(struct net
*net
)
2514 static void xfrm_statistics_fini(struct net
*net
)
2519 static int __net_init
xfrm_policy_init(struct net
*net
)
2521 unsigned int hmask
, sz
;
2524 if (net_eq(net
, &init_net
))
2525 xfrm_dst_cache
= kmem_cache_create("xfrm_dst_cache",
2526 sizeof(struct xfrm_dst
),
2527 0, SLAB_HWCACHE_ALIGN
|SLAB_PANIC
,
2531 sz
= (hmask
+1) * sizeof(struct hlist_head
);
2533 net
->xfrm
.policy_byidx
= xfrm_hash_alloc(sz
);
2534 if (!net
->xfrm
.policy_byidx
)
2536 net
->xfrm
.policy_idx_hmask
= hmask
;
2538 for (dir
= 0; dir
< XFRM_POLICY_MAX
* 2; dir
++) {
2539 struct xfrm_policy_hash
*htab
;
2541 net
->xfrm
.policy_count
[dir
] = 0;
2542 INIT_HLIST_HEAD(&net
->xfrm
.policy_inexact
[dir
]);
2544 htab
= &net
->xfrm
.policy_bydst
[dir
];
2545 htab
->table
= xfrm_hash_alloc(sz
);
2548 htab
->hmask
= hmask
;
2551 INIT_LIST_HEAD(&net
->xfrm
.policy_all
);
2552 INIT_WORK(&net
->xfrm
.policy_hash_work
, xfrm_hash_resize
);
2553 if (net_eq(net
, &init_net
))
2554 register_netdevice_notifier(&xfrm_dev_notifier
);
2558 for (dir
--; dir
>= 0; dir
--) {
2559 struct xfrm_policy_hash
*htab
;
2561 htab
= &net
->xfrm
.policy_bydst
[dir
];
2562 xfrm_hash_free(htab
->table
, sz
);
2564 xfrm_hash_free(net
->xfrm
.policy_byidx
, sz
);
static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_audit audit_info;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
#endif
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;
	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}
static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}
static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};
void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
}
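
/*
 * register_pernet_subsys() runs xfrm_net_init() for every network
 * namespace, including init_net; when a namespace dies, xfrm_net_exit()
 * tears the subsystem down in the reverse order of initialization, as
 * its body above shows.
 */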
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}
void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif /* CONFIG_AUDITSYSCALL */
#ifdef CONFIG_XFRM_MIGRATE
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
				       struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}
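
/*
 * With proto == IPSEC_ULPROTO_ANY the comparison selector acts as a
 * wildcard and only addresses, prefix lengths and family are compared;
 * otherwise the two selectors must match byte for byte.
 */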
static struct xfrm_policy *xfrm_migrate_policy_find(struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &init_net.xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}
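
/*
 * The exact (hashed) chain is searched first; an entry on the inexact
 * list can still win, but only with a strictly better (numerically
 * lower) priority than the hashed match.
 */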
static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}
/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* bump genid so stale cached bundles get rebuilt */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
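
/*
 * xfrm_migrate() below is the entry point for address-migration
 * requests (e.g. Mobile IPv6 style MIGRATE messages from a userspace
 * key manager). It proceeds in stages: validate the request, find the
 * matching policy, clone each matching state to the new addresses,
 * rewrite the policy templates, delete the old states, and finally
 * announce the change via km_migrate().
 */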
int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif /* CONFIG_XFRM_MIGRATE */