6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * Kazunori MIYAZAWA @USAGI
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
16 #include <linux/slab.h>
17 #include <linux/kmod.h>
18 #include <linux/list.h>
19 #include <linux/spinlock.h>
20 #include <linux/workqueue.h>
21 #include <linux/notifier.h>
22 #include <linux/netdevice.h>
23 #include <linux/netfilter.h>
24 #include <linux/module.h>
28 DEFINE_MUTEX(xfrm_cfg_mutex
);
29 EXPORT_SYMBOL(xfrm_cfg_mutex
);
31 static DEFINE_RWLOCK(xfrm_policy_lock
);
33 struct xfrm_policy
*xfrm_policy_list
[XFRM_POLICY_MAX
*2];
34 EXPORT_SYMBOL(xfrm_policy_list
);
35 #ifdef CONFIG_XFRM_SUB_POLICY
36 struct xfrm_policy
*xfrm_policy_list_sub
[XFRM_POLICY_MAX
*2];
37 EXPORT_SYMBOL(xfrm_policy_list_sub
);
39 #define XFRM_POLICY_LISTS(type) \
40 ((type == XFRM_POLICY_TYPE_SUB) ? xfrm_policy_list_sub : \
/*
 * Pick the head of the per-direction policy list for @dir: sub-type
 * policies live on xfrm_policy_list_sub, everything else on the main
 * xfrm_policy_list.
 */
#define XFRM_POLICY_LISTHEAD(type, dir) \
((type == XFRM_POLICY_TYPE_SUB) ? xfrm_policy_list_sub[dir] : \
xfrm_policy_list[dir])
/* Same selection as above, but yields the address of the list-head slot
 * so callers can unlink/insert entries in place. */
#define XFRM_POLICY_LISTHEADP(type, dir) \
((type == XFRM_POLICY_TYPE_SUB) ? &xfrm_policy_list_sub[dir] : \
&xfrm_policy_list[dir])
/*
 * Fallback variants used when CONFIG_XFRM_SUB_POLICY is not set: only
 * the main policy lists exist, so every lookup resolves to
 * xfrm_policy_list regardless of the policy type.
 *
 * Fix: the parameter was previously spelled "dif" while the expansion
 * referenced "dir", so the macro ignored its argument and silently
 * captured whatever variable named "dir" happened to be in scope at the
 * call site. Rename the parameter so the argument is actually used.
 */
#define XFRM_POLICY_LISTS(type) xfrm_policy_list
#define XFRM_POLICY_LISTHEAD(type, dir) xfrm_policy_list[dir]
#define XFRM_POLICY_LISTHEADP(type, dir) &xfrm_policy_list[dir]
54 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock
);
55 static struct xfrm_policy_afinfo
*xfrm_policy_afinfo
[NPROTO
];
57 static kmem_cache_t
*xfrm_dst_cache __read_mostly
;
59 static struct work_struct xfrm_policy_gc_work
;
60 static struct list_head xfrm_policy_gc_list
=
61 LIST_HEAD_INIT(xfrm_policy_gc_list
);
62 static DEFINE_SPINLOCK(xfrm_policy_gc_lock
);
64 static struct xfrm_policy_afinfo
*xfrm_policy_get_afinfo(unsigned short family
);
65 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo
*afinfo
);
66 static struct xfrm_policy_afinfo
*xfrm_policy_lock_afinfo(unsigned int family
);
67 static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo
*afinfo
);
69 int xfrm_register_type(struct xfrm_type
*type
, unsigned short family
)
71 struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_lock_afinfo(family
);
72 struct xfrm_type
**typemap
;
75 if (unlikely(afinfo
== NULL
))
77 typemap
= afinfo
->type_map
;
79 if (likely(typemap
[type
->proto
] == NULL
))
80 typemap
[type
->proto
] = type
;
83 xfrm_policy_unlock_afinfo(afinfo
);
86 EXPORT_SYMBOL(xfrm_register_type
);
88 int xfrm_unregister_type(struct xfrm_type
*type
, unsigned short family
)
90 struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_lock_afinfo(family
);
91 struct xfrm_type
**typemap
;
94 if (unlikely(afinfo
== NULL
))
96 typemap
= afinfo
->type_map
;
98 if (unlikely(typemap
[type
->proto
] != type
))
101 typemap
[type
->proto
] = NULL
;
102 xfrm_policy_unlock_afinfo(afinfo
);
105 EXPORT_SYMBOL(xfrm_unregister_type
);
107 struct xfrm_type
*xfrm_get_type(u8 proto
, unsigned short family
)
109 struct xfrm_policy_afinfo
*afinfo
;
110 struct xfrm_type
**typemap
;
111 struct xfrm_type
*type
;
112 int modload_attempted
= 0;
115 afinfo
= xfrm_policy_get_afinfo(family
);
116 if (unlikely(afinfo
== NULL
))
118 typemap
= afinfo
->type_map
;
120 type
= typemap
[proto
];
121 if (unlikely(type
&& !try_module_get(type
->owner
)))
123 if (!type
&& !modload_attempted
) {
124 xfrm_policy_put_afinfo(afinfo
);
125 request_module("xfrm-type-%d-%d",
126 (int) family
, (int) proto
);
127 modload_attempted
= 1;
131 xfrm_policy_put_afinfo(afinfo
);
135 int xfrm_dst_lookup(struct xfrm_dst
**dst
, struct flowi
*fl
,
136 unsigned short family
)
138 struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_get_afinfo(family
);
141 if (unlikely(afinfo
== NULL
))
142 return -EAFNOSUPPORT
;
144 if (likely(afinfo
->dst_lookup
!= NULL
))
145 err
= afinfo
->dst_lookup(dst
, fl
);
148 xfrm_policy_put_afinfo(afinfo
);
151 EXPORT_SYMBOL(xfrm_dst_lookup
);
153 void xfrm_put_type(struct xfrm_type
*type
)
155 module_put(type
->owner
);
158 int xfrm_register_mode(struct xfrm_mode
*mode
, int family
)
160 struct xfrm_policy_afinfo
*afinfo
;
161 struct xfrm_mode
**modemap
;
164 if (unlikely(mode
->encap
>= XFRM_MODE_MAX
))
167 afinfo
= xfrm_policy_lock_afinfo(family
);
168 if (unlikely(afinfo
== NULL
))
169 return -EAFNOSUPPORT
;
172 modemap
= afinfo
->mode_map
;
173 if (likely(modemap
[mode
->encap
] == NULL
)) {
174 modemap
[mode
->encap
] = mode
;
178 xfrm_policy_unlock_afinfo(afinfo
);
181 EXPORT_SYMBOL(xfrm_register_mode
);
183 int xfrm_unregister_mode(struct xfrm_mode
*mode
, int family
)
185 struct xfrm_policy_afinfo
*afinfo
;
186 struct xfrm_mode
**modemap
;
189 if (unlikely(mode
->encap
>= XFRM_MODE_MAX
))
192 afinfo
= xfrm_policy_lock_afinfo(family
);
193 if (unlikely(afinfo
== NULL
))
194 return -EAFNOSUPPORT
;
197 modemap
= afinfo
->mode_map
;
198 if (likely(modemap
[mode
->encap
] == mode
)) {
199 modemap
[mode
->encap
] = NULL
;
203 xfrm_policy_unlock_afinfo(afinfo
);
206 EXPORT_SYMBOL(xfrm_unregister_mode
);
208 struct xfrm_mode
*xfrm_get_mode(unsigned int encap
, int family
)
210 struct xfrm_policy_afinfo
*afinfo
;
211 struct xfrm_mode
*mode
;
212 int modload_attempted
= 0;
214 if (unlikely(encap
>= XFRM_MODE_MAX
))
218 afinfo
= xfrm_policy_get_afinfo(family
);
219 if (unlikely(afinfo
== NULL
))
222 mode
= afinfo
->mode_map
[encap
];
223 if (unlikely(mode
&& !try_module_get(mode
->owner
)))
225 if (!mode
&& !modload_attempted
) {
226 xfrm_policy_put_afinfo(afinfo
);
227 request_module("xfrm-mode-%d-%d", family
, encap
);
228 modload_attempted
= 1;
232 xfrm_policy_put_afinfo(afinfo
);
236 void xfrm_put_mode(struct xfrm_mode
*mode
)
238 module_put(mode
->owner
);
241 static inline unsigned long make_jiffies(long secs
)
243 if (secs
>= (MAX_SCHEDULE_TIMEOUT
-1)/HZ
)
244 return MAX_SCHEDULE_TIMEOUT
-1;
249 static void xfrm_policy_timer(unsigned long data
)
251 struct xfrm_policy
*xp
= (struct xfrm_policy
*)data
;
252 unsigned long now
= (unsigned long)xtime
.tv_sec
;
253 long next
= LONG_MAX
;
257 read_lock(&xp
->lock
);
262 dir
= xfrm_policy_id2dir(xp
->index
);
264 if (xp
->lft
.hard_add_expires_seconds
) {
265 long tmo
= xp
->lft
.hard_add_expires_seconds
+
266 xp
->curlft
.add_time
- now
;
272 if (xp
->lft
.hard_use_expires_seconds
) {
273 long tmo
= xp
->lft
.hard_use_expires_seconds
+
274 (xp
->curlft
.use_time
? : xp
->curlft
.add_time
) - now
;
280 if (xp
->lft
.soft_add_expires_seconds
) {
281 long tmo
= xp
->lft
.soft_add_expires_seconds
+
282 xp
->curlft
.add_time
- now
;
285 tmo
= XFRM_KM_TIMEOUT
;
290 if (xp
->lft
.soft_use_expires_seconds
) {
291 long tmo
= xp
->lft
.soft_use_expires_seconds
+
292 (xp
->curlft
.use_time
? : xp
->curlft
.add_time
) - now
;
295 tmo
= XFRM_KM_TIMEOUT
;
302 km_policy_expired(xp
, dir
, 0, 0);
303 if (next
!= LONG_MAX
&&
304 !mod_timer(&xp
->timer
, jiffies
+ make_jiffies(next
)))
308 read_unlock(&xp
->lock
);
313 read_unlock(&xp
->lock
);
314 if (!xfrm_policy_delete(xp
, dir
))
315 km_policy_expired(xp
, dir
, 1, 0);
320 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
324 struct xfrm_policy
*xfrm_policy_alloc(gfp_t gfp
)
326 struct xfrm_policy
*policy
;
328 policy
= kzalloc(sizeof(struct xfrm_policy
), gfp
);
331 atomic_set(&policy
->refcnt
, 1);
332 rwlock_init(&policy
->lock
);
333 init_timer(&policy
->timer
);
334 policy
->timer
.data
= (unsigned long)policy
;
335 policy
->timer
.function
= xfrm_policy_timer
;
339 EXPORT_SYMBOL(xfrm_policy_alloc
);
341 /* Destroy xfrm_policy: descendant resources must be released to this moment. */
343 void __xfrm_policy_destroy(struct xfrm_policy
*policy
)
345 BUG_ON(!policy
->dead
);
347 BUG_ON(policy
->bundles
);
349 if (del_timer(&policy
->timer
))
352 security_xfrm_policy_free(policy
);
355 EXPORT_SYMBOL(__xfrm_policy_destroy
);
357 static void xfrm_policy_gc_kill(struct xfrm_policy
*policy
)
359 struct dst_entry
*dst
;
361 while ((dst
= policy
->bundles
) != NULL
) {
362 policy
->bundles
= dst
->next
;
366 if (del_timer(&policy
->timer
))
367 atomic_dec(&policy
->refcnt
);
369 if (atomic_read(&policy
->refcnt
) > 1)
372 xfrm_pol_put(policy
);
375 static void xfrm_policy_gc_task(void *data
)
377 struct xfrm_policy
*policy
;
378 struct list_head
*entry
, *tmp
;
379 struct list_head gc_list
= LIST_HEAD_INIT(gc_list
);
381 spin_lock_bh(&xfrm_policy_gc_lock
);
382 list_splice_init(&xfrm_policy_gc_list
, &gc_list
);
383 spin_unlock_bh(&xfrm_policy_gc_lock
);
385 list_for_each_safe(entry
, tmp
, &gc_list
) {
386 policy
= list_entry(entry
, struct xfrm_policy
, list
);
387 xfrm_policy_gc_kill(policy
);
391 /* Rule must be locked. Release descendant resources, announce
392 * entry dead. The rule must already be unlinked from the lists by this moment.
395 static void xfrm_policy_kill(struct xfrm_policy
*policy
)
399 write_lock_bh(&policy
->lock
);
402 write_unlock_bh(&policy
->lock
);
404 if (unlikely(dead
)) {
409 spin_lock(&xfrm_policy_gc_lock
);
410 list_add(&policy
->list
, &xfrm_policy_gc_list
);
411 spin_unlock(&xfrm_policy_gc_lock
);
413 schedule_work(&xfrm_policy_gc_work
);
416 /* Generate new index... KAME seems to generate them ordered by cost
417 * of an absolute unpredictability of ordering of rules. This will not pass. */
418 static u32
xfrm_gen_index(u8 type
, int dir
)
421 struct xfrm_policy
*p
;
422 static u32 idx_generator
;
425 idx
= (idx_generator
| dir
);
429 for (p
= XFRM_POLICY_LISTHEAD(type
, dir
); p
; p
= p
->next
) {
438 int xfrm_policy_insert(int dir
, struct xfrm_policy
*policy
, int excl
)
440 struct xfrm_policy
*pol
, **p
;
441 struct xfrm_policy
*delpol
= NULL
;
442 struct xfrm_policy
**newpos
= NULL
;
443 struct dst_entry
*gc_list
;
445 write_lock_bh(&xfrm_policy_lock
);
446 for (p
= XFRM_POLICY_LISTHEADP(policy
->type
, dir
); (pol
=*p
)!=NULL
;) {
447 if (!delpol
&& memcmp(&policy
->selector
, &pol
->selector
, sizeof(pol
->selector
)) == 0 &&
448 xfrm_sec_ctx_match(pol
->security
, policy
->security
)) {
450 write_unlock_bh(&xfrm_policy_lock
);
455 if (policy
->priority
> pol
->priority
)
457 } else if (policy
->priority
>= pol
->priority
) {
469 xfrm_pol_hold(policy
);
472 atomic_inc(&flow_cache_genid
);
473 policy
->index
= delpol
? delpol
->index
: xfrm_gen_index(policy
->type
, dir
);
474 policy
->curlft
.add_time
= (unsigned long)xtime
.tv_sec
;
475 policy
->curlft
.use_time
= 0;
476 if (!mod_timer(&policy
->timer
, jiffies
+ HZ
))
477 xfrm_pol_hold(policy
);
478 write_unlock_bh(&xfrm_policy_lock
);
481 xfrm_policy_kill(delpol
);
483 read_lock_bh(&xfrm_policy_lock
);
485 for (policy
= policy
->next
; policy
; policy
= policy
->next
) {
486 struct dst_entry
*dst
;
488 write_lock(&policy
->lock
);
489 dst
= policy
->bundles
;
491 struct dst_entry
*tail
= dst
;
494 tail
->next
= gc_list
;
497 policy
->bundles
= NULL
;
499 write_unlock(&policy
->lock
);
501 read_unlock_bh(&xfrm_policy_lock
);
504 struct dst_entry
*dst
= gc_list
;
512 EXPORT_SYMBOL(xfrm_policy_insert
);
514 struct xfrm_policy
*xfrm_policy_bysel_ctx(u8 type
, int dir
,
515 struct xfrm_selector
*sel
,
516 struct xfrm_sec_ctx
*ctx
, int delete)
518 struct xfrm_policy
*pol
, **p
;
520 write_lock_bh(&xfrm_policy_lock
);
521 for (p
= XFRM_POLICY_LISTHEADP(type
, dir
); (pol
=*p
)!=NULL
; p
= &pol
->next
) {
522 if ((memcmp(sel
, &pol
->selector
, sizeof(*sel
)) == 0) &&
523 (xfrm_sec_ctx_match(ctx
, pol
->security
))) {
530 write_unlock_bh(&xfrm_policy_lock
);
533 atomic_inc(&flow_cache_genid
);
534 xfrm_policy_kill(pol
);
538 EXPORT_SYMBOL(xfrm_policy_bysel_ctx
);
540 struct xfrm_policy
*xfrm_policy_byid(u8 type
, int dir
, u32 id
, int delete)
542 struct xfrm_policy
*pol
, **p
;
544 write_lock_bh(&xfrm_policy_lock
);
545 for (p
= XFRM_POLICY_LISTHEADP(type
, dir
); (pol
=*p
)!=NULL
; p
= &pol
->next
) {
546 if (pol
->index
== id
) {
553 write_unlock_bh(&xfrm_policy_lock
);
556 atomic_inc(&flow_cache_genid
);
557 xfrm_policy_kill(pol
);
561 EXPORT_SYMBOL(xfrm_policy_byid
);
563 void xfrm_policy_flush(u8 type
)
565 struct xfrm_policy
*xp
;
566 struct xfrm_policy
**p_list
= XFRM_POLICY_LISTS(type
);
569 write_lock_bh(&xfrm_policy_lock
);
570 for (dir
= 0; dir
< XFRM_POLICY_MAX
; dir
++) {
571 while ((xp
= p_list
[dir
]) != NULL
) {
572 p_list
[dir
] = xp
->next
;
573 write_unlock_bh(&xfrm_policy_lock
);
575 xfrm_policy_kill(xp
);
577 write_lock_bh(&xfrm_policy_lock
);
580 atomic_inc(&flow_cache_genid
);
581 write_unlock_bh(&xfrm_policy_lock
);
583 EXPORT_SYMBOL(xfrm_policy_flush
);
585 int xfrm_policy_walk(u8 type
, int (*func
)(struct xfrm_policy
*, int, int, void*),
588 struct xfrm_policy
*xp
;
593 read_lock_bh(&xfrm_policy_lock
);
594 for (dir
= 0; dir
< 2*XFRM_POLICY_MAX
; dir
++) {
595 for (xp
= XFRM_POLICY_LISTHEAD(type
, dir
); xp
; xp
= xp
->next
)
604 for (dir
= 0; dir
< 2*XFRM_POLICY_MAX
; dir
++) {
605 for (xp
= XFRM_POLICY_LISTHEAD(type
, dir
); xp
; xp
= xp
->next
) {
606 error
= func(xp
, dir
%XFRM_POLICY_MAX
, --count
, data
);
613 read_unlock_bh(&xfrm_policy_lock
);
616 EXPORT_SYMBOL(xfrm_policy_walk
);
618 /* Find policy to apply to this flow. */
620 static struct xfrm_policy
*xfrm_policy_lookup_bytype(u8 type
, struct flowi
*fl
,
623 struct xfrm_policy
*pol
;
625 read_lock_bh(&xfrm_policy_lock
);
626 for (pol
= XFRM_POLICY_LISTHEAD(type
, dir
); pol
; pol
= pol
->next
) {
627 struct xfrm_selector
*sel
= &pol
->selector
;
630 if (pol
->family
!= family
)
633 match
= xfrm_selector_match(sel
, fl
, family
);
636 if (!security_xfrm_policy_lookup(pol
, fl
->secid
, dir
)) {
642 read_unlock_bh(&xfrm_policy_lock
);
647 static void xfrm_policy_lookup(struct flowi
*fl
, u16 family
, u8 dir
,
648 void **objp
, atomic_t
**obj_refp
)
650 struct xfrm_policy
*pol
;
652 #ifdef CONFIG_XFRM_SUB_POLICY
653 pol
= xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB
, fl
, family
, dir
);
657 pol
= xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN
, fl
, family
, dir
);
659 #ifdef CONFIG_XFRM_SUB_POLICY
662 if ((*objp
= (void *) pol
) != NULL
)
663 *obj_refp
= &pol
->refcnt
;
666 static inline int policy_to_flow_dir(int dir
)
668 if (XFRM_POLICY_IN
== FLOW_DIR_IN
&&
669 XFRM_POLICY_OUT
== FLOW_DIR_OUT
&&
670 XFRM_POLICY_FWD
== FLOW_DIR_FWD
)
676 case XFRM_POLICY_OUT
:
678 case XFRM_POLICY_FWD
:
683 static struct xfrm_policy
*xfrm_sk_policy_lookup(struct sock
*sk
, int dir
, struct flowi
*fl
)
685 struct xfrm_policy
*pol
;
687 read_lock_bh(&xfrm_policy_lock
);
688 if ((pol
= sk
->sk_policy
[dir
]) != NULL
) {
689 int match
= xfrm_selector_match(&pol
->selector
, fl
,
694 err
= security_xfrm_policy_lookup(pol
, fl
->secid
, policy_to_flow_dir(dir
));
701 read_unlock_bh(&xfrm_policy_lock
);
705 static void __xfrm_policy_link(struct xfrm_policy
*pol
, int dir
)
707 struct xfrm_policy
**p_list
= XFRM_POLICY_LISTS(pol
->type
);
709 pol
->next
= p_list
[dir
];
714 static struct xfrm_policy
*__xfrm_policy_unlink(struct xfrm_policy
*pol
,
717 struct xfrm_policy
**polp
;
719 for (polp
= XFRM_POLICY_LISTHEADP(pol
->type
, dir
);
720 *polp
!= NULL
; polp
= &(*polp
)->next
) {
729 int xfrm_policy_delete(struct xfrm_policy
*pol
, int dir
)
731 write_lock_bh(&xfrm_policy_lock
);
732 pol
= __xfrm_policy_unlink(pol
, dir
);
733 write_unlock_bh(&xfrm_policy_lock
);
735 if (dir
< XFRM_POLICY_MAX
)
736 atomic_inc(&flow_cache_genid
);
737 xfrm_policy_kill(pol
);
742 EXPORT_SYMBOL(xfrm_policy_delete
);
744 int xfrm_sk_policy_insert(struct sock
*sk
, int dir
, struct xfrm_policy
*pol
)
746 struct xfrm_policy
*old_pol
;
748 #ifdef CONFIG_XFRM_SUB_POLICY
749 if (pol
&& pol
->type
!= XFRM_POLICY_TYPE_MAIN
)
753 write_lock_bh(&xfrm_policy_lock
);
754 old_pol
= sk
->sk_policy
[dir
];
755 sk
->sk_policy
[dir
] = pol
;
757 pol
->curlft
.add_time
= (unsigned long)xtime
.tv_sec
;
758 pol
->index
= xfrm_gen_index(pol
->type
, XFRM_POLICY_MAX
+dir
);
759 __xfrm_policy_link(pol
, XFRM_POLICY_MAX
+dir
);
762 __xfrm_policy_unlink(old_pol
, XFRM_POLICY_MAX
+dir
);
763 write_unlock_bh(&xfrm_policy_lock
);
766 xfrm_policy_kill(old_pol
);
771 static struct xfrm_policy
*clone_policy(struct xfrm_policy
*old
, int dir
)
773 struct xfrm_policy
*newp
= xfrm_policy_alloc(GFP_ATOMIC
);
776 newp
->selector
= old
->selector
;
777 if (security_xfrm_policy_clone(old
, newp
)) {
779 return NULL
; /* ENOMEM */
781 newp
->lft
= old
->lft
;
782 newp
->curlft
= old
->curlft
;
783 newp
->action
= old
->action
;
784 newp
->flags
= old
->flags
;
785 newp
->xfrm_nr
= old
->xfrm_nr
;
786 newp
->index
= old
->index
;
787 newp
->type
= old
->type
;
788 memcpy(newp
->xfrm_vec
, old
->xfrm_vec
,
789 newp
->xfrm_nr
*sizeof(struct xfrm_tmpl
));
790 write_lock_bh(&xfrm_policy_lock
);
791 __xfrm_policy_link(newp
, XFRM_POLICY_MAX
+dir
);
792 write_unlock_bh(&xfrm_policy_lock
);
798 int __xfrm_sk_clone_policy(struct sock
*sk
)
800 struct xfrm_policy
*p0
= sk
->sk_policy
[0],
801 *p1
= sk
->sk_policy
[1];
803 sk
->sk_policy
[0] = sk
->sk_policy
[1] = NULL
;
804 if (p0
&& (sk
->sk_policy
[0] = clone_policy(p0
, 0)) == NULL
)
806 if (p1
&& (sk
->sk_policy
[1] = clone_policy(p1
, 1)) == NULL
)
811 /* Resolve list of templates for the flow, given policy. */
814 xfrm_tmpl_resolve_one(struct xfrm_policy
*policy
, struct flowi
*fl
,
815 struct xfrm_state
**xfrm
,
816 unsigned short family
)
820 xfrm_address_t
*daddr
= xfrm_flowi_daddr(fl
, family
);
821 xfrm_address_t
*saddr
= xfrm_flowi_saddr(fl
, family
);
823 for (nx
=0, i
= 0; i
< policy
->xfrm_nr
; i
++) {
824 struct xfrm_state
*x
;
825 xfrm_address_t
*remote
= daddr
;
826 xfrm_address_t
*local
= saddr
;
827 struct xfrm_tmpl
*tmpl
= &policy
->xfrm_vec
[i
];
829 if (tmpl
->mode
== XFRM_MODE_TUNNEL
) {
830 remote
= &tmpl
->id
.daddr
;
831 local
= &tmpl
->saddr
;
834 x
= xfrm_state_find(remote
, local
, fl
, tmpl
, policy
, &error
, family
);
836 if (x
&& x
->km
.state
== XFRM_STATE_VALID
) {
843 error
= (x
->km
.state
== XFRM_STATE_ERROR
?
854 for (nx
--; nx
>=0; nx
--)
855 xfrm_state_put(xfrm
[nx
]);
860 xfrm_tmpl_resolve(struct xfrm_policy
**pols
, int npols
, struct flowi
*fl
,
861 struct xfrm_state
**xfrm
,
862 unsigned short family
)
864 struct xfrm_state
*tp
[XFRM_MAX_DEPTH
];
865 struct xfrm_state
**tpp
= (npols
> 1) ? tp
: xfrm
;
871 for (i
= 0; i
< npols
; i
++) {
872 if (cnx
+ pols
[i
]->xfrm_nr
>= XFRM_MAX_DEPTH
) {
877 ret
= xfrm_tmpl_resolve_one(pols
[i
], fl
, &tpp
[cnx
], family
);
885 /* found states are sorted for outbound processing */
887 xfrm_state_sort(xfrm
, tpp
, cnx
, family
);
892 for (cnx
--; cnx
>=0; cnx
--)
893 xfrm_state_put(tpp
[cnx
]);
898 /* Check that the bundle accepts the flow and its components are
902 static struct dst_entry
*
903 xfrm_find_bundle(struct flowi
*fl
, struct xfrm_policy
*policy
, unsigned short family
)
906 struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_get_afinfo(family
);
907 if (unlikely(afinfo
== NULL
))
908 return ERR_PTR(-EINVAL
);
909 x
= afinfo
->find_bundle(fl
, policy
);
910 xfrm_policy_put_afinfo(afinfo
);
914 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
915 * all the metrics... Shortly, bundle a bundle.
919 xfrm_bundle_create(struct xfrm_policy
*policy
, struct xfrm_state
**xfrm
, int nx
,
920 struct flowi
*fl
, struct dst_entry
**dst_p
,
921 unsigned short family
)
924 struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_get_afinfo(family
);
925 if (unlikely(afinfo
== NULL
))
927 err
= afinfo
->bundle_create(policy
, xfrm
, nx
, fl
, dst_p
);
928 xfrm_policy_put_afinfo(afinfo
);
933 static int stale_bundle(struct dst_entry
*dst
);
935 /* Main function: finds/creates a bundle for given flow.
937 * At the moment we eat a raw IP route. Mostly to speed up lookups
938 * on interfaces with disabled IPsec.
940 int xfrm_lookup(struct dst_entry
**dst_p
, struct flowi
*fl
,
941 struct sock
*sk
, int flags
)
943 struct xfrm_policy
*policy
;
944 struct xfrm_policy
*pols
[XFRM_POLICY_TYPE_MAX
];
949 struct xfrm_state
*xfrm
[XFRM_MAX_DEPTH
];
950 struct dst_entry
*dst
, *dst_orig
= *dst_p
;
955 u8 dir
= policy_to_flow_dir(XFRM_POLICY_OUT
);
958 genid
= atomic_read(&flow_cache_genid
);
960 for (pi
= 0; pi
< ARRAY_SIZE(pols
); pi
++)
966 if (sk
&& sk
->sk_policy
[1])
967 policy
= xfrm_sk_policy_lookup(sk
, XFRM_POLICY_OUT
, fl
);
970 /* To accelerate a bit... */
971 if ((dst_orig
->flags
& DST_NOXFRM
) || xfrm_policy_lists_empty(XFRM_POLICY_OUT
))
974 policy
= flow_cache_lookup(fl
, dst_orig
->ops
->family
,
975 dir
, xfrm_policy_lookup
);
981 family
= dst_orig
->ops
->family
;
982 policy
->curlft
.use_time
= (unsigned long)xtime
.tv_sec
;
985 xfrm_nr
+= pols
[0]->xfrm_nr
;
987 switch (policy
->action
) {
988 case XFRM_POLICY_BLOCK
:
989 /* Prohibit the flow */
993 case XFRM_POLICY_ALLOW
:
994 #ifndef CONFIG_XFRM_SUB_POLICY
995 if (policy
->xfrm_nr
== 0) {
996 /* Flow passes not transformed. */
997 xfrm_pol_put(policy
);
1002 /* Try to find matching bundle.
1004 * LATER: help from flow cache. It is optional, this
1005 * is required only for output policy.
1007 dst
= xfrm_find_bundle(fl
, policy
, family
);
1016 #ifdef CONFIG_XFRM_SUB_POLICY
1017 if (pols
[0]->type
!= XFRM_POLICY_TYPE_MAIN
) {
1018 pols
[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN
,
1022 if (pols
[1]->action
== XFRM_POLICY_BLOCK
) {
1027 xfrm_nr
+= pols
[1]->xfrm_nr
;
1032 * Because neither flowi nor bundle information knows about
1033 * transformation template size. On more than one policy usage
1034 * we can realize whether all of them is bypass or not after
1035 * they are searched. See above not-transformed bypass
1036 * is surrounded by non-sub policy configuration, too.
1039 /* Flow passes not transformed. */
1040 xfrm_pols_put(pols
, npols
);
1045 nx
= xfrm_tmpl_resolve(pols
, npols
, fl
, xfrm
, family
);
1047 if (unlikely(nx
<0)) {
1049 if (err
== -EAGAIN
&& flags
) {
1050 DECLARE_WAITQUEUE(wait
, current
);
1052 add_wait_queue(&km_waitq
, &wait
);
1053 set_current_state(TASK_INTERRUPTIBLE
);
1055 set_current_state(TASK_RUNNING
);
1056 remove_wait_queue(&km_waitq
, &wait
);
1058 nx
= xfrm_tmpl_resolve(pols
, npols
, fl
, xfrm
, family
);
1060 if (nx
== -EAGAIN
&& signal_pending(current
)) {
1064 if (nx
== -EAGAIN
||
1065 genid
!= atomic_read(&flow_cache_genid
)) {
1066 xfrm_pols_put(pols
, npols
);
1075 /* Flow passes not transformed. */
1076 xfrm_pols_put(pols
, npols
);
1081 err
= xfrm_bundle_create(policy
, xfrm
, nx
, fl
, &dst
, family
);
1083 if (unlikely(err
)) {
1085 for (i
=0; i
<nx
; i
++)
1086 xfrm_state_put(xfrm
[i
]);
1090 for (pi
= 0; pi
< npols
; pi
++) {
1091 read_lock_bh(&pols
[pi
]->lock
);
1092 pol_dead
|= pols
[pi
]->dead
;
1093 read_unlock_bh(&pols
[pi
]->lock
);
1096 write_lock_bh(&policy
->lock
);
1097 if (unlikely(pol_dead
|| stale_bundle(dst
))) {
1098 /* Wow! While we worked on resolving, this
1099 * policy has gone. Retry. It is not paranoia,
1100 * we just cannot enlist new bundle to dead object.
1101 * We can't enlist stale bundles either.
1103 write_unlock_bh(&policy
->lock
);
1107 err
= -EHOSTUNREACH
;
1110 dst
->next
= policy
->bundles
;
1111 policy
->bundles
= dst
;
1113 write_unlock_bh(&policy
->lock
);
1116 dst_release(dst_orig
);
1117 xfrm_pols_put(pols
, npols
);
1121 dst_release(dst_orig
);
1122 xfrm_pols_put(pols
, npols
);
1126 EXPORT_SYMBOL(xfrm_lookup
);
1129 xfrm_secpath_reject(int idx
, struct sk_buff
*skb
, struct flowi
*fl
)
1131 struct xfrm_state
*x
;
1134 if (!skb
->sp
|| idx
< 0 || idx
>= skb
->sp
->len
)
1136 x
= skb
->sp
->xvec
[idx
];
1137 if (!x
->type
->reject
)
1140 err
= x
->type
->reject(x
, skb
, fl
);
1145 /* When skb is transformed back to its "native" form, we have to
1146 * check policy restrictions. At the moment we make this in maximally
1147 * stupid way. Shame on me. :-) Of course, connected sockets must
1148 * have policy cached at them.
1152 xfrm_state_ok(struct xfrm_tmpl
*tmpl
, struct xfrm_state
*x
,
1153 unsigned short family
)
1155 if (xfrm_state_kern(x
))
1156 return tmpl
->optional
&& !xfrm_state_addr_cmp(tmpl
, x
, family
);
1157 return x
->id
.proto
== tmpl
->id
.proto
&&
1158 (x
->id
.spi
== tmpl
->id
.spi
|| !tmpl
->id
.spi
) &&
1159 (x
->props
.reqid
== tmpl
->reqid
|| !tmpl
->reqid
) &&
1160 x
->props
.mode
== tmpl
->mode
&&
1161 ((tmpl
->aalgos
& (1<<x
->props
.aalgo
)) ||
1162 !(xfrm_id_proto_match(tmpl
->id
.proto
, IPSEC_PROTO_ANY
))) &&
1163 !(x
->props
.mode
!= XFRM_MODE_TRANSPORT
&&
1164 xfrm_state_addr_cmp(tmpl
, x
, family
));
1168 * 0 or more than 0 is returned when validation succeeds (either bypass
1169 * because of optional transport mode, or next index of the matched secpath
1170 * state with the template.
1171 * -1 is returned when no matching template is found.
1172 * Otherwise "-2 - errored_index" is returned.
1175 xfrm_policy_ok(struct xfrm_tmpl
*tmpl
, struct sec_path
*sp
, int start
,
1176 unsigned short family
)
1180 if (tmpl
->optional
) {
1181 if (tmpl
->mode
== XFRM_MODE_TRANSPORT
)
1185 for (; idx
< sp
->len
; idx
++) {
1186 if (xfrm_state_ok(tmpl
, sp
->xvec
[idx
], family
))
1188 if (sp
->xvec
[idx
]->props
.mode
!= XFRM_MODE_TRANSPORT
) {
1198 xfrm_decode_session(struct sk_buff
*skb
, struct flowi
*fl
, unsigned short family
)
1200 struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_get_afinfo(family
);
1203 if (unlikely(afinfo
== NULL
))
1204 return -EAFNOSUPPORT
;
1206 afinfo
->decode_session(skb
, fl
);
1207 err
= security_xfrm_decode_session(skb
, &fl
->secid
);
1208 xfrm_policy_put_afinfo(afinfo
);
1211 EXPORT_SYMBOL(xfrm_decode_session
);
1213 static inline int secpath_has_nontransport(struct sec_path
*sp
, int k
, int *idxp
)
1215 for (; k
< sp
->len
; k
++) {
1216 if (sp
->xvec
[k
]->props
.mode
!= XFRM_MODE_TRANSPORT
) {
1226 int __xfrm_policy_check(struct sock
*sk
, int dir
, struct sk_buff
*skb
,
1227 unsigned short family
)
1229 struct xfrm_policy
*pol
;
1230 struct xfrm_policy
*pols
[XFRM_POLICY_TYPE_MAX
];
1235 u8 fl_dir
= policy_to_flow_dir(dir
);
1237 int *xerr_idxp
= &xerr_idx
;
1239 if (xfrm_decode_session(skb
, &fl
, family
) < 0)
1241 nf_nat_decode_session(skb
, &fl
, family
);
1243 /* First, check used SA against their selectors. */
1247 for (i
=skb
->sp
->len
-1; i
>=0; i
--) {
1248 struct xfrm_state
*x
= skb
->sp
->xvec
[i
];
1249 if (!xfrm_selector_match(&x
->sel
, &fl
, family
))
1255 if (sk
&& sk
->sk_policy
[dir
])
1256 pol
= xfrm_sk_policy_lookup(sk
, dir
, &fl
);
1259 pol
= flow_cache_lookup(&fl
, family
, fl_dir
,
1260 xfrm_policy_lookup
);
1263 if (skb
->sp
&& secpath_has_nontransport(skb
->sp
, 0, xerr_idxp
)) {
1264 xfrm_secpath_reject(xerr_idx
, skb
, &fl
);
1270 pol
->curlft
.use_time
= (unsigned long)xtime
.tv_sec
;
1274 #ifdef CONFIG_XFRM_SUB_POLICY
1275 if (pols
[0]->type
!= XFRM_POLICY_TYPE_MAIN
) {
1276 pols
[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN
,
1280 pols
[1]->curlft
.use_time
= (unsigned long)xtime
.tv_sec
;
1286 if (pol
->action
== XFRM_POLICY_ALLOW
) {
1287 struct sec_path
*sp
;
1288 static struct sec_path dummy
;
1289 struct xfrm_tmpl
*tp
[XFRM_MAX_DEPTH
];
1290 struct xfrm_tmpl
*stp
[XFRM_MAX_DEPTH
];
1291 struct xfrm_tmpl
**tpp
= tp
;
1295 if ((sp
= skb
->sp
) == NULL
)
1298 for (pi
= 0; pi
< npols
; pi
++) {
1299 if (pols
[pi
] != pol
&&
1300 pols
[pi
]->action
!= XFRM_POLICY_ALLOW
)
1302 if (ti
+ pols
[pi
]->xfrm_nr
>= XFRM_MAX_DEPTH
)
1304 for (i
= 0; i
< pols
[pi
]->xfrm_nr
; i
++)
1305 tpp
[ti
++] = &pols
[pi
]->xfrm_vec
[i
];
1309 xfrm_tmpl_sort(stp
, tpp
, xfrm_nr
, family
);
1313 /* For each tunnel xfrm, find the first matching tmpl.
1314 * For each tmpl before that, find corresponding xfrm.
1315 * Order is _important_. Later we will implement
1316 * some barriers, but at the moment barriers
1317 * are implied between each two transformations.
1319 for (i
= xfrm_nr
-1, k
= 0; i
>= 0; i
--) {
1320 k
= xfrm_policy_ok(tpp
[i
], sp
, k
, family
);
1322 if (k
< -1 && xerr_idxp
)
1323 *xerr_idxp
= -(2+k
);
1328 if (secpath_has_nontransport(sp
, k
, xerr_idxp
))
1331 xfrm_pols_put(pols
, npols
);
1336 xfrm_secpath_reject(xerr_idx
, skb
, &fl
);
1338 xfrm_pols_put(pols
, npols
);
1341 EXPORT_SYMBOL(__xfrm_policy_check
);
1343 int __xfrm_route_forward(struct sk_buff
*skb
, unsigned short family
)
1347 if (xfrm_decode_session(skb
, &fl
, family
) < 0)
1350 return xfrm_lookup(&skb
->dst
, &fl
, NULL
, 0) == 0;
1352 EXPORT_SYMBOL(__xfrm_route_forward
);
1354 /* Optimize later using cookies and generation ids. */
1356 static struct dst_entry
*xfrm_dst_check(struct dst_entry
*dst
, u32 cookie
)
1358 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
1359 * to "-1" to force all XFRM destinations to get validated by
1360 * dst_ops->check on every use. We do this because when a
1361 * normal route referenced by an XFRM dst is obsoleted we do
1362 * not go looking around for all parent referencing XFRM dsts
1363 * so that we can invalidate them. It is just too much work.
1364 * Instead we make the checks here on every use. For example:
1366 * XFRM dst A --> IPv4 dst X
1368 * X is the "xdst->route" of A (X is also the "dst->path" of A
1369 * in this example). If X is marked obsolete, "A" will not
1370 * notice. That's what we are validating here via the
1371 * stale_bundle() check.
1373 * When a policy's bundle is pruned, we dst_free() the XFRM
1374 * dst which causes it's ->obsolete field to be set to a
1375 * positive non-zero integer. If an XFRM dst has been pruned
1376 * like this, we want to force a new route lookup.
1378 if (dst
->obsolete
< 0 && !stale_bundle(dst
))
1384 static int stale_bundle(struct dst_entry
*dst
)
1386 return !xfrm_bundle_ok((struct xfrm_dst
*)dst
, NULL
, AF_UNSPEC
, 0);
1389 void xfrm_dst_ifdown(struct dst_entry
*dst
, struct net_device
*dev
)
1391 while ((dst
= dst
->child
) && dst
->xfrm
&& dst
->dev
== dev
) {
1392 dst
->dev
= &loopback_dev
;
1393 dev_hold(&loopback_dev
);
1397 EXPORT_SYMBOL(xfrm_dst_ifdown
);
1399 static void xfrm_link_failure(struct sk_buff
*skb
)
1401 /* Impossible. Such dst must be popped before reaches point of failure. */
1405 static struct dst_entry
*xfrm_negative_advice(struct dst_entry
*dst
)
1408 if (dst
->obsolete
) {
1416 static void xfrm_prune_bundles(int (*func
)(struct dst_entry
*))
1419 struct xfrm_policy
*pol
;
1420 struct dst_entry
*dst
, **dstp
, *gc_list
= NULL
;
1422 read_lock_bh(&xfrm_policy_lock
);
1423 for (i
=0; i
<2*XFRM_POLICY_MAX
; i
++) {
1424 #ifdef CONFIG_XFRM_SUB_POLICY
1425 for (pol
= xfrm_policy_list_sub
[i
]; pol
; pol
= pol
->next
) {
1426 write_lock(&pol
->lock
);
1427 dstp
= &pol
->bundles
;
1428 while ((dst
=*dstp
) != NULL
) {
1431 dst
->next
= gc_list
;
1437 write_unlock(&pol
->lock
);
1441 for (pol
= xfrm_policy_list
[i
]; pol
; pol
= pol
->next
) {
1442 write_lock(&pol
->lock
);
1443 dstp
= &pol
->bundles
;
1444 while ((dst
=*dstp
) != NULL
) {
1447 dst
->next
= gc_list
;
1453 write_unlock(&pol
->lock
);
1456 read_unlock_bh(&xfrm_policy_lock
);
1460 gc_list
= dst
->next
;
1465 static int unused_bundle(struct dst_entry
*dst
)
1467 return !atomic_read(&dst
->__refcnt
);
1470 static void __xfrm_garbage_collect(void)
1472 xfrm_prune_bundles(unused_bundle
);
1475 static int xfrm_flush_bundles(void)
1477 xfrm_prune_bundles(stale_bundle
);
1481 void xfrm_init_pmtu(struct dst_entry
*dst
)
1484 struct xfrm_dst
*xdst
= (struct xfrm_dst
*)dst
;
1485 u32 pmtu
, route_mtu_cached
;
1487 pmtu
= dst_mtu(dst
->child
);
1488 xdst
->child_mtu_cached
= pmtu
;
1490 pmtu
= xfrm_state_mtu(dst
->xfrm
, pmtu
);
1492 route_mtu_cached
= dst_mtu(xdst
->route
);
1493 xdst
->route_mtu_cached
= route_mtu_cached
;
1495 if (pmtu
> route_mtu_cached
)
1496 pmtu
= route_mtu_cached
;
1498 dst
->metrics
[RTAX_MTU
-1] = pmtu
;
1499 } while ((dst
= dst
->next
));
1502 EXPORT_SYMBOL(xfrm_init_pmtu
);
1504 /* Check that the bundle accepts the flow and its components are
1508 int xfrm_bundle_ok(struct xfrm_dst
*first
, struct flowi
*fl
, int family
, int strict
)
1510 struct dst_entry
*dst
= &first
->u
.dst
;
1511 struct xfrm_dst
*last
;
1514 if (!dst_check(dst
->path
, ((struct xfrm_dst
*)dst
)->path_cookie
) ||
1515 (dst
->dev
&& !netif_running(dst
->dev
)))
1521 struct xfrm_dst
*xdst
= (struct xfrm_dst
*)dst
;
1523 if (fl
&& !xfrm_selector_match(&dst
->xfrm
->sel
, fl
, family
))
1525 if (fl
&& !security_xfrm_flow_state_match(fl
, dst
->xfrm
))
1527 if (dst
->xfrm
->km
.state
!= XFRM_STATE_VALID
)
1529 if (xdst
->genid
!= dst
->xfrm
->genid
)
1532 if (strict
&& fl
&& dst
->xfrm
->props
.mode
!= XFRM_MODE_TUNNEL
&&
1533 !xfrm_state_addr_flow_check(dst
->xfrm
, fl
, family
))
1536 mtu
= dst_mtu(dst
->child
);
1537 if (xdst
->child_mtu_cached
!= mtu
) {
1539 xdst
->child_mtu_cached
= mtu
;
1542 if (!dst_check(xdst
->route
, xdst
->route_cookie
))
1544 mtu
= dst_mtu(xdst
->route
);
1545 if (xdst
->route_mtu_cached
!= mtu
) {
1547 xdst
->route_mtu_cached
= mtu
;
1551 } while (dst
->xfrm
);
1556 mtu
= last
->child_mtu_cached
;
1560 mtu
= xfrm_state_mtu(dst
->xfrm
, mtu
);
1561 if (mtu
> last
->route_mtu_cached
)
1562 mtu
= last
->route_mtu_cached
;
1563 dst
->metrics
[RTAX_MTU
-1] = mtu
;
1568 last
= last
->u
.next
;
1569 last
->child_mtu_cached
= mtu
;
1575 EXPORT_SYMBOL(xfrm_bundle_ok
);
1577 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo
*afinfo
)
1580 if (unlikely(afinfo
== NULL
))
1582 if (unlikely(afinfo
->family
>= NPROTO
))
1583 return -EAFNOSUPPORT
;
1584 write_lock_bh(&xfrm_policy_afinfo_lock
);
1585 if (unlikely(xfrm_policy_afinfo
[afinfo
->family
] != NULL
))
1588 struct dst_ops
*dst_ops
= afinfo
->dst_ops
;
1589 if (likely(dst_ops
->kmem_cachep
== NULL
))
1590 dst_ops
->kmem_cachep
= xfrm_dst_cache
;
1591 if (likely(dst_ops
->check
== NULL
))
1592 dst_ops
->check
= xfrm_dst_check
;
1593 if (likely(dst_ops
->negative_advice
== NULL
))
1594 dst_ops
->negative_advice
= xfrm_negative_advice
;
1595 if (likely(dst_ops
->link_failure
== NULL
))
1596 dst_ops
->link_failure
= xfrm_link_failure
;
1597 if (likely(afinfo
->garbage_collect
== NULL
))
1598 afinfo
->garbage_collect
= __xfrm_garbage_collect
;
1599 xfrm_policy_afinfo
[afinfo
->family
] = afinfo
;
1601 write_unlock_bh(&xfrm_policy_afinfo_lock
);
1604 EXPORT_SYMBOL(xfrm_policy_register_afinfo
);
1606 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo
*afinfo
)
1609 if (unlikely(afinfo
== NULL
))
1611 if (unlikely(afinfo
->family
>= NPROTO
))
1612 return -EAFNOSUPPORT
;
1613 write_lock_bh(&xfrm_policy_afinfo_lock
);
1614 if (likely(xfrm_policy_afinfo
[afinfo
->family
] != NULL
)) {
1615 if (unlikely(xfrm_policy_afinfo
[afinfo
->family
] != afinfo
))
1618 struct dst_ops
*dst_ops
= afinfo
->dst_ops
;
1619 xfrm_policy_afinfo
[afinfo
->family
] = NULL
;
1620 dst_ops
->kmem_cachep
= NULL
;
1621 dst_ops
->check
= NULL
;
1622 dst_ops
->negative_advice
= NULL
;
1623 dst_ops
->link_failure
= NULL
;
1624 afinfo
->garbage_collect
= NULL
;
1627 write_unlock_bh(&xfrm_policy_afinfo_lock
);
1630 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo
);
1632 static struct xfrm_policy_afinfo
*xfrm_policy_get_afinfo(unsigned short family
)
1634 struct xfrm_policy_afinfo
*afinfo
;
1635 if (unlikely(family
>= NPROTO
))
1637 read_lock(&xfrm_policy_afinfo_lock
);
1638 afinfo
= xfrm_policy_afinfo
[family
];
1639 if (unlikely(!afinfo
))
1640 read_unlock(&xfrm_policy_afinfo_lock
);
1644 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo
*afinfo
)
1646 read_unlock(&xfrm_policy_afinfo_lock
);
1649 static struct xfrm_policy_afinfo
*xfrm_policy_lock_afinfo(unsigned int family
)
1651 struct xfrm_policy_afinfo
*afinfo
;
1652 if (unlikely(family
>= NPROTO
))
1654 write_lock_bh(&xfrm_policy_afinfo_lock
);
1655 afinfo
= xfrm_policy_afinfo
[family
];
1656 if (unlikely(!afinfo
))
1657 write_unlock_bh(&xfrm_policy_afinfo_lock
);
1661 static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo
*afinfo
)
1663 write_unlock_bh(&xfrm_policy_afinfo_lock
);
1666 static int xfrm_dev_event(struct notifier_block
*this, unsigned long event
, void *ptr
)
1670 xfrm_flush_bundles();
1675 static struct notifier_block xfrm_dev_notifier
= {
1681 static void __init
xfrm_policy_init(void)
1683 xfrm_dst_cache
= kmem_cache_create("xfrm_dst_cache",
1684 sizeof(struct xfrm_dst
),
1685 0, SLAB_HWCACHE_ALIGN
,
1687 if (!xfrm_dst_cache
)
1688 panic("XFRM: failed to allocate xfrm_dst_cache\n");
1690 INIT_WORK(&xfrm_policy_gc_work
, xfrm_policy_gc_task
, NULL
);
1691 register_netdevice_notifier(&xfrm_dev_notifier
);
1694 void __init
xfrm_init(void)