/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <asm/uaccess.h>
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by daddr to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static DEFINE_SPINLOCK(xfrm_state_lock);
/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
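/* For orientation only (a sketch, not in-tree code): output resolution
 * hashes on the destination and walks the bydst chain, while input
 * processing resolves the SA by SPI instead.  A hypothetical bydst
 * lookup looks roughly like:
 *
 *	unsigned h = xfrm_dst_hash(daddr, family);
 *	list_for_each_entry(x, xfrm_state_bydst + h, bydst)
 *		... match daddr/reqid/proto against the template ...
 *
 * xfrm_state_find() below is the full version of this loop.
 */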
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

static int xfrm_state_gc_flush_bundles;

static int __xfrm_state_delete(struct xfrm_state *x);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
static void km_state_expired(struct xfrm_state *x, int hard);
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	if (del_timer(&x->timer))
		BUG();
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	kfree(x);
}
static void xfrm_state_gc_task(void *data)
{
	struct xfrm_state *x;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	if (xfrm_state_gc_flush_bundles) {
		xfrm_state_gc_flush_bundles = 0;
		xfrm_flush_bundles();
	}

	spin_lock_bh(&xfrm_state_gc_lock);
	list_splice_init(&xfrm_state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		x = list_entry(entry, struct xfrm_state, bydst);
		xfrm_state_gc_destroy(x);
	}
	wake_up(&km_waitq);
}
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0);
resched:
	if (next != LONG_MAX &&
	    !mod_timer(&x->timer, jiffies + make_jiffies(next)))
		xfrm_state_hold(x);
	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}
	if (!__xfrm_state_delete(x) && x->id.spi)
		km_state_expired(x, 1);

out:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
}
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		memset(x, 0, sizeof(struct xfrm_state));
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->bydst);
		INIT_LIST_HEAD(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data	  = (unsigned long)x;
		x->curlft.add_time = (unsigned long)xtime.tv_sec;
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
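/* Typical lifecycle, sketched for orientation (illustrative only, error
 * handling elided; not a verbatim in-tree sequence).  A key manager
 * allocates a state, fills it in, validates it, then links it:
 *
 *	struct xfrm_state *x = xfrm_state_alloc();
 *	if (!x)
 *		return -ENOMEM;
 *	... fill x->id, x->props, x->sel and the algorithms ...
 *	err = xfrm_init_state(x);	(binds x->type, marks state VALID)
 *	if (!err)
 *		err = xfrm_state_add(x);	(links bydst/byspi, arms timer)
 */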
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	list_add(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
static int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		list_del(&x->bydst);
		atomic_dec(&x->refcnt);
		if (x->id.spi) {
			list_del(&x->byspi);
			atomic_dec(&x->refcnt);
		}
		spin_unlock(&xfrm_state_lock);
		if (del_timer(&x->timer))
			atomic_dec(&x->refcnt);

		/* The number two in this test is the reference
		 * mentioned in the comment below plus the reference
		 * our caller holds.  A larger value means that
		 * there are DSTs attached to this xfrm_state.
		 */
		if (atomic_read(&x->refcnt) > 2) {
			xfrm_state_gc_flush_bundles = 1;
			schedule_work(&xfrm_state_gc_work);
		}

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		atomic_dec(&x->refcnt);
		err = 0;
	}

	return err;
}
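/* A worked refcount ledger for the comments above (illustrative only):
 * a state resident in both tables with a pending timer holds
 * alloc(1) + bydst(1) + byspi(1) + timer(1) = 4 references, plus the
 * caller's own hold = 5.  __xfrm_state_delete() drops the bydst, byspi
 * and timer references (down to 2); anything still above 2 at the test
 * above can only be dst_entry bundles, hence the GC flush.  The final
 * atomic_dec() drops the allocation reference, leaving the caller's.
 */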
int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
void xfrm_state_flush(u8 proto)
{
	int i;
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i < XFRM_DST_HSIZE; i++) {
restart:
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				xfrm_state_delete(x);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
}
EXPORT_SYMBOL(xfrm_state_flush);
static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned h = xfrm_dst_hash(daddr, family);
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;
	struct xfrm_state_afinfo *afinfo;

	afinfo = xfrm_state_get_afinfo(family);
	if (afinfo == NULL) {
		*err = -EAFNOSUPPORT;
		return NULL;
	}

	spin_lock_bh(&xfrm_state_lock);
	list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, family))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, family))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
					       tmpl->id.proto)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			list_add_tail(&x->bydst, xfrm_state_bydst+h);
			xfrm_state_hold(x);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				list_add(&x->byspi, xfrm_state_byspi+h);
				xfrm_state_hold(x);
			}
			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
			xfrm_state_hold(x);
			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
			add_timer(&x->timer);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return x;
}
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);

	list_add(&x->bydst, xfrm_state_bydst+h);
	xfrm_state_hold(x);

	h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);

	list_add(&x->byspi, xfrm_state_byspi+h);
	xfrm_state_hold(x);

	if (!mod_timer(&x->timer, jiffies + HZ))
		xfrm_state_hold(x);

	wake_up(&km_waitq);
}

void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);

	xfrm_flush_all_bundles();
}
EXPORT_SYMBOL(xfrm_state_insert);
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_state *x1;
	int family;
	int err;

	family = x->props.family;
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);

	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (!x1)
		x1 = afinfo->find_acq(
			x->props.mode, x->props.reqid, x->id.proto,
			&x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);

	if (!err)
		xfrm_flush_all_bundles();

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_state *x1;
	int err;

	afinfo = xfrm_state_get_afinfo(x->props.family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		if (!mod_timer(&x1->timer, jiffies + HZ))
			xfrm_state_hold(x1);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		if (!mod_timer(&x->timer, jiffies))
			xfrm_state_hold(x);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
		- skb_headroom(skb);

	if (nhead > 0)
		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

	/* Check tail too... */
	return 0;
}
int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm_state_check_expire(x);
	if (err < 0)
		goto err;
	err = xfrm_state_check_space(x, skb);
err:
	return err;
}
EXPORT_SYMBOL(xfrm_state_check);
struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
		  unsigned short family)
{
	struct xfrm_state *x;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return NULL;

	spin_lock_bh(&xfrm_state_lock);
	x = afinfo->state_lookup(daddr, spi, proto);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);
struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
	      xfrm_address_t *daddr, xfrm_address_t *saddr,
	      int create, unsigned short family)
{
	struct xfrm_state *x;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return NULL;

	spin_lock_bh(&xfrm_state_lock);
	x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);
/* Crude, but effective: walk every bucket rather than keeping a
 * separate resolution list. */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;
	struct xfrm_state *x;

	for (i = 0; i < XFRM_DST_HSIZE; i++) {
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_find_acq_byseq(seq);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);
u32 xfrm_get_acqseq(void)
{
	u32 res;
	static u32 acqseq;
	static DEFINE_SPINLOCK(acqseq_lock);

	spin_lock_bh(&acqseq_lock);
	res = (++acqseq ? : ++acqseq);
	spin_unlock_bh(&acqseq_lock);
	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
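/* The `(++acqseq ? : ++acqseq)` idiom only skips zero on wraparound,
 * since 0 is not a usable acquire sequence number.  Worked example
 * (illustrative values): with acqseq == 0xffffffff, the first
 * increment yields 0, which is false, so the second increment runs
 * and res becomes 1.
 */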
void
xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
{
	u32 h;
	struct xfrm_state *x0;

	if (x->id.spi)
		return;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			return;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		minspi = ntohl(minspi);
		maxspi = ntohl(maxspi);
		for (h=0; h<maxspi-minspi+1; h++) {
			spi = minspi + net_random()%(maxspi-minspi+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		list_add(&x->byspi, xfrm_state_byspi+h);
		xfrm_state_hold(x);
		spin_unlock_bh(&xfrm_state_lock);
		wake_up(&km_waitq);
	}
}
EXPORT_SYMBOL(xfrm_alloc_spi);
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i < XFRM_DST_HSIZE; i++) {
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
				count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}

	for (i = 0; i < XFRM_DST_HSIZE; i++) {
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
				continue;
			err = func(x, --count, data);
			if (err)
				goto out;
		}
	}
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
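/* Sketch of a walker callback (hypothetical, not in-tree code).  The
 * second argument counts down and reaches 0 on the last matching
 * state, which lets a dumper flag its final message:
 *
 *	static int dump_one(struct xfrm_state *x, int count, void *ptr)
 *	{
 *		... emit x; count == 0 marks the last entry ...
 *		return 0;	(returning non-zero aborts the walk)
 *	}
 *
 *	err = xfrm_state_walk(IPSEC_PROTO_ANY, dump_one, ptr);
 */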
int xfrm_replay_check(struct xfrm_state *x, u32 seq)
{
	u32 diff;

	seq = ntohl(seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= x->props.replay_window) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);
void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
{
	u32 diff;

	seq = ntohl(seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}
}
EXPORT_SYMBOL(xfrm_replay_advance);
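/* Worked example of the sliding replay window (illustrative numbers):
 * with replay_window = 32 and x->replay.seq = 100, bit 0 of the bitmap
 * stands for seq 100 and bit 5 for seq 95.
 *
 *   - seq 103 arrives: 103 > 100, diff = 3 < 32, so the bitmap shifts
 *     left by 3, bit 0 is set, and replay.seq becomes 103.
 *   - seq 95 arrives again: xfrm_replay_check() computes
 *     diff = 103 - 95 = 8; if bit 8 is already set the packet is
 *     dropped and stats.replay is bumped.
 *   - seq 60 arrives: diff = 43 >= 32, outside the window, dropped
 *     with stats.replay_window bumped.
 */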
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);
static void km_state_expired(struct xfrm_state *x, int hard)
{
	struct km_event c;

	c.data.hard = hard;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);

	if (hard)
		wake_up(&km_waitq);
}
/*
 * We send to all registered managers regardless of failure;
 * we are happy with one success.
 */
static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
		if (!acqret)
			err = acqret;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
{
	struct km_event c;

	c.data.hard = hard;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);

	if (hard)
		wake_up(&km_waitq);
}
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk->sk_family, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		afinfo->state_bydst = xfrm_state_bydst;
		afinfo->state_byspi = xfrm_state_byspi;
		xfrm_state_afinfo[afinfo->family] = afinfo;
	}
	write_unlock(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			xfrm_state_afinfo[afinfo->family] = NULL;
			afinfo->state_byspi = NULL;
			afinfo->state_bydst = NULL;
		}
	}
	write_unlock(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (likely(afinfo != NULL))
		read_lock(&afinfo->lock);
	read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	if (unlikely(afinfo == NULL))
		return;
	read_unlock(&afinfo->lock);
}
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
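/* tunnel_users ledger (an illustration of the check above, assuming
 * the usual IPcomp arrangement): users of a shared tunnel state `t`
 * each hold one tunnel_users reference on top of the tunnel's own
 * baseline of one.  A reading of exactly 2 therefore means the caller
 * is the last remaining user, so `t` is deleted outright; either way
 * the caller's share is dropped afterwards.
 */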
/*
 * This function is NOT optimal.  For example, with ESP it will give an
 * MTU that's usually two bytes short of being optimal.  However, it will
 * usually give an answer that's a multiple of 4 provided the input is
 * also a multiple of 4.
 */
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res = mtu;

	res -= x->props.header_len;

	for (;;) {
		int m = res;

		if (m < 68)
			return 68;

		spin_lock_bh(&x->lock);
		if (x->km.state == XFRM_STATE_VALID &&
		    x->type && x->type->get_max_size)
			m = x->type->get_max_size(x, m);
		else
			m += x->props.header_len;
		spin_unlock_bh(&x->lock);

		if (m <= mtu)
			break;
		res -= (m - mtu);
	}

	return res;
}
EXPORT_SYMBOL(xfrm_state_mtu);
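/* Worked example of the loop above (hypothetical numbers): with
 * mtu = 1500 and header_len = 28, res starts at 1472.  If the type's
 * get_max_size() pads 1472 up to, say, 1504 on the wire, the loop
 * shaves off the 4-byte overshoot (res = 1468) and retries until the
 * padded size fits the real MTU, returning the largest payload size
 * that does.
 */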
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (afinfo == NULL)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}
EXPORT_SYMBOL(xfrm_init_state);
void __init xfrm_state_init(void)
{
	int i;

	for (i=0; i<XFRM_DST_HSIZE; i++) {
		INIT_LIST_HEAD(&xfrm_state_bydst[i]);
		INIT_LIST_HEAD(&xfrm_state_byspi[i]);
	}
	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
}
);