6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
24 EXPORT_SYMBOL(xfrm_nl
);
26 u32 sysctl_xfrm_aevent_etime
= XFRM_AE_ETIME
;
27 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime
);
29 u32 sysctl_xfrm_aevent_rseqth
= XFRM_AE_SEQT_SIZE
;
30 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth
);
32 /* Each xfrm_state may be linked to two tables:
34 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
35 2. Hash table by daddr to find what SAs exist for given
36 destination/tunnel endpoint. (output)
39 static DEFINE_SPINLOCK(xfrm_state_lock
);
41 /* Hash table to find appropriate SA towards given target (endpoint
42 * of tunnel or destination of transport mode) allowed by selector.
44 * Main use is finding SA after policy selected tunnel or transport mode.
45 * Also, it can be used by ah/esp icmp error handler to find offending SA.
47 static struct list_head xfrm_state_bydst
[XFRM_DST_HSIZE
];
48 static struct list_head xfrm_state_byspi
[XFRM_DST_HSIZE
];
50 DECLARE_WAIT_QUEUE_HEAD(km_waitq
);
51 EXPORT_SYMBOL(km_waitq
);
53 static DEFINE_RWLOCK(xfrm_state_afinfo_lock
);
54 static struct xfrm_state_afinfo
*xfrm_state_afinfo
[NPROTO
];
56 static struct work_struct xfrm_state_gc_work
;
57 static struct list_head xfrm_state_gc_list
= LIST_HEAD_INIT(xfrm_state_gc_list
);
58 static DEFINE_SPINLOCK(xfrm_state_gc_lock
);
60 static int xfrm_state_gc_flush_bundles
;
62 int __xfrm_state_delete(struct xfrm_state
*x
);
64 static struct xfrm_state_afinfo
*xfrm_state_get_afinfo(unsigned short family
);
65 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo
*afinfo
);
67 int km_query(struct xfrm_state
*x
, struct xfrm_tmpl
*t
, struct xfrm_policy
*pol
);
68 void km_state_expired(struct xfrm_state
*x
, int hard
, u32 pid
);
70 static void xfrm_state_gc_destroy(struct xfrm_state
*x
)
72 if (del_timer(&x
->timer
))
74 if (del_timer(&x
->rtimer
))
81 xfrm_put_mode(x
->mode
);
83 x
->type
->destructor(x
);
84 xfrm_put_type(x
->type
);
86 security_xfrm_state_free(x
);
90 static void xfrm_state_gc_task(void *data
)
93 struct list_head
*entry
, *tmp
;
94 struct list_head gc_list
= LIST_HEAD_INIT(gc_list
);
96 if (xfrm_state_gc_flush_bundles
) {
97 xfrm_state_gc_flush_bundles
= 0;
101 spin_lock_bh(&xfrm_state_gc_lock
);
102 list_splice_init(&xfrm_state_gc_list
, &gc_list
);
103 spin_unlock_bh(&xfrm_state_gc_lock
);
105 list_for_each_safe(entry
, tmp
, &gc_list
) {
106 x
= list_entry(entry
, struct xfrm_state
, bydst
);
107 xfrm_state_gc_destroy(x
);
112 static inline unsigned long make_jiffies(long secs
)
114 if (secs
>= (MAX_SCHEDULE_TIMEOUT
-1)/HZ
)
115 return MAX_SCHEDULE_TIMEOUT
-1;
120 static void xfrm_timer_handler(unsigned long data
)
122 struct xfrm_state
*x
= (struct xfrm_state
*)data
;
123 unsigned long now
= (unsigned long)xtime
.tv_sec
;
124 long next
= LONG_MAX
;
128 if (x
->km
.state
== XFRM_STATE_DEAD
)
130 if (x
->km
.state
== XFRM_STATE_EXPIRED
)
132 if (x
->lft
.hard_add_expires_seconds
) {
133 long tmo
= x
->lft
.hard_add_expires_seconds
+
134 x
->curlft
.add_time
- now
;
140 if (x
->lft
.hard_use_expires_seconds
) {
141 long tmo
= x
->lft
.hard_use_expires_seconds
+
142 (x
->curlft
.use_time
? : now
) - now
;
150 if (x
->lft
.soft_add_expires_seconds
) {
151 long tmo
= x
->lft
.soft_add_expires_seconds
+
152 x
->curlft
.add_time
- now
;
158 if (x
->lft
.soft_use_expires_seconds
) {
159 long tmo
= x
->lft
.soft_use_expires_seconds
+
160 (x
->curlft
.use_time
? : now
) - now
;
169 km_state_expired(x
, 0, 0);
171 if (next
!= LONG_MAX
&&
172 !mod_timer(&x
->timer
, jiffies
+ make_jiffies(next
)))
177 if (x
->km
.state
== XFRM_STATE_ACQ
&& x
->id
.spi
== 0) {
178 x
->km
.state
= XFRM_STATE_EXPIRED
;
183 if (!__xfrm_state_delete(x
) && x
->id
.spi
)
184 km_state_expired(x
, 1, 0);
187 spin_unlock(&x
->lock
);
/* Forward declaration: needed by xfrm_state_alloc() below. */
static void xfrm_replay_timer_handler(unsigned long data);
193 struct xfrm_state
*xfrm_state_alloc(void)
195 struct xfrm_state
*x
;
197 x
= kmalloc(sizeof(struct xfrm_state
), GFP_ATOMIC
);
200 memset(x
, 0, sizeof(struct xfrm_state
));
201 atomic_set(&x
->refcnt
, 1);
202 atomic_set(&x
->tunnel_users
, 0);
203 INIT_LIST_HEAD(&x
->bydst
);
204 INIT_LIST_HEAD(&x
->byspi
);
205 init_timer(&x
->timer
);
206 x
->timer
.function
= xfrm_timer_handler
;
207 x
->timer
.data
= (unsigned long)x
;
208 init_timer(&x
->rtimer
);
209 x
->rtimer
.function
= xfrm_replay_timer_handler
;
210 x
->rtimer
.data
= (unsigned long)x
;
211 x
->curlft
.add_time
= (unsigned long)xtime
.tv_sec
;
212 x
->lft
.soft_byte_limit
= XFRM_INF
;
213 x
->lft
.soft_packet_limit
= XFRM_INF
;
214 x
->lft
.hard_byte_limit
= XFRM_INF
;
215 x
->lft
.hard_packet_limit
= XFRM_INF
;
216 x
->replay_maxage
= 0;
217 x
->replay_maxdiff
= 0;
218 spin_lock_init(&x
->lock
);
222 EXPORT_SYMBOL(xfrm_state_alloc
);
224 void __xfrm_state_destroy(struct xfrm_state
*x
)
226 BUG_TRAP(x
->km
.state
== XFRM_STATE_DEAD
);
228 spin_lock_bh(&xfrm_state_gc_lock
);
229 list_add(&x
->bydst
, &xfrm_state_gc_list
);
230 spin_unlock_bh(&xfrm_state_gc_lock
);
231 schedule_work(&xfrm_state_gc_work
);
233 EXPORT_SYMBOL(__xfrm_state_destroy
);
235 int __xfrm_state_delete(struct xfrm_state
*x
)
239 if (x
->km
.state
!= XFRM_STATE_DEAD
) {
240 x
->km
.state
= XFRM_STATE_DEAD
;
241 spin_lock(&xfrm_state_lock
);
248 spin_unlock(&xfrm_state_lock
);
249 if (del_timer(&x
->timer
))
251 if (del_timer(&x
->rtimer
))
254 /* The number two in this test is the reference
255 * mentioned in the comment below plus the reference
256 * our caller holds. A larger value means that
257 * there are DSTs attached to this xfrm_state.
259 if (atomic_read(&x
->refcnt
) > 2) {
260 xfrm_state_gc_flush_bundles
= 1;
261 schedule_work(&xfrm_state_gc_work
);
264 /* All xfrm_state objects are created by xfrm_state_alloc.
265 * The xfrm_state_alloc call gives a reference, and that
266 * is what we are dropping here.
274 EXPORT_SYMBOL(__xfrm_state_delete
);
276 int xfrm_state_delete(struct xfrm_state
*x
)
280 spin_lock_bh(&x
->lock
);
281 err
= __xfrm_state_delete(x
);
282 spin_unlock_bh(&x
->lock
);
286 EXPORT_SYMBOL(xfrm_state_delete
);
288 void xfrm_state_flush(u8 proto
)
291 struct xfrm_state
*x
;
293 spin_lock_bh(&xfrm_state_lock
);
294 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
296 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
297 if (!xfrm_state_kern(x
) &&
298 (proto
== IPSEC_PROTO_ANY
|| x
->id
.proto
== proto
)) {
300 spin_unlock_bh(&xfrm_state_lock
);
302 xfrm_state_delete(x
);
305 spin_lock_bh(&xfrm_state_lock
);
310 spin_unlock_bh(&xfrm_state_lock
);
313 EXPORT_SYMBOL(xfrm_state_flush
);
316 xfrm_init_tempsel(struct xfrm_state
*x
, struct flowi
*fl
,
317 struct xfrm_tmpl
*tmpl
,
318 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
319 unsigned short family
)
321 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
324 afinfo
->init_tempsel(x
, fl
, tmpl
, daddr
, saddr
);
325 xfrm_state_put_afinfo(afinfo
);
330 xfrm_state_find(xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
331 struct flowi
*fl
, struct xfrm_tmpl
*tmpl
,
332 struct xfrm_policy
*pol
, int *err
,
333 unsigned short family
)
335 unsigned h
= xfrm_dst_hash(daddr
, family
);
336 struct xfrm_state
*x
, *x0
;
337 int acquire_in_progress
= 0;
339 struct xfrm_state
*best
= NULL
;
340 struct xfrm_state_afinfo
*afinfo
;
342 afinfo
= xfrm_state_get_afinfo(family
);
343 if (afinfo
== NULL
) {
344 *err
= -EAFNOSUPPORT
;
348 spin_lock_bh(&xfrm_state_lock
);
349 list_for_each_entry(x
, xfrm_state_bydst
+h
, bydst
) {
350 if (x
->props
.family
== family
&&
351 x
->props
.reqid
== tmpl
->reqid
&&
352 xfrm_state_addr_check(x
, daddr
, saddr
, family
) &&
353 tmpl
->mode
== x
->props
.mode
&&
354 tmpl
->id
.proto
== x
->id
.proto
&&
355 (tmpl
->id
.spi
== x
->id
.spi
|| !tmpl
->id
.spi
)) {
357 1. There is a valid state with matching selector.
359 2. Valid state with inappropriate selector. Skip.
361 Entering area of "sysdeps".
363 3. If state is not valid, selector is temporary,
364 it selects only session which triggered
365 previous resolution. Key manager will do
366 something to install a state with proper
369 if (x
->km
.state
== XFRM_STATE_VALID
) {
370 if (!xfrm_selector_match(&x
->sel
, fl
, family
) ||
371 !xfrm_sec_ctx_match(pol
->security
, x
->security
))
374 best
->km
.dying
> x
->km
.dying
||
375 (best
->km
.dying
== x
->km
.dying
&&
376 best
->curlft
.add_time
< x
->curlft
.add_time
))
378 } else if (x
->km
.state
== XFRM_STATE_ACQ
) {
379 acquire_in_progress
= 1;
380 } else if (x
->km
.state
== XFRM_STATE_ERROR
||
381 x
->km
.state
== XFRM_STATE_EXPIRED
) {
382 if (xfrm_selector_match(&x
->sel
, fl
, family
) &&
383 xfrm_sec_ctx_match(pol
->security
, x
->security
))
390 if (!x
&& !error
&& !acquire_in_progress
) {
392 (x0
= afinfo
->state_lookup(daddr
, tmpl
->id
.spi
,
393 tmpl
->id
.proto
)) != NULL
) {
398 x
= xfrm_state_alloc();
403 /* Initialize temporary selector matching only
404 * to current session. */
405 xfrm_init_tempsel(x
, fl
, tmpl
, daddr
, saddr
, family
);
407 if (km_query(x
, tmpl
, pol
) == 0) {
408 x
->km
.state
= XFRM_STATE_ACQ
;
409 list_add_tail(&x
->bydst
, xfrm_state_bydst
+h
);
412 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, family
);
413 list_add(&x
->byspi
, xfrm_state_byspi
+h
);
416 x
->lft
.hard_add_expires_seconds
= XFRM_ACQ_EXPIRES
;
418 x
->timer
.expires
= jiffies
+ XFRM_ACQ_EXPIRES
*HZ
;
419 add_timer(&x
->timer
);
421 x
->km
.state
= XFRM_STATE_DEAD
;
431 *err
= acquire_in_progress
? -EAGAIN
: error
;
432 spin_unlock_bh(&xfrm_state_lock
);
433 xfrm_state_put_afinfo(afinfo
);
437 static void __xfrm_state_insert(struct xfrm_state
*x
)
439 unsigned h
= xfrm_dst_hash(&x
->id
.daddr
, x
->props
.family
);
441 list_add(&x
->bydst
, xfrm_state_bydst
+h
);
444 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, x
->props
.family
);
446 list_add(&x
->byspi
, xfrm_state_byspi
+h
);
449 if (!mod_timer(&x
->timer
, jiffies
+ HZ
))
452 if (x
->replay_maxage
&&
453 !mod_timer(&x
->rtimer
, jiffies
+ x
->replay_maxage
))
459 void xfrm_state_insert(struct xfrm_state
*x
)
461 spin_lock_bh(&xfrm_state_lock
);
462 __xfrm_state_insert(x
);
463 spin_unlock_bh(&xfrm_state_lock
);
465 xfrm_flush_all_bundles();
467 EXPORT_SYMBOL(xfrm_state_insert
);
469 static struct xfrm_state
*__xfrm_find_acq_byseq(u32 seq
);
471 int xfrm_state_add(struct xfrm_state
*x
)
473 struct xfrm_state_afinfo
*afinfo
;
474 struct xfrm_state
*x1
;
478 family
= x
->props
.family
;
479 afinfo
= xfrm_state_get_afinfo(family
);
480 if (unlikely(afinfo
== NULL
))
481 return -EAFNOSUPPORT
;
483 spin_lock_bh(&xfrm_state_lock
);
485 x1
= afinfo
->state_lookup(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
);
494 x1
= __xfrm_find_acq_byseq(x
->km
.seq
);
495 if (x1
&& xfrm_addr_cmp(&x1
->id
.daddr
, &x
->id
.daddr
, family
)) {
502 x1
= afinfo
->find_acq(
503 x
->props
.mode
, x
->props
.reqid
, x
->id
.proto
,
504 &x
->id
.daddr
, &x
->props
.saddr
, 0);
506 __xfrm_state_insert(x
);
510 spin_unlock_bh(&xfrm_state_lock
);
511 xfrm_state_put_afinfo(afinfo
);
514 xfrm_flush_all_bundles();
517 xfrm_state_delete(x1
);
523 EXPORT_SYMBOL(xfrm_state_add
);
525 int xfrm_state_update(struct xfrm_state
*x
)
527 struct xfrm_state_afinfo
*afinfo
;
528 struct xfrm_state
*x1
;
531 afinfo
= xfrm_state_get_afinfo(x
->props
.family
);
532 if (unlikely(afinfo
== NULL
))
533 return -EAFNOSUPPORT
;
535 spin_lock_bh(&xfrm_state_lock
);
536 x1
= afinfo
->state_lookup(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
);
542 if (xfrm_state_kern(x1
)) {
548 if (x1
->km
.state
== XFRM_STATE_ACQ
) {
549 __xfrm_state_insert(x
);
555 spin_unlock_bh(&xfrm_state_lock
);
556 xfrm_state_put_afinfo(afinfo
);
562 xfrm_state_delete(x1
);
568 spin_lock_bh(&x1
->lock
);
569 if (likely(x1
->km
.state
== XFRM_STATE_VALID
)) {
570 if (x
->encap
&& x1
->encap
)
571 memcpy(x1
->encap
, x
->encap
, sizeof(*x1
->encap
));
572 memcpy(&x1
->lft
, &x
->lft
, sizeof(x1
->lft
));
575 if (!mod_timer(&x1
->timer
, jiffies
+ HZ
))
577 if (x1
->curlft
.use_time
)
578 xfrm_state_check_expire(x1
);
582 spin_unlock_bh(&x1
->lock
);
588 EXPORT_SYMBOL(xfrm_state_update
);
590 int xfrm_state_check_expire(struct xfrm_state
*x
)
592 if (!x
->curlft
.use_time
)
593 x
->curlft
.use_time
= (unsigned long)xtime
.tv_sec
;
595 if (x
->km
.state
!= XFRM_STATE_VALID
)
598 if (x
->curlft
.bytes
>= x
->lft
.hard_byte_limit
||
599 x
->curlft
.packets
>= x
->lft
.hard_packet_limit
) {
600 x
->km
.state
= XFRM_STATE_EXPIRED
;
601 if (!mod_timer(&x
->timer
, jiffies
))
607 (x
->curlft
.bytes
>= x
->lft
.soft_byte_limit
||
608 x
->curlft
.packets
>= x
->lft
.soft_packet_limit
)) {
610 km_state_expired(x
, 0, 0);
614 EXPORT_SYMBOL(xfrm_state_check_expire
);
616 static int xfrm_state_check_space(struct xfrm_state
*x
, struct sk_buff
*skb
)
618 int nhead
= x
->props
.header_len
+ LL_RESERVED_SPACE(skb
->dst
->dev
)
622 return pskb_expand_head(skb
, nhead
, 0, GFP_ATOMIC
);
624 /* Check tail too... */
/* Output-path check: lifetime first, then skb headroom. */
int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm_state_check_expire(x);
	if (err < 0)
		goto err;
	err = xfrm_state_check_space(x, skb);
err:
	return err;
}
EXPORT_SYMBOL(xfrm_state_check);
640 xfrm_state_lookup(xfrm_address_t
*daddr
, u32 spi
, u8 proto
,
641 unsigned short family
)
643 struct xfrm_state
*x
;
644 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
648 spin_lock_bh(&xfrm_state_lock
);
649 x
= afinfo
->state_lookup(daddr
, spi
, proto
);
650 spin_unlock_bh(&xfrm_state_lock
);
651 xfrm_state_put_afinfo(afinfo
);
654 EXPORT_SYMBOL(xfrm_state_lookup
);
657 xfrm_find_acq(u8 mode
, u32 reqid
, u8 proto
,
658 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
659 int create
, unsigned short family
)
661 struct xfrm_state
*x
;
662 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
666 spin_lock_bh(&xfrm_state_lock
);
667 x
= afinfo
->find_acq(mode
, reqid
, proto
, daddr
, saddr
, create
);
668 spin_unlock_bh(&xfrm_state_lock
);
669 xfrm_state_put_afinfo(afinfo
);
672 EXPORT_SYMBOL(xfrm_find_acq
);
674 /* Silly enough, but I'm lazy to build resolution list */
676 static struct xfrm_state
*__xfrm_find_acq_byseq(u32 seq
)
679 struct xfrm_state
*x
;
681 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
682 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
683 if (x
->km
.seq
== seq
&& x
->km
.state
== XFRM_STATE_ACQ
) {
692 struct xfrm_state
*xfrm_find_acq_byseq(u32 seq
)
694 struct xfrm_state
*x
;
696 spin_lock_bh(&xfrm_state_lock
);
697 x
= __xfrm_find_acq_byseq(seq
);
698 spin_unlock_bh(&xfrm_state_lock
);
701 EXPORT_SYMBOL(xfrm_find_acq_byseq
);
703 u32
xfrm_get_acqseq(void)
707 static DEFINE_SPINLOCK(acqseq_lock
);
709 spin_lock_bh(&acqseq_lock
);
710 res
= (++acqseq
? : ++acqseq
);
711 spin_unlock_bh(&acqseq_lock
);
714 EXPORT_SYMBOL(xfrm_get_acqseq
);
717 xfrm_alloc_spi(struct xfrm_state
*x
, u32 minspi
, u32 maxspi
)
720 struct xfrm_state
*x0
;
725 if (minspi
== maxspi
) {
726 x0
= xfrm_state_lookup(&x
->id
.daddr
, minspi
, x
->id
.proto
, x
->props
.family
);
734 minspi
= ntohl(minspi
);
735 maxspi
= ntohl(maxspi
);
736 for (h
=0; h
<maxspi
-minspi
+1; h
++) {
737 spi
= minspi
+ net_random()%(maxspi
-minspi
+1);
738 x0
= xfrm_state_lookup(&x
->id
.daddr
, htonl(spi
), x
->id
.proto
, x
->props
.family
);
740 x
->id
.spi
= htonl(spi
);
747 spin_lock_bh(&xfrm_state_lock
);
748 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, x
->props
.family
);
749 list_add(&x
->byspi
, xfrm_state_byspi
+h
);
751 spin_unlock_bh(&xfrm_state_lock
);
755 EXPORT_SYMBOL(xfrm_alloc_spi
);
757 int xfrm_state_walk(u8 proto
, int (*func
)(struct xfrm_state
*, int, void*),
761 struct xfrm_state
*x
;
765 spin_lock_bh(&xfrm_state_lock
);
766 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
767 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
768 if (proto
== IPSEC_PROTO_ANY
|| x
->id
.proto
== proto
)
777 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
778 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
779 if (proto
!= IPSEC_PROTO_ANY
&& x
->id
.proto
!= proto
)
781 err
= func(x
, --count
, data
);
787 spin_unlock_bh(&xfrm_state_lock
);
790 EXPORT_SYMBOL(xfrm_state_walk
);
793 void xfrm_replay_notify(struct xfrm_state
*x
, int event
)
796 /* we send notify messages in case
797 * 1. we updated on of the sequence numbers, and the seqno difference
798 * is at least x->replay_maxdiff, in this case we also update the
799 * timeout of our timer function
800 * 2. if x->replay_maxage has elapsed since last update,
801 * and there were changes
803 * The state structure must be locked!
807 case XFRM_REPLAY_UPDATE
:
808 if (x
->replay_maxdiff
&&
809 (x
->replay
.seq
- x
->preplay
.seq
< x
->replay_maxdiff
) &&
810 (x
->replay
.oseq
- x
->preplay
.oseq
< x
->replay_maxdiff
)) {
811 if (x
->xflags
& XFRM_TIME_DEFER
)
812 event
= XFRM_REPLAY_TIMEOUT
;
819 case XFRM_REPLAY_TIMEOUT
:
820 if ((x
->replay
.seq
== x
->preplay
.seq
) &&
821 (x
->replay
.bitmap
== x
->preplay
.bitmap
) &&
822 (x
->replay
.oseq
== x
->preplay
.oseq
)) {
823 x
->xflags
|= XFRM_TIME_DEFER
;
830 memcpy(&x
->preplay
, &x
->replay
, sizeof(struct xfrm_replay_state
));
831 c
.event
= XFRM_MSG_NEWAE
;
832 c
.data
.aevent
= event
;
833 km_state_notify(x
, &c
);
835 if (x
->replay_maxage
&&
836 !mod_timer(&x
->rtimer
, jiffies
+ x
->replay_maxage
)) {
838 x
->xflags
&= ~XFRM_TIME_DEFER
;
841 EXPORT_SYMBOL(xfrm_replay_notify
);
843 static void xfrm_replay_timer_handler(unsigned long data
)
845 struct xfrm_state
*x
= (struct xfrm_state
*)data
;
849 if (x
->km
.state
== XFRM_STATE_VALID
) {
850 if (xfrm_aevent_is_on())
851 xfrm_replay_notify(x
, XFRM_REPLAY_TIMEOUT
);
853 x
->xflags
|= XFRM_TIME_DEFER
;
856 spin_unlock(&x
->lock
);
860 int xfrm_replay_check(struct xfrm_state
*x
, u32 seq
)
866 if (unlikely(seq
== 0))
869 if (likely(seq
> x
->replay
.seq
))
872 diff
= x
->replay
.seq
- seq
;
873 if (diff
>= x
->props
.replay_window
) {
874 x
->stats
.replay_window
++;
878 if (x
->replay
.bitmap
& (1U << diff
)) {
884 EXPORT_SYMBOL(xfrm_replay_check
);
886 void xfrm_replay_advance(struct xfrm_state
*x
, u32 seq
)
892 if (seq
> x
->replay
.seq
) {
893 diff
= seq
- x
->replay
.seq
;
894 if (diff
< x
->props
.replay_window
)
895 x
->replay
.bitmap
= ((x
->replay
.bitmap
) << diff
) | 1;
897 x
->replay
.bitmap
= 1;
900 diff
= x
->replay
.seq
- seq
;
901 x
->replay
.bitmap
|= (1U << diff
);
904 if (xfrm_aevent_is_on())
905 xfrm_replay_notify(x
, XFRM_REPLAY_UPDATE
);
907 EXPORT_SYMBOL(xfrm_replay_advance
);
909 static struct list_head xfrm_km_list
= LIST_HEAD_INIT(xfrm_km_list
);
910 static DEFINE_RWLOCK(xfrm_km_lock
);
912 void km_policy_notify(struct xfrm_policy
*xp
, int dir
, struct km_event
*c
)
916 read_lock(&xfrm_km_lock
);
917 list_for_each_entry(km
, &xfrm_km_list
, list
)
918 if (km
->notify_policy
)
919 km
->notify_policy(xp
, dir
, c
);
920 read_unlock(&xfrm_km_lock
);
923 void km_state_notify(struct xfrm_state
*x
, struct km_event
*c
)
926 read_lock(&xfrm_km_lock
);
927 list_for_each_entry(km
, &xfrm_km_list
, list
)
930 read_unlock(&xfrm_km_lock
);
933 EXPORT_SYMBOL(km_policy_notify
);
934 EXPORT_SYMBOL(km_state_notify
);
936 void km_state_expired(struct xfrm_state
*x
, int hard
, u32 pid
)
942 c
.event
= XFRM_MSG_EXPIRE
;
943 km_state_notify(x
, &c
);
949 EXPORT_SYMBOL(km_state_expired
);
951 * We send to all registered managers regardless of failure
952 * We are happy with one success
954 int km_query(struct xfrm_state
*x
, struct xfrm_tmpl
*t
, struct xfrm_policy
*pol
)
956 int err
= -EINVAL
, acqret
;
959 read_lock(&xfrm_km_lock
);
960 list_for_each_entry(km
, &xfrm_km_list
, list
) {
961 acqret
= km
->acquire(x
, t
, pol
, XFRM_POLICY_OUT
);
965 read_unlock(&xfrm_km_lock
);
968 EXPORT_SYMBOL(km_query
);
970 int km_new_mapping(struct xfrm_state
*x
, xfrm_address_t
*ipaddr
, u16 sport
)
975 read_lock(&xfrm_km_lock
);
976 list_for_each_entry(km
, &xfrm_km_list
, list
) {
978 err
= km
->new_mapping(x
, ipaddr
, sport
);
982 read_unlock(&xfrm_km_lock
);
985 EXPORT_SYMBOL(km_new_mapping
);
987 void km_policy_expired(struct xfrm_policy
*pol
, int dir
, int hard
, u32 pid
)
993 c
.event
= XFRM_MSG_POLEXPIRE
;
994 km_policy_notify(pol
, dir
, &c
);
999 EXPORT_SYMBOL(km_policy_expired
);
1001 int xfrm_user_policy(struct sock
*sk
, int optname
, u8 __user
*optval
, int optlen
)
1005 struct xfrm_mgr
*km
;
1006 struct xfrm_policy
*pol
= NULL
;
1008 if (optlen
<= 0 || optlen
> PAGE_SIZE
)
1011 data
= kmalloc(optlen
, GFP_KERNEL
);
1016 if (copy_from_user(data
, optval
, optlen
))
1020 read_lock(&xfrm_km_lock
);
1021 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1022 pol
= km
->compile_policy(sk
->sk_family
, optname
, data
,
1027 read_unlock(&xfrm_km_lock
);
1030 xfrm_sk_policy_insert(sk
, err
, pol
);
1039 EXPORT_SYMBOL(xfrm_user_policy
);
1041 int xfrm_register_km(struct xfrm_mgr
*km
)
1043 write_lock_bh(&xfrm_km_lock
);
1044 list_add_tail(&km
->list
, &xfrm_km_list
);
1045 write_unlock_bh(&xfrm_km_lock
);
1048 EXPORT_SYMBOL(xfrm_register_km
);
1050 int xfrm_unregister_km(struct xfrm_mgr
*km
)
1052 write_lock_bh(&xfrm_km_lock
);
1053 list_del(&km
->list
);
1054 write_unlock_bh(&xfrm_km_lock
);
1057 EXPORT_SYMBOL(xfrm_unregister_km
);
1059 int xfrm_state_register_afinfo(struct xfrm_state_afinfo
*afinfo
)
1062 if (unlikely(afinfo
== NULL
))
1064 if (unlikely(afinfo
->family
>= NPROTO
))
1065 return -EAFNOSUPPORT
;
1066 write_lock_bh(&xfrm_state_afinfo_lock
);
1067 if (unlikely(xfrm_state_afinfo
[afinfo
->family
] != NULL
))
1070 afinfo
->state_bydst
= xfrm_state_bydst
;
1071 afinfo
->state_byspi
= xfrm_state_byspi
;
1072 xfrm_state_afinfo
[afinfo
->family
] = afinfo
;
1074 write_unlock_bh(&xfrm_state_afinfo_lock
);
1077 EXPORT_SYMBOL(xfrm_state_register_afinfo
);
1079 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo
*afinfo
)
1082 if (unlikely(afinfo
== NULL
))
1084 if (unlikely(afinfo
->family
>= NPROTO
))
1085 return -EAFNOSUPPORT
;
1086 write_lock_bh(&xfrm_state_afinfo_lock
);
1087 if (likely(xfrm_state_afinfo
[afinfo
->family
] != NULL
)) {
1088 if (unlikely(xfrm_state_afinfo
[afinfo
->family
] != afinfo
))
1091 xfrm_state_afinfo
[afinfo
->family
] = NULL
;
1092 afinfo
->state_byspi
= NULL
;
1093 afinfo
->state_bydst
= NULL
;
1096 write_unlock_bh(&xfrm_state_afinfo_lock
);
1099 EXPORT_SYMBOL(xfrm_state_unregister_afinfo
);
1101 static struct xfrm_state_afinfo
*xfrm_state_get_afinfo(unsigned short family
)
1103 struct xfrm_state_afinfo
*afinfo
;
1104 if (unlikely(family
>= NPROTO
))
1106 read_lock(&xfrm_state_afinfo_lock
);
1107 afinfo
= xfrm_state_afinfo
[family
];
1108 if (unlikely(!afinfo
))
1109 read_unlock(&xfrm_state_afinfo_lock
);
1113 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo
*afinfo
)
1115 read_unlock(&xfrm_state_afinfo_lock
);
1118 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1119 void xfrm_state_delete_tunnel(struct xfrm_state
*x
)
1122 struct xfrm_state
*t
= x
->tunnel
;
1124 if (atomic_read(&t
->tunnel_users
) == 2)
1125 xfrm_state_delete(t
);
1126 atomic_dec(&t
->tunnel_users
);
1131 EXPORT_SYMBOL(xfrm_state_delete_tunnel
);
1134 * This function is NOT optimal. For example, with ESP it will give an
1135 * MTU that's usually two bytes short of being optimal. However, it will
1136 * usually give an answer that's a multiple of 4 provided the input is
1137 * also a multiple of 4.
1139 int xfrm_state_mtu(struct xfrm_state
*x
, int mtu
)
1143 res
-= x
->props
.header_len
;
1151 spin_lock_bh(&x
->lock
);
1152 if (x
->km
.state
== XFRM_STATE_VALID
&&
1153 x
->type
&& x
->type
->get_max_size
)
1154 m
= x
->type
->get_max_size(x
, m
);
1156 m
+= x
->props
.header_len
;
1157 spin_unlock_bh(&x
->lock
);
1167 int xfrm_init_state(struct xfrm_state
*x
)
1169 struct xfrm_state_afinfo
*afinfo
;
1170 int family
= x
->props
.family
;
1173 err
= -EAFNOSUPPORT
;
1174 afinfo
= xfrm_state_get_afinfo(family
);
1179 if (afinfo
->init_flags
)
1180 err
= afinfo
->init_flags(x
);
1182 xfrm_state_put_afinfo(afinfo
);
1187 err
= -EPROTONOSUPPORT
;
1188 x
->type
= xfrm_get_type(x
->id
.proto
, family
);
1189 if (x
->type
== NULL
)
1192 err
= x
->type
->init_state(x
);
1196 x
->mode
= xfrm_get_mode(x
->props
.mode
, family
);
1197 if (x
->mode
== NULL
)
1200 x
->km
.state
= XFRM_STATE_VALID
;
1206 EXPORT_SYMBOL(xfrm_init_state
);
1208 void __init
xfrm_state_init(void)
1212 for (i
=0; i
<XFRM_DST_HSIZE
; i
++) {
1213 INIT_LIST_HEAD(&xfrm_state_bydst
[i
]);
1214 INIT_LIST_HEAD(&xfrm_state_byspi
[i
]);
1216 INIT_WORK(&xfrm_state_gc_work
, xfrm_state_gc_task
, NULL
);