6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
/* Netlink socket for xfrm event broadcasts (definition precedes this chunk). */
EXPORT_SYMBOL(xfrm_nl);

/* Interval, in seconds, between async replay-state (aevent) notifications. */
u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

/* Replay sequence-number delta that forces an immediate aevent. */
u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by daddr to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

/* Protects both hash tables below and per-state list membership. */
static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];

/* Key managers sleep here waiting for state/SPI changes. */
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

/* Guards registration/lookup of per-family afinfo ops below. */
static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

/* Deferred destruction: dead states are queued here and freed from
 * process context by xfrm_state_gc_task. */
static struct work_struct xfrm_state_gc_work;
static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

/* Set when dangling dst bundles should be flushed by the gc worker. */
static int xfrm_state_gc_flush_bundles;
/* Forward declarations; definitions appear later in this file. */
int __xfrm_state_delete(struct xfrm_state *x);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
70 static void xfrm_state_gc_destroy(struct xfrm_state
*x
)
72 if (del_timer(&x
->timer
))
74 if (del_timer(&x
->rtimer
))
81 xfrm_put_mode(x
->mode
);
83 x
->type
->destructor(x
);
84 xfrm_put_type(x
->type
);
86 security_xfrm_state_free(x
);
90 static void xfrm_state_gc_task(void *data
)
93 struct list_head
*entry
, *tmp
;
94 struct list_head gc_list
= LIST_HEAD_INIT(gc_list
);
96 if (xfrm_state_gc_flush_bundles
) {
97 xfrm_state_gc_flush_bundles
= 0;
101 spin_lock_bh(&xfrm_state_gc_lock
);
102 list_splice_init(&xfrm_state_gc_list
, &gc_list
);
103 spin_unlock_bh(&xfrm_state_gc_lock
);
105 list_for_each_safe(entry
, tmp
, &gc_list
) {
106 x
= list_entry(entry
, struct xfrm_state
, bydst
);
107 xfrm_state_gc_destroy(x
);
112 static inline unsigned long make_jiffies(long secs
)
114 if (secs
>= (MAX_SCHEDULE_TIMEOUT
-1)/HZ
)
115 return MAX_SCHEDULE_TIMEOUT
-1;
120 static void xfrm_timer_handler(unsigned long data
)
122 struct xfrm_state
*x
= (struct xfrm_state
*)data
;
123 unsigned long now
= (unsigned long)xtime
.tv_sec
;
124 long next
= LONG_MAX
;
128 if (x
->km
.state
== XFRM_STATE_DEAD
)
130 if (x
->km
.state
== XFRM_STATE_EXPIRED
)
132 if (x
->lft
.hard_add_expires_seconds
) {
133 long tmo
= x
->lft
.hard_add_expires_seconds
+
134 x
->curlft
.add_time
- now
;
140 if (x
->lft
.hard_use_expires_seconds
) {
141 long tmo
= x
->lft
.hard_use_expires_seconds
+
142 (x
->curlft
.use_time
? : now
) - now
;
150 if (x
->lft
.soft_add_expires_seconds
) {
151 long tmo
= x
->lft
.soft_add_expires_seconds
+
152 x
->curlft
.add_time
- now
;
158 if (x
->lft
.soft_use_expires_seconds
) {
159 long tmo
= x
->lft
.soft_use_expires_seconds
+
160 (x
->curlft
.use_time
? : now
) - now
;
169 km_state_expired(x
, 0, 0);
171 if (next
!= LONG_MAX
&&
172 !mod_timer(&x
->timer
, jiffies
+ make_jiffies(next
)))
177 if (x
->km
.state
== XFRM_STATE_ACQ
&& x
->id
.spi
== 0) {
178 x
->km
.state
= XFRM_STATE_EXPIRED
;
183 if (!__xfrm_state_delete(x
) && x
->id
.spi
)
184 km_state_expired(x
, 1, 0);
187 spin_unlock(&x
->lock
);
191 static void xfrm_replay_timer_handler(unsigned long data
);
193 struct xfrm_state
*xfrm_state_alloc(void)
195 struct xfrm_state
*x
;
197 x
= kzalloc(sizeof(struct xfrm_state
), GFP_ATOMIC
);
200 atomic_set(&x
->refcnt
, 1);
201 atomic_set(&x
->tunnel_users
, 0);
202 INIT_LIST_HEAD(&x
->bydst
);
203 INIT_LIST_HEAD(&x
->byspi
);
204 init_timer(&x
->timer
);
205 x
->timer
.function
= xfrm_timer_handler
;
206 x
->timer
.data
= (unsigned long)x
;
207 init_timer(&x
->rtimer
);
208 x
->rtimer
.function
= xfrm_replay_timer_handler
;
209 x
->rtimer
.data
= (unsigned long)x
;
210 x
->curlft
.add_time
= (unsigned long)xtime
.tv_sec
;
211 x
->lft
.soft_byte_limit
= XFRM_INF
;
212 x
->lft
.soft_packet_limit
= XFRM_INF
;
213 x
->lft
.hard_byte_limit
= XFRM_INF
;
214 x
->lft
.hard_packet_limit
= XFRM_INF
;
215 x
->replay_maxage
= 0;
216 x
->replay_maxdiff
= 0;
217 spin_lock_init(&x
->lock
);
221 EXPORT_SYMBOL(xfrm_state_alloc
);
223 void __xfrm_state_destroy(struct xfrm_state
*x
)
225 BUG_TRAP(x
->km
.state
== XFRM_STATE_DEAD
);
227 spin_lock_bh(&xfrm_state_gc_lock
);
228 list_add(&x
->bydst
, &xfrm_state_gc_list
);
229 spin_unlock_bh(&xfrm_state_gc_lock
);
230 schedule_work(&xfrm_state_gc_work
);
232 EXPORT_SYMBOL(__xfrm_state_destroy
);
234 int __xfrm_state_delete(struct xfrm_state
*x
)
238 if (x
->km
.state
!= XFRM_STATE_DEAD
) {
239 x
->km
.state
= XFRM_STATE_DEAD
;
240 spin_lock(&xfrm_state_lock
);
247 spin_unlock(&xfrm_state_lock
);
248 if (del_timer(&x
->timer
))
250 if (del_timer(&x
->rtimer
))
253 /* The number two in this test is the reference
254 * mentioned in the comment below plus the reference
255 * our caller holds. A larger value means that
256 * there are DSTs attached to this xfrm_state.
258 if (atomic_read(&x
->refcnt
) > 2) {
259 xfrm_state_gc_flush_bundles
= 1;
260 schedule_work(&xfrm_state_gc_work
);
263 /* All xfrm_state objects are created by xfrm_state_alloc.
264 * The xfrm_state_alloc call gives a reference, and that
265 * is what we are dropping here.
273 EXPORT_SYMBOL(__xfrm_state_delete
);
275 int xfrm_state_delete(struct xfrm_state
*x
)
279 spin_lock_bh(&x
->lock
);
280 err
= __xfrm_state_delete(x
);
281 spin_unlock_bh(&x
->lock
);
285 EXPORT_SYMBOL(xfrm_state_delete
);
287 void xfrm_state_flush(u8 proto
)
290 struct xfrm_state
*x
;
292 spin_lock_bh(&xfrm_state_lock
);
293 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
295 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
296 if (!xfrm_state_kern(x
) &&
297 (proto
== IPSEC_PROTO_ANY
|| x
->id
.proto
== proto
)) {
299 spin_unlock_bh(&xfrm_state_lock
);
301 xfrm_state_delete(x
);
304 spin_lock_bh(&xfrm_state_lock
);
309 spin_unlock_bh(&xfrm_state_lock
);
312 EXPORT_SYMBOL(xfrm_state_flush
);
315 xfrm_init_tempsel(struct xfrm_state
*x
, struct flowi
*fl
,
316 struct xfrm_tmpl
*tmpl
,
317 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
318 unsigned short family
)
320 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
323 afinfo
->init_tempsel(x
, fl
, tmpl
, daddr
, saddr
);
324 xfrm_state_put_afinfo(afinfo
);
329 xfrm_state_find(xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
330 struct flowi
*fl
, struct xfrm_tmpl
*tmpl
,
331 struct xfrm_policy
*pol
, int *err
,
332 unsigned short family
)
334 unsigned h
= xfrm_dst_hash(daddr
, family
);
335 struct xfrm_state
*x
, *x0
;
336 int acquire_in_progress
= 0;
338 struct xfrm_state
*best
= NULL
;
339 struct xfrm_state_afinfo
*afinfo
;
341 afinfo
= xfrm_state_get_afinfo(family
);
342 if (afinfo
== NULL
) {
343 *err
= -EAFNOSUPPORT
;
347 spin_lock_bh(&xfrm_state_lock
);
348 list_for_each_entry(x
, xfrm_state_bydst
+h
, bydst
) {
349 if (x
->props
.family
== family
&&
350 x
->props
.reqid
== tmpl
->reqid
&&
351 xfrm_state_addr_check(x
, daddr
, saddr
, family
) &&
352 tmpl
->mode
== x
->props
.mode
&&
353 tmpl
->id
.proto
== x
->id
.proto
&&
354 (tmpl
->id
.spi
== x
->id
.spi
|| !tmpl
->id
.spi
)) {
356 1. There is a valid state with matching selector.
358 2. Valid state with inappropriate selector. Skip.
360 Entering area of "sysdeps".
362 3. If state is not valid, selector is temporary,
363 it selects only session which triggered
364 previous resolution. Key manager will do
365 something to install a state with proper
368 if (x
->km
.state
== XFRM_STATE_VALID
) {
369 if (!xfrm_selector_match(&x
->sel
, fl
, family
) ||
370 !security_xfrm_state_pol_flow_match(x
, pol
, fl
))
373 best
->km
.dying
> x
->km
.dying
||
374 (best
->km
.dying
== x
->km
.dying
&&
375 best
->curlft
.add_time
< x
->curlft
.add_time
))
377 } else if (x
->km
.state
== XFRM_STATE_ACQ
) {
378 acquire_in_progress
= 1;
379 } else if (x
->km
.state
== XFRM_STATE_ERROR
||
380 x
->km
.state
== XFRM_STATE_EXPIRED
) {
381 if (xfrm_selector_match(&x
->sel
, fl
, family
) &&
382 security_xfrm_state_pol_flow_match(x
, pol
, fl
))
389 if (!x
&& !error
&& !acquire_in_progress
) {
391 (x0
= afinfo
->state_lookup(daddr
, tmpl
->id
.spi
,
392 tmpl
->id
.proto
)) != NULL
) {
397 x
= xfrm_state_alloc();
402 /* Initialize temporary selector matching only
403 * to current session. */
404 xfrm_init_tempsel(x
, fl
, tmpl
, daddr
, saddr
, family
);
406 error
= security_xfrm_state_alloc_acquire(x
, pol
->security
, fl
->secid
);
408 x
->km
.state
= XFRM_STATE_DEAD
;
414 if (km_query(x
, tmpl
, pol
) == 0) {
415 x
->km
.state
= XFRM_STATE_ACQ
;
416 list_add_tail(&x
->bydst
, xfrm_state_bydst
+h
);
419 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, family
);
420 list_add(&x
->byspi
, xfrm_state_byspi
+h
);
423 x
->lft
.hard_add_expires_seconds
= XFRM_ACQ_EXPIRES
;
425 x
->timer
.expires
= jiffies
+ XFRM_ACQ_EXPIRES
*HZ
;
426 add_timer(&x
->timer
);
428 x
->km
.state
= XFRM_STATE_DEAD
;
438 *err
= acquire_in_progress
? -EAGAIN
: error
;
439 spin_unlock_bh(&xfrm_state_lock
);
440 xfrm_state_put_afinfo(afinfo
);
444 static void __xfrm_state_insert(struct xfrm_state
*x
)
446 unsigned h
= xfrm_dst_hash(&x
->id
.daddr
, x
->props
.family
);
448 list_add(&x
->bydst
, xfrm_state_bydst
+h
);
451 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, x
->props
.family
);
453 list_add(&x
->byspi
, xfrm_state_byspi
+h
);
456 if (!mod_timer(&x
->timer
, jiffies
+ HZ
))
459 if (x
->replay_maxage
&&
460 !mod_timer(&x
->rtimer
, jiffies
+ x
->replay_maxage
))
466 void xfrm_state_insert(struct xfrm_state
*x
)
468 spin_lock_bh(&xfrm_state_lock
);
469 __xfrm_state_insert(x
);
470 spin_unlock_bh(&xfrm_state_lock
);
472 xfrm_flush_all_bundles();
474 EXPORT_SYMBOL(xfrm_state_insert
);
476 static struct xfrm_state
*__xfrm_find_acq_byseq(u32 seq
);
478 int xfrm_state_add(struct xfrm_state
*x
)
480 struct xfrm_state_afinfo
*afinfo
;
481 struct xfrm_state
*x1
;
485 family
= x
->props
.family
;
486 afinfo
= xfrm_state_get_afinfo(family
);
487 if (unlikely(afinfo
== NULL
))
488 return -EAFNOSUPPORT
;
490 spin_lock_bh(&xfrm_state_lock
);
492 x1
= afinfo
->state_lookup(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
);
501 x1
= __xfrm_find_acq_byseq(x
->km
.seq
);
502 if (x1
&& xfrm_addr_cmp(&x1
->id
.daddr
, &x
->id
.daddr
, family
)) {
509 x1
= afinfo
->find_acq(
510 x
->props
.mode
, x
->props
.reqid
, x
->id
.proto
,
511 &x
->id
.daddr
, &x
->props
.saddr
, 0);
513 __xfrm_state_insert(x
);
517 spin_unlock_bh(&xfrm_state_lock
);
518 xfrm_state_put_afinfo(afinfo
);
521 xfrm_flush_all_bundles();
524 xfrm_state_delete(x1
);
530 EXPORT_SYMBOL(xfrm_state_add
);
532 int xfrm_state_update(struct xfrm_state
*x
)
534 struct xfrm_state_afinfo
*afinfo
;
535 struct xfrm_state
*x1
;
538 afinfo
= xfrm_state_get_afinfo(x
->props
.family
);
539 if (unlikely(afinfo
== NULL
))
540 return -EAFNOSUPPORT
;
542 spin_lock_bh(&xfrm_state_lock
);
543 x1
= afinfo
->state_lookup(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
);
549 if (xfrm_state_kern(x1
)) {
555 if (x1
->km
.state
== XFRM_STATE_ACQ
) {
556 __xfrm_state_insert(x
);
562 spin_unlock_bh(&xfrm_state_lock
);
563 xfrm_state_put_afinfo(afinfo
);
569 xfrm_state_delete(x1
);
575 spin_lock_bh(&x1
->lock
);
576 if (likely(x1
->km
.state
== XFRM_STATE_VALID
)) {
577 if (x
->encap
&& x1
->encap
)
578 memcpy(x1
->encap
, x
->encap
, sizeof(*x1
->encap
));
579 memcpy(&x1
->lft
, &x
->lft
, sizeof(x1
->lft
));
582 if (!mod_timer(&x1
->timer
, jiffies
+ HZ
))
584 if (x1
->curlft
.use_time
)
585 xfrm_state_check_expire(x1
);
589 spin_unlock_bh(&x1
->lock
);
595 EXPORT_SYMBOL(xfrm_state_update
);
597 int xfrm_state_check_expire(struct xfrm_state
*x
)
599 if (!x
->curlft
.use_time
)
600 x
->curlft
.use_time
= (unsigned long)xtime
.tv_sec
;
602 if (x
->km
.state
!= XFRM_STATE_VALID
)
605 if (x
->curlft
.bytes
>= x
->lft
.hard_byte_limit
||
606 x
->curlft
.packets
>= x
->lft
.hard_packet_limit
) {
607 x
->km
.state
= XFRM_STATE_EXPIRED
;
608 if (!mod_timer(&x
->timer
, jiffies
))
614 (x
->curlft
.bytes
>= x
->lft
.soft_byte_limit
||
615 x
->curlft
.packets
>= x
->lft
.soft_packet_limit
)) {
617 km_state_expired(x
, 0, 0);
621 EXPORT_SYMBOL(xfrm_state_check_expire
);
623 static int xfrm_state_check_space(struct xfrm_state
*x
, struct sk_buff
*skb
)
625 int nhead
= x
->props
.header_len
+ LL_RESERVED_SPACE(skb
->dst
->dev
)
629 return pskb_expand_head(skb
, nhead
, 0, GFP_ATOMIC
);
631 /* Check tail too... */
/* Combined per-packet check: lifetime expiry, then skb headroom. */
int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
        int err = xfrm_state_check_expire(x);
        if (err < 0)
                goto err;
        err = xfrm_state_check_space(x, skb);
err:
        return err;
}
EXPORT_SYMBOL(xfrm_state_check);
647 xfrm_state_lookup(xfrm_address_t
*daddr
, u32 spi
, u8 proto
,
648 unsigned short family
)
650 struct xfrm_state
*x
;
651 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
655 spin_lock_bh(&xfrm_state_lock
);
656 x
= afinfo
->state_lookup(daddr
, spi
, proto
);
657 spin_unlock_bh(&xfrm_state_lock
);
658 xfrm_state_put_afinfo(afinfo
);
661 EXPORT_SYMBOL(xfrm_state_lookup
);
664 xfrm_find_acq(u8 mode
, u32 reqid
, u8 proto
,
665 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
666 int create
, unsigned short family
)
668 struct xfrm_state
*x
;
669 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
673 spin_lock_bh(&xfrm_state_lock
);
674 x
= afinfo
->find_acq(mode
, reqid
, proto
, daddr
, saddr
, create
);
675 spin_unlock_bh(&xfrm_state_lock
);
676 xfrm_state_put_afinfo(afinfo
);
679 EXPORT_SYMBOL(xfrm_find_acq
);
681 /* Silly enough, but I'm lazy to build resolution list */
683 static struct xfrm_state
*__xfrm_find_acq_byseq(u32 seq
)
686 struct xfrm_state
*x
;
688 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
689 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
690 if (x
->km
.seq
== seq
&& x
->km
.state
== XFRM_STATE_ACQ
) {
699 struct xfrm_state
*xfrm_find_acq_byseq(u32 seq
)
701 struct xfrm_state
*x
;
703 spin_lock_bh(&xfrm_state_lock
);
704 x
= __xfrm_find_acq_byseq(seq
);
705 spin_unlock_bh(&xfrm_state_lock
);
708 EXPORT_SYMBOL(xfrm_find_acq_byseq
);
710 u32
xfrm_get_acqseq(void)
714 static DEFINE_SPINLOCK(acqseq_lock
);
716 spin_lock_bh(&acqseq_lock
);
717 res
= (++acqseq
? : ++acqseq
);
718 spin_unlock_bh(&acqseq_lock
);
721 EXPORT_SYMBOL(xfrm_get_acqseq
);
724 xfrm_alloc_spi(struct xfrm_state
*x
, u32 minspi
, u32 maxspi
)
727 struct xfrm_state
*x0
;
732 if (minspi
== maxspi
) {
733 x0
= xfrm_state_lookup(&x
->id
.daddr
, minspi
, x
->id
.proto
, x
->props
.family
);
741 minspi
= ntohl(minspi
);
742 maxspi
= ntohl(maxspi
);
743 for (h
=0; h
<maxspi
-minspi
+1; h
++) {
744 spi
= minspi
+ net_random()%(maxspi
-minspi
+1);
745 x0
= xfrm_state_lookup(&x
->id
.daddr
, htonl(spi
), x
->id
.proto
, x
->props
.family
);
747 x
->id
.spi
= htonl(spi
);
754 spin_lock_bh(&xfrm_state_lock
);
755 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, x
->props
.family
);
756 list_add(&x
->byspi
, xfrm_state_byspi
+h
);
758 spin_unlock_bh(&xfrm_state_lock
);
762 EXPORT_SYMBOL(xfrm_alloc_spi
);
764 int xfrm_state_walk(u8 proto
, int (*func
)(struct xfrm_state
*, int, void*),
768 struct xfrm_state
*x
;
772 spin_lock_bh(&xfrm_state_lock
);
773 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
774 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
775 if (proto
== IPSEC_PROTO_ANY
|| x
->id
.proto
== proto
)
784 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
785 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
786 if (proto
!= IPSEC_PROTO_ANY
&& x
->id
.proto
!= proto
)
788 err
= func(x
, --count
, data
);
794 spin_unlock_bh(&xfrm_state_lock
);
797 EXPORT_SYMBOL(xfrm_state_walk
);
800 void xfrm_replay_notify(struct xfrm_state
*x
, int event
)
803 /* we send notify messages in case
804 * 1. we updated on of the sequence numbers, and the seqno difference
805 * is at least x->replay_maxdiff, in this case we also update the
806 * timeout of our timer function
807 * 2. if x->replay_maxage has elapsed since last update,
808 * and there were changes
810 * The state structure must be locked!
814 case XFRM_REPLAY_UPDATE
:
815 if (x
->replay_maxdiff
&&
816 (x
->replay
.seq
- x
->preplay
.seq
< x
->replay_maxdiff
) &&
817 (x
->replay
.oseq
- x
->preplay
.oseq
< x
->replay_maxdiff
)) {
818 if (x
->xflags
& XFRM_TIME_DEFER
)
819 event
= XFRM_REPLAY_TIMEOUT
;
826 case XFRM_REPLAY_TIMEOUT
:
827 if ((x
->replay
.seq
== x
->preplay
.seq
) &&
828 (x
->replay
.bitmap
== x
->preplay
.bitmap
) &&
829 (x
->replay
.oseq
== x
->preplay
.oseq
)) {
830 x
->xflags
|= XFRM_TIME_DEFER
;
837 memcpy(&x
->preplay
, &x
->replay
, sizeof(struct xfrm_replay_state
));
838 c
.event
= XFRM_MSG_NEWAE
;
839 c
.data
.aevent
= event
;
840 km_state_notify(x
, &c
);
842 if (x
->replay_maxage
&&
843 !mod_timer(&x
->rtimer
, jiffies
+ x
->replay_maxage
)) {
845 x
->xflags
&= ~XFRM_TIME_DEFER
;
848 EXPORT_SYMBOL(xfrm_replay_notify
);
850 static void xfrm_replay_timer_handler(unsigned long data
)
852 struct xfrm_state
*x
= (struct xfrm_state
*)data
;
856 if (x
->km
.state
== XFRM_STATE_VALID
) {
857 if (xfrm_aevent_is_on())
858 xfrm_replay_notify(x
, XFRM_REPLAY_TIMEOUT
);
860 x
->xflags
|= XFRM_TIME_DEFER
;
863 spin_unlock(&x
->lock
);
867 int xfrm_replay_check(struct xfrm_state
*x
, u32 seq
)
873 if (unlikely(seq
== 0))
876 if (likely(seq
> x
->replay
.seq
))
879 diff
= x
->replay
.seq
- seq
;
880 if (diff
>= x
->props
.replay_window
) {
881 x
->stats
.replay_window
++;
885 if (x
->replay
.bitmap
& (1U << diff
)) {
891 EXPORT_SYMBOL(xfrm_replay_check
);
893 void xfrm_replay_advance(struct xfrm_state
*x
, u32 seq
)
899 if (seq
> x
->replay
.seq
) {
900 diff
= seq
- x
->replay
.seq
;
901 if (diff
< x
->props
.replay_window
)
902 x
->replay
.bitmap
= ((x
->replay
.bitmap
) << diff
) | 1;
904 x
->replay
.bitmap
= 1;
907 diff
= x
->replay
.seq
- seq
;
908 x
->replay
.bitmap
|= (1U << diff
);
911 if (xfrm_aevent_is_on())
912 xfrm_replay_notify(x
, XFRM_REPLAY_UPDATE
);
914 EXPORT_SYMBOL(xfrm_replay_advance
);
/* Registered key managers (PF_KEY, netlink) and the lock guarding the list. */
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
919 void km_policy_notify(struct xfrm_policy
*xp
, int dir
, struct km_event
*c
)
923 read_lock(&xfrm_km_lock
);
924 list_for_each_entry(km
, &xfrm_km_list
, list
)
925 if (km
->notify_policy
)
926 km
->notify_policy(xp
, dir
, c
);
927 read_unlock(&xfrm_km_lock
);
930 void km_state_notify(struct xfrm_state
*x
, struct km_event
*c
)
933 read_lock(&xfrm_km_lock
);
934 list_for_each_entry(km
, &xfrm_km_list
, list
)
937 read_unlock(&xfrm_km_lock
);
940 EXPORT_SYMBOL(km_policy_notify
);
941 EXPORT_SYMBOL(km_state_notify
);
943 void km_state_expired(struct xfrm_state
*x
, int hard
, u32 pid
)
949 c
.event
= XFRM_MSG_EXPIRE
;
950 km_state_notify(x
, &c
);
956 EXPORT_SYMBOL(km_state_expired
);
958 * We send to all registered managers regardless of failure
959 * We are happy with one success
961 int km_query(struct xfrm_state
*x
, struct xfrm_tmpl
*t
, struct xfrm_policy
*pol
)
963 int err
= -EINVAL
, acqret
;
966 read_lock(&xfrm_km_lock
);
967 list_for_each_entry(km
, &xfrm_km_list
, list
) {
968 acqret
= km
->acquire(x
, t
, pol
, XFRM_POLICY_OUT
);
972 read_unlock(&xfrm_km_lock
);
975 EXPORT_SYMBOL(km_query
);
977 int km_new_mapping(struct xfrm_state
*x
, xfrm_address_t
*ipaddr
, u16 sport
)
982 read_lock(&xfrm_km_lock
);
983 list_for_each_entry(km
, &xfrm_km_list
, list
) {
985 err
= km
->new_mapping(x
, ipaddr
, sport
);
989 read_unlock(&xfrm_km_lock
);
992 EXPORT_SYMBOL(km_new_mapping
);
994 void km_policy_expired(struct xfrm_policy
*pol
, int dir
, int hard
, u32 pid
)
1000 c
.event
= XFRM_MSG_POLEXPIRE
;
1001 km_policy_notify(pol
, dir
, &c
);
1006 EXPORT_SYMBOL(km_policy_expired
);
1008 int xfrm_user_policy(struct sock
*sk
, int optname
, u8 __user
*optval
, int optlen
)
1012 struct xfrm_mgr
*km
;
1013 struct xfrm_policy
*pol
= NULL
;
1015 if (optlen
<= 0 || optlen
> PAGE_SIZE
)
1018 data
= kmalloc(optlen
, GFP_KERNEL
);
1023 if (copy_from_user(data
, optval
, optlen
))
1027 read_lock(&xfrm_km_lock
);
1028 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1029 pol
= km
->compile_policy(sk
, optname
, data
,
1034 read_unlock(&xfrm_km_lock
);
1037 xfrm_sk_policy_insert(sk
, err
, pol
);
1046 EXPORT_SYMBOL(xfrm_user_policy
);
1048 int xfrm_register_km(struct xfrm_mgr
*km
)
1050 write_lock_bh(&xfrm_km_lock
);
1051 list_add_tail(&km
->list
, &xfrm_km_list
);
1052 write_unlock_bh(&xfrm_km_lock
);
1055 EXPORT_SYMBOL(xfrm_register_km
);
1057 int xfrm_unregister_km(struct xfrm_mgr
*km
)
1059 write_lock_bh(&xfrm_km_lock
);
1060 list_del(&km
->list
);
1061 write_unlock_bh(&xfrm_km_lock
);
1064 EXPORT_SYMBOL(xfrm_unregister_km
);
1066 int xfrm_state_register_afinfo(struct xfrm_state_afinfo
*afinfo
)
1069 if (unlikely(afinfo
== NULL
))
1071 if (unlikely(afinfo
->family
>= NPROTO
))
1072 return -EAFNOSUPPORT
;
1073 write_lock_bh(&xfrm_state_afinfo_lock
);
1074 if (unlikely(xfrm_state_afinfo
[afinfo
->family
] != NULL
))
1077 afinfo
->state_bydst
= xfrm_state_bydst
;
1078 afinfo
->state_byspi
= xfrm_state_byspi
;
1079 xfrm_state_afinfo
[afinfo
->family
] = afinfo
;
1081 write_unlock_bh(&xfrm_state_afinfo_lock
);
1084 EXPORT_SYMBOL(xfrm_state_register_afinfo
);
1086 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo
*afinfo
)
1089 if (unlikely(afinfo
== NULL
))
1091 if (unlikely(afinfo
->family
>= NPROTO
))
1092 return -EAFNOSUPPORT
;
1093 write_lock_bh(&xfrm_state_afinfo_lock
);
1094 if (likely(xfrm_state_afinfo
[afinfo
->family
] != NULL
)) {
1095 if (unlikely(xfrm_state_afinfo
[afinfo
->family
] != afinfo
))
1098 xfrm_state_afinfo
[afinfo
->family
] = NULL
;
1099 afinfo
->state_byspi
= NULL
;
1100 afinfo
->state_bydst
= NULL
;
1103 write_unlock_bh(&xfrm_state_afinfo_lock
);
1106 EXPORT_SYMBOL(xfrm_state_unregister_afinfo
);
1108 static struct xfrm_state_afinfo
*xfrm_state_get_afinfo(unsigned short family
)
1110 struct xfrm_state_afinfo
*afinfo
;
1111 if (unlikely(family
>= NPROTO
))
1113 read_lock(&xfrm_state_afinfo_lock
);
1114 afinfo
= xfrm_state_afinfo
[family
];
1115 if (unlikely(!afinfo
))
1116 read_unlock(&xfrm_state_afinfo_lock
);
1120 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo
*afinfo
)
1122 read_unlock(&xfrm_state_afinfo_lock
);
1125 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1126 void xfrm_state_delete_tunnel(struct xfrm_state
*x
)
1129 struct xfrm_state
*t
= x
->tunnel
;
1131 if (atomic_read(&t
->tunnel_users
) == 2)
1132 xfrm_state_delete(t
);
1133 atomic_dec(&t
->tunnel_users
);
1138 EXPORT_SYMBOL(xfrm_state_delete_tunnel
);
1141 * This function is NOT optimal. For example, with ESP it will give an
1142 * MTU that's usually two bytes short of being optimal. However, it will
1143 * usually give an answer that's a multiple of 4 provided the input is
1144 * also a multiple of 4.
1146 int xfrm_state_mtu(struct xfrm_state
*x
, int mtu
)
1150 res
-= x
->props
.header_len
;
1158 spin_lock_bh(&x
->lock
);
1159 if (x
->km
.state
== XFRM_STATE_VALID
&&
1160 x
->type
&& x
->type
->get_max_size
)
1161 m
= x
->type
->get_max_size(x
, m
);
1163 m
+= x
->props
.header_len
;
1164 spin_unlock_bh(&x
->lock
);
1174 int xfrm_init_state(struct xfrm_state
*x
)
1176 struct xfrm_state_afinfo
*afinfo
;
1177 int family
= x
->props
.family
;
1180 err
= -EAFNOSUPPORT
;
1181 afinfo
= xfrm_state_get_afinfo(family
);
1186 if (afinfo
->init_flags
)
1187 err
= afinfo
->init_flags(x
);
1189 xfrm_state_put_afinfo(afinfo
);
1194 err
= -EPROTONOSUPPORT
;
1195 x
->type
= xfrm_get_type(x
->id
.proto
, family
);
1196 if (x
->type
== NULL
)
1199 err
= x
->type
->init_state(x
);
1203 x
->mode
= xfrm_get_mode(x
->props
.mode
, family
);
1204 if (x
->mode
== NULL
)
1207 x
->km
.state
= XFRM_STATE_VALID
;
1213 EXPORT_SYMBOL(xfrm_init_state
);
1215 void __init
xfrm_state_init(void)
1219 for (i
=0; i
<XFRM_DST_HSIZE
; i
++) {
1220 INIT_LIST_HEAD(&xfrm_state_bydst
[i
]);
1221 INIT_LIST_HEAD(&xfrm_state_byspi
[i
]);
1223 INIT_WORK(&xfrm_state_gc_work
, xfrm_state_gc_task
, NULL
);