/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 * 	Kazunori MIYAZAWA @USAGI
 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 * 		IPv6 support
 * 	YOSHIFUJI Hideaki @USAGI
 * 		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <asm/uaccess.h>
#include <linux/audit.h>

#include "xfrm_hash.h"
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);
u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

u32 sysctl_xfrm_acq_expires __read_mostly = 30;
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */
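/*
 * Illustrative sketch, not part of the original file: the input path
 * resolves an SA from the (daddr, spi, proto) triple through the byspi
 * table via xfrm_state_lookup() defined below.  The helper name and the
 * AF_INET/ESP parameters are hypothetical; compiled out, documentation
 * only.
 */
#if 0
static struct xfrm_state *example_rx_lookup(xfrm_address_t *daddr, __be32 spi)
{
	/* Table 1: exact SPI match, as used by the input and ctl paths. */
	return xfrm_state_lookup(daddr, spi, IPPROTO_ESP, AF_INET);
}
#endif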
static DEFINE_SPINLOCK(xfrm_state_lock);
/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;
static unsigned int xfrm_state_genid;
static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
}

static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 unsigned short family)
{
	return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
}

static inline unsigned int
xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
}
/* Rehash every state on @list into the new bydst/bysrc/byspi tables. */
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
static unsigned long xfrm_hash_new_size(void)
{
	return ((xfrm_state_hmask + 1) << 1) *
		sizeof(struct hlist_head);
}

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);
int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->mode)
		xfrm_put_mode(x->mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
		       AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);

out:
	spin_unlock(&x->lock);
}
static void xfrm_replay_timer_handler(unsigned long data);
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data	  = (unsigned long)x;
		init_timer(&x->rtimer);
		x->rtimer.function = xfrm_replay_timer_handler;
		x->rtimer.data     = (unsigned long)x;
		x->curlft.add_time = get_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		__xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_log(audit_info->loginuid,
					       audit_info->secid,
					       AUDIT_MAC_IPSEC_DELSA,
					       0, NULL, x);
				return err;
			}
		}
	}

	return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_log(audit_info->loginuid,
					       audit_info->secid,
					       AUDIT_MAC_IPSEC_DELSA,
					       err ? 0 : 1, NULL, x);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&xfrm_state_lock);
	si->sadcnt = xfrm_state_num;
	si->sadhcnt = xfrm_state_hmask;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi       != spi ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	if (use_spi)
		return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
					   x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(&x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}
static void xfrm_hash_grow_check(int have_hash_collision)
{
	if (have_hash_collision &&
	    (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
	    xfrm_state_num > xfrm_state_hmask)
		schedule_work(&xfrm_hash_work);
}
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, family) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
/* xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family	== family &&
		    x->props.reqid	== reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid  != reqid ||
		    x->props.mode   != mode ||
		    x->props.family != family ||
		    x->km.state     != XFRM_STATE_ACQ ||
		    x->id.spi       != 0 ||
		    x->id.proto	    != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
		wake_up(&km_waitq);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
#ifdef CONFIG_XFRM_MIGRATE
struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc();
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

 error:
	if (errp)
		*errp = err;
	if (x) {
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	kfree(x);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_clone);
/* xfrm_state_lock is held */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
				       struct xfrm_migrate *m)
{
	struct xfrm_state *xc;
	int err;

	xc = xfrm_state_clone(x, &err);
	if (!xc)
		return NULL;

	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

	/* add state */
	if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
		/* care is needed when the destination address of the
		   state is to be updated, as it is part of the triplet */
		xfrm_state_insert(xc);
	} else {
		if ((err = xfrm_state_add(xc)) < 0)
			goto error;
	}

	return xc;
error:
	kfree(xc);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = get_seconds();

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		mod_timer(&x->timer, jiffies);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
		- skb_headroom(skb);

	if (nhead > 0)
		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

	/* Check tail too... */
	return 0;
}
int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm_state_check_expire(x);
	if (err < 0)
		goto err;
	err = xfrm_state_check_space(x, skb);
err:
	return err;
}
EXPORT_SYMBOL(xfrm_state_check);
struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
		  unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup(daddr, spi, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);
struct xfrm_state *
xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
			 u8 proto, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
	      xfrm_address_t *daddr, xfrm_address_t *saddr,
	      int create, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);
#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->tmpl_sort)
		err = afinfo->tmpl_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif
/* Silly enough, but I'm lazy to build resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}
struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_find_acq_byseq(seq);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);
u32 xfrm_get_acqseq(void)
{
	u32 res;
	static u32 acqseq;
	static DEFINE_SPINLOCK(acqseq_lock);

	spin_lock_bh(&acqseq_lock);
	res = (++acqseq ? : ++acqseq);
	spin_unlock_bh(&acqseq_lock);
	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
void
xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi)
{
	unsigned int h;
	struct xfrm_state *x0;

	if (x->id.spi)
		return;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			return;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		u32 low = ntohl(minspi);
		u32 high = ntohl(maxspi);
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);
		wake_up(&km_waitq);
	}
}
EXPORT_SYMBOL(xfrm_alloc_spi);
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x, *last = NULL;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			if (last) {
				err = func(last, count, data);
				if (err)
					goto out;
			}
			last = x;
			count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}
	err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 *  1. we updated one of the sequence numbers, and the seqno difference
	 *     is at least x->replay_maxdiff, in this case we also update the
	 *     timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
EXPORT_SYMBOL(xfrm_replay_notify);
static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state *)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on())
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}
int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= min_t(unsigned int, x->props.replay_window,
			  sizeof(x->replay.bitmap) * 8)) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_mgr *km;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure.
 * We are happy with one success.
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
		if (!acqret)
			err = acqret;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_query);
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_migrate);
int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_report);
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);
int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else
		xfrm_state_afinfo[afinfo->family] = afinfo;
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			xfrm_state_afinfo[afinfo->family] = NULL;
	}
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}

void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}

EXPORT_SYMBOL(xfrm_state_get_afinfo);
EXPORT_SYMBOL(xfrm_state_put_afinfo);
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_VALID &&
	    x->type && x->type->get_mtu)
		res = x->type->get_mtu(x, mtu);
	else
		res = mtu - x->props.header_len;
	spin_unlock_bh(&x->lock);
	return res;
}
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->mode = xfrm_get_mode(x->props.mode, family);
	if (x->mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}
EXPORT_SYMBOL(xfrm_init_state);
void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}