/*
 * xfrm_state.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <asm/uaccess.h>
#include <linux/audit.h>

#include "xfrm_hash.h"
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
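/*
 * Illustrative sketch (not from the original file): the two tables serve
 * the two directions of SA resolution.  On input, the receiver already
 * knows the SPI carried in the AH/ESP header, so a byspi lookup is
 * enough, roughly:
 *
 *	x = xfrm_state_lookup(&daddr, esp_hdr->spi, IPPROTO_ESP, AF_INET);
 *
 * On output, only the policy's template (reqid, mode, proto) and the
 * addresses are known, so xfrm_state_find() walks the bydst chain for a
 * matching state and falls back to creating an XFRM_STATE_ACQ entry.
 * The names daddr and esp_hdr above are placeholders, not variables
 * defined in this file.
 */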
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;
static unsigned int xfrm_state_genid;
static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
}

static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 unsigned short family)
{
	return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
}

static inline unsigned int
xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
}

static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		/* Rehash this state into each of the new tables. */
		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family, nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
static unsigned long xfrm_hash_new_size(void)
{
	return ((xfrm_state_hmask + 1) << 1) *
		sizeof(struct hlist_head);
}

static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	if (x->mode)
		xfrm_put_mode(x->mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}

static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state *)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
		       AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);

out:
	spin_unlock(&x->lock);
}
static void xfrm_replay_timer_handler(unsigned long data);

struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data	  = (unsigned long)x;
		init_timer(&x->rtimer);
		x->rtimer.function = xfrm_replay_timer_handler;
		x->rtimer.data     = (unsigned long)x;
		x->curlft.add_time = (unsigned long)xtime.tv_sec;
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);

void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		__xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_log(audit_info->loginuid,
					       audit_info->secid,
					       AUDIT_MAC_IPSEC_DELSA,
					       err ? 0 : 1, NULL, x);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
}
EXPORT_SYMBOL(xfrm_state_flush);
static void
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
}

static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi       != spi ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}

static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	if (use_spi)
		return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
					   x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(&x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}

static void xfrm_hash_grow_check(int have_hash_collision)
{
	if (have_hash_collision &&
	    (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
	    xfrm_state_num > xfrm_state_hmask)
		schedule_work(&xfrm_hash_work);
}
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, family) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
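/*
 * Illustrative sketch (not from the original file): xfrm_state_find() is
 * the output-path resolver.  After policy lookup has produced a template,
 * a caller asks for a state matching that template and flow, e.g.
 *
 *	x = xfrm_state_find(&daddr, &saddr, fl, tmpl, pol, &err, family);
 *	if (!x && err == -EAGAIN)
 *		;	// an XFRM_STATE_ACQ entry exists and a key manager
 *			// has already been asked via km_query() to negotiate
 *
 * The daddr/saddr/fl/tmpl/pol names above stand for the caller's own
 * variables, not symbols defined in this file.
 */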
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}

/* xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family	== family &&
		    x->props.reqid	== reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}

void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid  != reqid ||
		    x->props.mode   != mode ||
		    x->props.family != family ||
		    x->km.state     != XFRM_STATE_ACQ ||
		    x->id.spi       != 0 ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
		xfrm_state_hold(x);
		x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}
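/*
 * Illustrative note (not from the original file): the larval state created
 * above is an "acquire" (XFRM_STATE_ACQ) entry.  It carries no SPI yet and
 * exists so that further packets for the same (daddr, reqid, proto) do not
 * trigger duplicate key-manager requests; it expires after XFRM_ACQ_EXPIRES
 * seconds unless a key manager replaces it with a real SA through
 * xfrm_state_add()/xfrm_state_update().
 */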
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);

int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr)
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		mod_timer(&x->timer, jiffies);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);

static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
		- skb_headroom(skb);

	if (nhead > 0)
		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

	/* Check tail too... */
	return 0;
}

int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm_state_check_expire(x);
	if (!err)
		err = xfrm_state_check_space(x, skb);
	return err;
}
EXPORT_SYMBOL(xfrm_state_check);

struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
		  unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup(daddr, spi, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
			 u8 proto, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
	      xfrm_address_t *daddr, xfrm_address_t *saddr,
	      int create, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);
#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->tmpl_sort)
		err = afinfo->tmpl_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif

/* Silly enough, but I'm too lazy to build a resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_find_acq_byseq(seq);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

u32
xfrm_get_acqseq(void)
{
	u32 res;
	static u32 acqseq;
	static DEFINE_SPINLOCK(acqseq_lock);

	spin_lock_bh(&acqseq_lock);
	res = (++acqseq ? : ++acqseq);
	spin_unlock_bh(&acqseq_lock);
	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);

void
xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi)
{
	unsigned int h;
	struct xfrm_state *x0;

	if (x->id.spi)
		return;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			return;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		u32 low = ntohl(minspi);
		u32 high = ntohl(maxspi);
		for (h = 0; h < high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);
	}
}
EXPORT_SYMBOL(xfrm_alloc_spi);
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x, *last = NULL;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			if (last) {
				err = func(last, count, data);
				if (err)
					goto out;
			}
			last = x;
			count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}
	err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
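/*
 * Illustrative sketch (not from the original file): xfrm_state_walk()
 * visits every state whose protocol matches and passes a running count to
 * the callback, with count == 0 marking the final entry, e.g.
 *
 *	static int dump_one_sa(struct xfrm_state *x, int count, void *ptr)
 *	{
 *		...emit x; count == 0 means this is the last state...
 *		return 0;
 *	}
 *
 *	err = xfrm_state_walk(IPSEC_PROTO_ANY, dump_one_sa, &dump_ctx);
 *
 * dump_one_sa and dump_ctx are placeholder names, not symbols defined in
 * this file.
 */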
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 *  1. we updated one of the sequence numbers, and the seqno difference
	 *     is at least x->replay_maxdiff, in this case we also update the
	 *     timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
EXPORT_SYMBOL(xfrm_replay_notify);
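/*
 * Worked example (illustrative, not from the original file): with
 * x->replay_maxdiff == 32, an XFRM_REPLAY_UPDATE event is suppressed as
 * long as both the inbound and outbound sequence numbers have advanced by
 * fewer than 32 since the last notification; once either difference
 * reaches 32, the current replay state is copied to x->preplay and an
 * XFRM_MSG_NEWAE message goes out to the key managers.  Independently,
 * the x->replay_maxage timer raises XFRM_REPLAY_TIMEOUT so that slow but
 * steady progress is still reported.
 */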
static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state *)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on())
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}

int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= x->props.replay_window) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);

void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);
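/*
 * Worked example (illustrative, not from the original file): with
 * x->props.replay_window == 32 and x->replay.seq == 100, bit 0 of
 * x->replay.bitmap stands for sequence number 100 and bit 31 for 69.
 * A packet with seq 95 gives diff = 5: it is accepted if bit 5 is clear,
 * and xfrm_replay_advance() then sets that bit.  A packet with seq 105
 * advances the window: the bitmap is shifted left by 5, bit 0 is set for
 * 105, and anything at or below 73 now falls outside the window and is
 * rejected by xfrm_replay_check().
 */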
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);

void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure
 * We are happy with one success
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
		if (!acqret)
			err = acqret;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_query);
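/*
 * Illustrative sketch (not from the original file): km_query() is what
 * turns an unresolved xfrm_state_find() into key negotiation.  Every
 * registered xfrm_mgr gets an acquire callback, and one success is
 * enough.  A key manager registers itself roughly like this (my_mgr,
 * my_acquire and my_notify are placeholder names):
 *
 *	static struct xfrm_mgr my_mgr = {
 *		.id      = "my_keymgr",
 *		.notify  = my_notify,
 *		.acquire = my_acquire,
 *	};
 *	xfrm_register_km(&my_mgr);
 */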
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);

int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_report);

int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else
		xfrm_state_afinfo[afinfo->family] = afinfo;
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			xfrm_state_afinfo[afinfo->family] = NULL;
	}
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}

/* Drops the read lock taken in xfrm_state_get_afinfo(). */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
/*
 * This function is NOT optimal.  For example, with ESP it will give an
 * MTU that's usually two bytes short of being optimal.  However, it will
 * usually give an answer that's a multiple of 4 provided the input is
 * also a multiple of 4.
 */
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res = mtu;

	res -= x->props.header_len;

	for (;;) {
		int m = res;

		if (m < 68)
			return 68;

		spin_lock_bh(&x->lock);
		if (x->km.state == XFRM_STATE_VALID &&
		    x->type && x->type->get_max_size)
			m = x->type->get_max_size(x, m);
		else
			m += x->props.header_len;
		spin_unlock_bh(&x->lock);

		if (m <= mtu)
			break;
		res -= (m - mtu);
	}

	return res;
}
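/*
 * Worked example (illustrative, not from the original file): for an SA
 * whose headers consume x->props.header_len == 32 bytes and a link MTU of
 * 1500, the first guess is res = 1468.  get_max_size() then reports how
 * large a packet carrying that much payload would be once padding and
 * trailers are added; if the result exceeds 1500, res is reduced by the
 * overshoot and the loop tries again, so the returned payload size is one
 * that actually fits the real MTU (possibly a couple of bytes below the
 * true optimum, as the comment above notes).
 */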
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->mode = xfrm_get_mode(x->props.mode, family);
	if (x->mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;
	err = 0;

error:
	return err;
}
EXPORT_SYMBOL(xfrm_init_state);

void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}