/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>
/*
 * Algorithms supported by IPsec.  These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
static struct xfrm_algo_desc aalg_list[] = {
{
	.name = "digest_null",

	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "md5",

	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "sha1",

	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	.name = "sha256",

	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "ripemd160",

	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
};
static struct xfrm_algo_desc ealg_list[] = {
{
	.name = "cipher_null",

	.desc = {
		.sadb_alg_id = SADB_EALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "des",

	.desc = {
		.sadb_alg_id = SADB_EALG_DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 64,
		.sadb_alg_maxbits = 64
	}
},
{
	.name = "des3_ede",

	.desc = {
		.sadb_alg_id = SADB_EALG_3DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 192,
		.sadb_alg_maxbits = 192
	}
},
{
	.name = "cast128",

	.desc = {
		.sadb_alg_id = SADB_X_EALG_CASTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "blowfish",

	.desc = {
		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 448
	}
},
{
	.name = "aes",

	.desc = {
		.sadb_alg_id = SADB_X_EALG_AESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "serpent",

	.desc = {
		.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "twofish",

	.desc = {
		.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};
static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}
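
/*
 * Every entry above starts out with .available == 0 (static storage):
 * an algorithm is only handed out once xfrm_probe_algs() or a probing
 * lookup below has confirmed with the crypto layer that a matching
 * transform actually exists on this system.
 */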
/* Todo: generic iterators */
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < aalg_entries(); i++) {
		if (aalg_list[i].desc.sadb_alg_id == alg_id) {
			if (aalg_list[i].available)
				return &aalg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);
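
/*
 * Example (a sketch only; the real callers live in net/key/af_key.c):
 * resolving the pfkey algorithm id carried in an SADB message to its
 * xfrm descriptor.
 *
 *	struct xfrm_algo_desc *d = xfrm_aalg_get_byid(SADB_AALG_SHA1HMAC);
 *	if (d)
 *		printk(KERN_DEBUG "%s: %u..%u key bits\n", d->name,
 *		       d->desc.sadb_alg_minbits, d->desc.sadb_alg_maxbits);
 */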
struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < ealg_entries(); i++) {
		if (ealg_list[i].desc.sadb_alg_id == alg_id) {
			if (ealg_list[i].available)
				return &ealg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);
struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < calg_entries(); i++) {
		if (calg_list[i].desc.sadb_alg_id == alg_id) {
			if (calg_list[i].available)
				return &calg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
					      int entries, char *name,
					      int probe)
{
	int i, status;

	for (i = 0; i < entries; i++) {
		if (strcmp(name, list[i].name))
			continue;

		if (list[i].available)
			return &list[i];

		if (!probe)
			break;

		status = crypto_alg_available(name, 0);
		if (!status)
			break;

		list[i].available = status;
		return &list[i];
	}
	return NULL;
}
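
/*
 * The probe flag decides how hard the lookup tries.  A minimal sketch
 * of the difference (hypothetical caller):
 *
 *	xfrm_ealg_get_byname("aes", 0);	returns NULL unless "aes" was
 *					already marked available;
 *	xfrm_ealg_get_byname("aes", 1);	asks the crypto layer, possibly
 *					loading a module, and caches the
 *					answer in .available.
 */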
struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
	if (idx >= aalg_entries())
		return NULL;

	return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
	if (idx >= ealg_entries())
		return NULL;

	return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called
 * by pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i, status;

	/* Probing may load cipher modules and thus may sleep. */
	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_alg_available(aalg_list[i].name, 0);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_alg_available(ealg_list[i].name, 0);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_alg_available(calg_list[i].name, 0);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
int xfrm_count_auth_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < aalg_entries(); i++)
		if (aalg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < ealg_entries(); i++)
		if (ealg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
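
/*
 * Sketch of the sequence pfkey-style callers follow: probe first (may
 * sleep while modules load), then count what turned up.
 *
 *	xfrm_probe_algs();
 *	auth_count = xfrm_count_auth_supported();
 *	enc_count = xfrm_count_enc_supported();
 */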
/* Move to common area: it is shared with AH. */

void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
		  int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct scatterlist sg;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		icv_update(tfm, &sg, 1);

		if ((len -= copy) == 0)
			return;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset - start;
			sg.length = copy;

			icv_update(tfm, &sg, 1);

			if (!(len -= copy))
				return;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				skb_icv_walk(list, tfm, offset - start,
					     copy, icv_update);
				if ((len -= copy) == 0)
					return;
				offset += copy;
			}
			start = end;
		}
	}
	if (len)
		BUG();
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
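
/*
 * Sketch of a typical caller, modelled on AH's ICV computation (the
 * real code is in net/ipv4/ah4.c; "key", "keylen" and "icv" below are
 * placeholders):
 *
 *	crypto_hmac_init(tfm, key, &keylen);
 *	skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update);
 *	crypto_hmac_final(tfm, key, &keylen, icv);
 */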
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Although this looks generic, it is not used anywhere else. */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset + offset - start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg + elt,
						    offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	if (len)
		BUG();
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
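
/*
 * The element count needed for the scatterlist comes from skb_cow_data()
 * below.  A sketch with hypothetical names (compare esp_output in
 * net/ipv4/esp4.c):
 *
 *	nfrags = skb_cow_data(skb, trailer_len, &trailer);
 *	sg = kmalloc(sizeof(*sg) * nfrags, GFP_ATOMIC);
 *	skb_to_sgvec(skb, sg, offset, clen);
 */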
/* Check that skb data bits are writable. If they are not, copy data
 * to a newly created private area. If "tailbits" is given, make sure that
 * tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements to load for subsequent
 * transformations and a pointer to the writable trailer skb.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb) - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. OK, on miss we reallocate and reserve even
		 * more space, 128 bytes is fair. */
		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0,
				     tailbits - skb_tailroom(skb) + 128,
				     GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery: we are in trouble, going to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */
		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last one, worry about the trailer. */
		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* We are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link the new skb, drop the old one */
			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
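
/*
 * Sketch of how skb_cow_data() pairs with pskb_put() below in an
 * ESP-style transform (hypothetical sizes; compare net/ipv4/esp4.c):
 *
 *	nfrags = skb_cow_data(skb, pad_len, &trailer);
 *	if (nfrags < 0)
 *		goto error;
 *	tail = pskb_put(skb, trailer, pad_len);
 *	... fill tail with self-describing padding, pad length and
 *	    the next-header byte ...
 */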
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

#endif