/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>
/*
 * Algorithms supported by IPsec.  These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
static struct xfrm_algo_desc aalg_list[] = {
{
	.name = "digest_null",

	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "md5",

	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "sha1",

	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	.name = "sha256",

	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "ripemd160",

	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
};
static struct xfrm_algo_desc ealg_list[] = {
{
	.name = "cipher_null",

	.desc = {
		.sadb_alg_id = SADB_EALG_NULL,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "des",

	.desc = {
		.sadb_alg_id = SADB_EALG_DESCBC,
		.sadb_alg_minbits = 64,
		.sadb_alg_maxbits = 64
	}
},
{
	.name = "des3_ede",

	.desc = {
		.sadb_alg_id = SADB_EALG_3DESCBC,
		.sadb_alg_minbits = 192,
		.sadb_alg_maxbits = 192
	}
},
{
	.name = "cast5",

	.desc = {
		.sadb_alg_id = SADB_X_EALG_CASTCBC,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "blowfish",

	.desc = {
		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 448
	}
},
{
	.name = "aes",

	.desc = {
		.sadb_alg_id = SADB_X_EALG_AESCBC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "serpent",

	.desc = {
		.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "twofish",

	.desc = {
		.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};
static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}
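
/*
 * Illustrative sketch (not part of the original file): the sadb_alg_minbits
 * and sadb_alg_maxbits fields in the tables above are the kind of properties
 * a caller can check when validating a key during negotiation.  The helper
 * below is hypothetical, not a kernel API:
 *
 *	static int example_key_len_ok(struct xfrm_algo_desc *d, int keybits)
 *	{
 *		return keybits >= d->desc.sadb_alg_minbits &&
 *		       keybits <= d->desc.sadb_alg_maxbits;
 *	}
 */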
/* Todo: generic iterators */
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < aalg_entries(); i++) {
		if (aalg_list[i].desc.sadb_alg_id == alg_id) {
			if (aalg_list[i].available)
				return &aalg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < ealg_entries(); i++) {
		if (ealg_list[i].desc.sadb_alg_id == alg_id) {
			if (ealg_list[i].available)
				return &ealg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < calg_entries(); i++) {
		if (calg_list[i].desc.sadb_alg_id == alg_id) {
			if (calg_list[i].available)
				return &calg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
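
/*
 * Usage sketch for the *_get_byid() lookups (illustrative only): a caller
 * translating a pfkey SADB algorithm id into a descriptor might write:
 *
 *	struct xfrm_algo_desc *aalg = xfrm_aalg_get_byid(SADB_AALG_SHA1HMAC);
 *
 * A NULL result means either an unknown id or an algorithm whose transform
 * has not been marked available; these lookups never probe on their own.
 */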
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
					      int entries, char *name,
					      int probe)
{
	int i, status;

	if (!name)
		return NULL;

	for (i = 0; i < entries; i++) {
		if (strcmp(name, list[i].name))
			continue;

		if (list[i].available)
			return &list[i];

		if (!probe)
			break;

		status = crypto_alg_available(name, 0);
		if (!status)
			break;

		list[i].available = status;
		return &list[i];
	}
	return NULL;
}
struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
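
/*
 * Usage sketch for the *_get_byname() lookups (illustrative only): with
 * probe != 0 an unavailable entry falls through to crypto_alg_available(),
 * so a just-loaded cipher module is picked up on first use:
 *
 *	struct xfrm_algo_desc *ealg = xfrm_ealg_get_byname("aes", 1);
 *
 * With probe == 0 only the cached ->available flag is consulted, which is
 * the cheap variant when probing has already been done.
 */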
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
	if (idx >= aalg_entries())
		return NULL;

	return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
	if (idx >= ealg_entries())
		return NULL;

	return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
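
/*
 * Usage sketch (illustrative only): the *_get_byidx() accessors let a
 * caller such as an algorithm dump routine enumerate the tables without
 * knowing their size:
 *
 *	unsigned int i;
 *	struct xfrm_algo_desc *d;
 *
 *	for (i = 0; (d = xfrm_aalg_get_byidx(i)) != NULL; i++)
 *		;	// inspect d->name, d->desc, d->available
 */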
/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called by
 * pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
	int i, status;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_alg_available(aalg_list[i].name, 0);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_alg_available(ealg_list[i].name, 0);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_alg_available(calg_list[i].name, 0);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
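
/*
 * Usage sketch (illustrative only): a pfkey register handler would first
 * refresh the availability flags, then size its reply from the counts
 * returned by the functions below:
 *
 *	int n;
 *
 *	xfrm_probe_algs();
 *	n = xfrm_count_auth_supported() + xfrm_count_enc_supported();
 *
 * The BUG_ON(in_softirq()) above exists because probing may trigger
 * module loading, which can sleep.
 */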
int xfrm_count_auth_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < aalg_entries(); i++)
		if (aalg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);
int xfrm_count_enc_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < ealg_entries(); i++)
		if (ealg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
/* Move to common area: it is shared with AH. */

void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
		  int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct scatterlist sg;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		icv_update(tfm, &sg, 1);

		if ((len -= copy) == 0)
			return;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset - start;
			sg.length = copy;

			icv_update(tfm, &sg, 1);

			if (!(len -= copy))
				return;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				skb_icv_walk(list, tfm, offset - start,
					     copy, icv_update);
				if ((len -= copy) == 0)
					return;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
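
/*
 * Usage sketch (illustrative only): the icv_update callback matches the
 * 2.6-era digest update hooks, so an AH-style caller could hash a whole
 * packet, linear head and fragments alike, with something like:
 *
 *	crypto_hmac_init(tfm, key, &keylen);
 *	skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update);
 *	crypto_hmac_final(tfm, key, &keylen, icv);
 *
 * (crypto_hmac_* refer to the old crypto API this file targets.)
 */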
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Although this looks generic, it is not used anywhere else. */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset + offset - start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg + elt,
						    offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
/* Check that skb data bits are writable.  If they are not, copy data
 * to a newly created private area.  If "tailbits" is given, make sure that
 * tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements needed for subsequent
 * transformations, and a pointer to the writable trailer skb.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb) - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames.  On a miss we reallocate and reserve even more
		 * space; 128 bytes is fair. */
		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0,
				     tailbits - skb_tailroom(skb) + 128,
				     GFP_ATOMIC))
			return -ENOMEM;

		*trailer = skb;
		return 1;
	}

	/* Misery: we have to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input.  Copy it and everything
		 * after it. */
		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */
		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* The unhappy path: a private copy is required. */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail, GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around.  Are we still alive?
			 * OK, link the new skb, drop the old one. */
			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
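
/*
 * Usage sketch (illustrative only): the ESP input path of this era pairs
 * skb_cow_data() with skb_to_sgvec(); the element count returned by the
 * former bounds the scatterlist the latter fills:
 *
 *	struct scatterlist *sg;
 *	struct sk_buff *trailer;
 *	int nfrags = skb_cow_data(skb, 0, &trailer);
 *
 *	if (nfrags < 0)
 *		goto out;
 *	sg = kmalloc(sizeof(struct scatterlist) * nfrags, GFP_ATOMIC);
 *	skb_to_sgvec(skb, sg, offset, len);
 */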
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

#endif
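
/*
 * Usage sketch (illustrative only; "padlen" is a hypothetical variable):
 * an ESP output routine appends padding and trailer bytes to the writable
 * tail skb found by skb_cow_data(), keeping the parent skb's accounting
 * straight:
 *
 *	u8 *tail = pskb_put(skb, trailer, padlen + 2);
 *
 * Bytes are written through "tail"; skb->len and skb->data_len on the
 * parent are updated by pskb_put() when trailer != skb.
 */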