#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
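/*
 * A sketch of the resulting temporary buffer layout (the gaps between the
 * regions depend on the algorithm's alignmask and on
 * crypto_tfm_ctx_alignment(), so the sizes here are illustrative only):
 *
 *   +--------------+-----------------------------------+---------------------+
 *   | IV (aligned) | aead_givcrypt_request + reqsize   | scatterlist[nfrags] |
 *   +--------------+-----------------------------------+---------------------+
 */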
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
{
        unsigned int len;

        len = crypto_aead_ivsize(aead);
        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
        struct crypto_aead *aead, u8 *iv)
{
        struct aead_givcrypt_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_givcrypt_set_tfm(req, aead);
        return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
        struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        kfree(ESP_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
}

static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_givcrypt_request *req;
        struct scatterlist *sg;
        struct scatterlist *asg;
        struct esp_data *esp;
        struct sk_buff *trailer;
        void *tmp;
        u8 *iv;
        u8 *tail;
        int blksize;
        int clen;
        int alen;
        int nfrags;

        /* skb is pure payload to encrypt */

        err = -ENOMEM;

        /* Round to block size */
        clen = skb->len;

        esp = x->data;
        aead = esp->aead;
        alen = crypto_aead_authsize(aead);

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        clen = ALIGN(clen + 2, blksize);
        if (esp->padlen)
                clen = ALIGN(clen, esp->padlen);

        if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
                goto error;
        nfrags = err;

        tmp = esp_alloc_tmp(aead, nfrags + 1);
        if (!tmp)
                goto error;

        iv = esp_tmp_iv(aead, tmp);
        req = esp_tmp_givreq(aead, iv);
        asg = esp_givreq_sg(aead, req);
        sg = asg + 1;

        /* Fill padding... */
        tail = skb_tail_pointer(trailer);
        do {
                int i;
                for (i = 0; i < clen - skb->len - 2; i++)
                        tail[i] = i + 1;
        } while (0);
        tail[clen - skb->len - 2] = (clen - skb->len) - 2;
        tail[clen - skb->len - 1] = *skb_mac_header(skb);
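        /*
         * A sketch of the RFC 4303 ESP trailer just written (the ICV of
         * alen bytes is appended later by the AEAD transform):
         *
         *   | padding 1, 2, 3, ... | pad length | next header | ICV |
         */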
        pskb_put(skb, trailer, clen - skb->len + alen);

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        /* this is non-NULL only with UDP Encapsulation */
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
                struct udphdr *uh;
                __be32 *udpdata32;
                unsigned int sport, dport;
                int encap_type;

                spin_lock_bh(&x->lock);
                sport = encap->encap_sport;
                dport = encap->encap_dport;
                encap_type = encap->encap_type;
                spin_unlock_bh(&x->lock);

                uh = (struct udphdr *)esph;
                uh->source = sport;
                uh->dest = dport;
                uh->len = htons(skb->len - skb_transport_offset(skb));
                uh->check = 0;

                switch (encap_type) {
                default:
                case UDP_ENCAP_ESPINUDP:
                        esph = (struct ip_esp_hdr *)(uh + 1);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        udpdata32 = (__be32 *)(uh + 1);
                        udpdata32[0] = udpdata32[1] = 0;
                        esph = (struct ip_esp_hdr *)(udpdata32 + 2);
                        break;
                }

                *skb_mac_header(skb) = IPPROTO_UDP;
        }

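        /*
         * Sketch of the resulting header layouts (the UDP header and the
         * two zero words are only present with the NAT-T encapsulation
         * set up above):
         *
         *   UDP_ENCAP_ESPINUDP:         [ IP | UDP | ESP ... ]
         *   UDP_ENCAP_ESPINUDP_NON_IKE: [ IP | UDP | 8 zero bytes | ESP ... ]
         */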
        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg,
                     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
                     clen + alen);
        sg_init_one(asg, esph, sizeof(*esph));

        aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
        aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
        aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
        if (err == -EINPROGRESS)
                goto error;

        if (err == -EBUSY)
                err = NET_XMIT_DROP;

        kfree(tmp);

error:
        return err;
}

static int esp_input_done2(struct sk_buff *skb, int err)
{
        struct iphdr *iph;
        struct xfrm_state *x = xfrm_input_state(skb);
        struct esp_data *esp = x->data;
        struct crypto_aead *aead = esp->aead;
        int alen = crypto_aead_authsize(aead);
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int elen = skb->len - hlen;
        int ihl;
        u8 nexthdr[2];
        int padlen;

        kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
                BUG();

        err = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen)
                goto out;

        /* ... check padding bits here. Silly. :-) */

        iph = ip_hdr(skb);
        ihl = iph->ihl * 4;

        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
                struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

                /*
                 * 1) if the NAT-T peer's IP or port changed then
                 *    advertize the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
                if (iph->saddr != x->props.saddr.a4 ||
                    uh->source != encap->encap_sport) {
                        xfrm_address_t ipaddr;

                        ipaddr.a4 = iph->saddr;
                        km_new_mapping(x, &ipaddr, uh->source);

                        /* XXX: perhaps add an extra
                         * policy check here, to see
                         * if we should allow or
                         * reject a packet from a
                         * different source
                         * address/port.
                         */
                }

                /*
                 * 2) ignore UDP/TCP checksums in case
                 *    of NAT-T in Transport Mode, or
                 *    perform other post-processing fixes
                 *    as per draft-ietf-ipsec-udp-encaps-06,
                 *    section 3.1.2
                 */
                if (x->props.mode == XFRM_MODE_TRANSPORT)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        pskb_trim(skb, skb->len - alen - padlen - 2);
        __skb_pull(skb, hlen);
        skb_set_transport_header(skb, -ihl);

        err = nexthdr[1];

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, esp_input_done2(skb, err));
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct esp_data *esp = x->data;
        struct crypto_aead *aead = esp->aead;
        struct aead_request *req;
        struct sk_buff *trailer;
        int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
        int nfrags;
        void *tmp;
        u8 *iv;
        struct scatterlist *sg;
        struct scatterlist *asg;
        int err = -EINVAL;

        if (!pskb_may_pull(skb, sizeof(*esph)))
                goto out;

        if (elen <= 0)
                goto out;

        if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
                goto out;
        nfrags = err;

        err = -ENOMEM;
        tmp = esp_alloc_tmp(aead, nfrags + 1);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        iv = esp_tmp_iv(aead, tmp);
        req = esp_tmp_req(aead, iv);
        asg = esp_req_sg(aead, req);
        sg = asg + 1;

        skb->ip_summed = CHECKSUM_NONE;

        esph = (struct ip_esp_hdr *)skb->data;

        /* Get ivec. This can be wrong, check against another impls. */
        iv = esph->enc_data;

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
        sg_init_one(asg, esph, sizeof(*esph));

        aead_request_set_callback(req, 0, esp_input_done, skb);
        aead_request_set_crypt(req, sg, sg, elen, iv);
        aead_request_set_assoc(req, asg, sizeof(*esph));

        err = crypto_aead_decrypt(req);
        if (err == -EINPROGRESS)
                goto out;

        err = esp_input_done2(skb, err);

out:
        return err;
}

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
        u32 align = max_t(u32, blksize, esp->padlen);
        u32 rem;

        mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
        rem = mtu & (align - 1);
        mtu &= ~(align - 1);

        switch (x->props.mode) {
        case XFRM_MODE_TUNNEL:
                break;
        default:
        case XFRM_MODE_TRANSPORT:
                /* The maximum size of the padding plus the next header */
                mtu -= blksize - 4;
                mtu += min_t(u32, blksize - 4, rem);
                break;
        case XFRM_MODE_BEET:
                /* The worst case. */
                mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
                break;
        }

        return mtu - 2;
}

static void esp4_err(struct sk_buff *skb, u32 info)
{
        struct iphdr *iph = (struct iphdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
        struct xfrm_state *x;

        if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
            icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                return;

        x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi,
                              IPPROTO_ESP, AF_INET);
        if (!x)
                return;
        NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
                 ntohl(esph->spi), ntohl(iph->daddr));
        xfrm_state_put(x);
}

static void esp_destroy(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;

        if (!esp)
                return;

        crypto_free_aead(esp->aead);
        kfree(esp);
}

static int esp_init_aead(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;
        struct crypto_aead *aead;
        int err;

        aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        esp->aead = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

error:
        return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -EINVAL;
        if (x->ealg == NULL)
                goto error;

        err = -ENAMETOOLONG;
        if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
                     x->aalg ? x->aalg->alg_name : "digest_null",
                     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
                goto error;

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        esp->aead = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

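        /*
         * A sketch of the key blob assembled here for the authenc()
         * template (sizes depend on the configured algorithms):
         *
         *   [ rtattr CRYPTO_AUTHENC_KEYA_PARAM | param (enckeylen) ]
         *   [ authentication key (x->aalg, if any)                 ]
         *   [ encryption key (x->ealg)                             ]
         */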
        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
                        NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
                                 x->aalg->alg_name,
                                 crypto_aead_authsize(aead),
                                 aalg_desc->uinfo.auth.icv_fullbits / 8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}

static int esp_init_state(struct xfrm_state *x)
{
        struct esp_data *esp;
        struct crypto_aead *aead;
        u32 align;
        int err;

        esp = kzalloc(sizeof(*esp), GFP_KERNEL);
        if (esp == NULL)
                return -ENOMEM;

        x->data = esp;

        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);
        if (err)
                goto error;

        aead = esp->aead;

        esp->padlen = 0;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
        else if (x->props.mode == XFRM_MODE_BEET)
                x->props.header_len += IPV4_BEET_PHMAXLEN;
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;

                switch (encap->encap_type) {
                default:
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
                        break;
                }
        }

        align = ALIGN(crypto_aead_blocksize(aead), 4);
        if (esp->padlen)
                align = max_t(u32, align, esp->padlen);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
        return err;
}

static const struct xfrm_type esp_type =
{
        .description    = "ESP4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp_init_state,
        .destructor     = esp_destroy,
        .get_mtu        = esp4_get_mtu,
        .input          = esp_input,
        .output         = esp_output
};

static struct net_protocol esp4_protocol = {
        .handler        = xfrm4_rcv,
        .err_handler    = esp4_err,
        .no_policy      = 1,
};

static int __init esp4_init(void)
{
        if (xfrm_register_type(&esp_type, AF_INET) < 0) {
                printk(KERN_INFO "ip esp init: can't add xfrm type\n");
                return -EAGAIN;
        }
        if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
                printk(KERN_INFO "ip esp init: can't add protocol\n");
                xfrm_unregister_type(&esp_type, AF_INET);
                return -EAGAIN;
        }
        return 0;
}

static void __exit esp4_fini(void)
{
        if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
                printk(KERN_INFO "ip esp close: can't remove protocol\n");
        if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
                printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);