#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
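
/*
 * Per-skb scratch state, stashed in skb->cb while an async crypto request
 * is in flight; ->tmp keeps the esp_alloc_tmp() buffer alive so the
 * completion callback can free it.
 */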
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
	unsigned int len;

	len = seqhilen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
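
/*
 * The esp_tmp_* helpers below carve the buffer returned by esp_alloc_tmp()
 * into its components, in allocation order: ESN sequence-number-high word,
 * IV, AEAD request, then the scatterlist array.
 */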
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
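
/*
 * Completion callback for async encryption: free the scratch buffer and
 * hand the skb back to the xfrm output path with the crypto result.
 */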
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}
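
/*
 * Transmit path: grow the trailer (TFC pad, ESP self-describing pad,
 * pad length, next header, ICV), prepend the ESP header (and optional
 * UDP encapsulation), then hand the packet to the AEAD givcrypt
 * interface, which also generates the IV.
 */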
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct esp_data *esp;
	struct sk_buff *trailer;
	void *tmp;
	u8 *iv;
	u8 *tail;
	int blksize;
	int clen;
	int alen;
	int plen;
	int tfclen;
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	__be32 *seqhi;

	/* skb is pure payload to encrypt */

	err = -ENOMEM;

	esp = x->data;
	aead = esp->aead;
	alen = crypto_aead_authsize(aead);

	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
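	/*
	 * Trailer sizing: the encrypted payload is padded to the cipher
	 * block size (rounded up to 4 per RFC 4303), with two bytes
	 * reserved for the pad-length and next-header fields.  For
	 * example, with a 16-byte block size, skb->len == 100 and no
	 * TFC padding, clen = ALIGN(102, 16) = 112 and plen = 12.
	 */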
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	if (esp->padlen)
		clen = ALIGN(clen, esp->padlen);
	plen = clen - skb->len - tfclen;

	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto error;

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + sglists;

	/* Fill padding... */
	tail = skb_tail_pointer(trailer);
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;
		__be16 sport, dport;
		int encap_type;

		spin_lock_bh(&x->lock);
		sport = encap->encap_sport;
		dport = encap->encap_dport;
		encap_type = encap->encap_type;
		spin_unlock_bh(&x->lock);

		uh = (struct udphdr *)esph;
		uh->source = sport;
		uh->dest = dport;
		uh->len = htons(skb->len - skb_transport_offset(skb));
		uh->check = 0;

		switch (encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		*skb_mac_header(skb) = IPPROTO_UDP;
	}

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);

	if ((x->props.flags & XFRM_STATE_ESN)) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, assoclen);
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output.low);

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;

	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);

error:
	return err;
}
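
/*
 * Post-decryption work, shared by the sync and async paths: strip the
 * padding and ICV, handle NAT-T address changes, and return the inner
 * protocol number (or an error).
 */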
static int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int ihl;
	u8 nexthdr[2];
	int padlen;

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	skb_set_transport_header(skb, -ihl);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
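
/* Completion callback for async decryption. */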
static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		goto out;

	if (elen <= 0)
		goto out;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + sglists;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This can be wrong; check against other implementations. */
	iv = esph->enc_data;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

	if ((x->props.flags & XFRM_STATE_ESN)) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	err = esp_input_done2(skb, err);

out:
	return err;
}
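
/*
 * Report the largest payload that still fits in @mtu once the ESP header,
 * IV, padding, pad-length/next-header bytes and ICV are accounted for;
 * transport and BEET modes can claim back part of the alignment remainder.
 */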
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
	u32 align = max_t(u32, blksize, esp->padlen);
	u32 rem;

	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
	rem = mtu & (align - 1);
	mtu &= ~(align - 1);

	switch (x->props.mode) {
	case XFRM_MODE_TUNNEL:
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		/* The worst case */
		mtu -= blksize - 4;
		mtu += min_t(u32, blksize - 4, rem);
		break;
	case XFRM_MODE_BEET:
		/* The worst case. */
		mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
		break;
	}

	return mtu - 2;
}
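
/*
 * ICMP error handler: we only care about fragmentation-needed messages,
 * which signal a path-MTU change for the SA identified by the SPI.
 */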
static void esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
		return;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return;
	NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
		 ntohl(esph->spi), ntohl(iph->daddr));
	xfrm_state_put(x);
}

static void esp_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_aead(esp->aead);
	kfree(esp);
}

static int esp_init_aead(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}
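
/*
 * Legacy (non-AEAD) configuration: compose an authenc(auth, cipher) AEAD
 * instance and feed it both keys in the crypto_authenc key blob layout:
 * an rtattr carrying the encryption key length, then the authentication
 * key, then the encryption key.
 */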
static int esp_init_authenc(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authencesn(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authenc(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_aead_authsize(aead),
				 aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
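
/*
 * Compute header/trailer reservations for this SA and set up the AEAD
 * transform, via esp_init_aead() for true AEAD algorithms or
 * esp_init_authenc() for cipher+MAC combinations.
 */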
static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp;
	struct crypto_aead *aead;
	u32 align;
	int err;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	x->data = esp;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = esp->aead;

	esp->padlen = 0;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	if (esp->padlen)
		align = max_t(u32, align, esp->padlen);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
	return err;
}

static const struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output
};

static const struct net_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.err_handler	= esp4_err,
	.no_policy	= 1,
	.netns_ok	= 1,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		printk(KERN_INFO "ip esp init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
		printk(KERN_INFO "ip esp init: can't add protocol\n");
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
		printk(KERN_INFO "ip esp close: can't remove protocol\n");
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);