Initial import.
[ccrypt.git] / net / core / ccrypt.c
blob6a8abe0712606b491fc431e92af8b8dd34de388e
1 /*
2 * Ethernet Cheap Crypt (ccrypt).
3 * (C) 2006 Dawid Ciezarkiewicz <dpc@asn.pl>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
10 #include <linux/ccrypt.h>
12 #include <linux/if_arp.h>
13 #include <linux/if_pppox.h>
14 #include <linux/if_vlan.h>
15 #include <linux/scatterlist.h>
17 #include <linux/crypto.h>
18 #include <linux/module.h>
19 #include <linux/skbuff.h>
20 #include <linux/ip.h>
21 #include <net/checksum.h>
23 /**
24 * IP ethertype will be changed to this,
25 * so that some bridges wouldn't try to be smarter
26 * then they should be.
28 #define ETHERTYPE_CCRYPTEDIP 0x0999
30 /**
31 * Allocate ccrypt_rx.
33 struct ccrypt_rx* ccrypt_rx_alloc(void) {
34 struct ccrypt_rx* new_cc = kmalloc(sizeof(struct ccrypt_rx), GFP_KERNEL);
35 memset(new_cc, 0, sizeof(struct ccrypt_rx));
36 return new_cc;
39 /**
40 * Allocate ccrypt_tx.
42 struct ccrypt_tx* ccrypt_tx_alloc(void) {
43 struct ccrypt_tx* new_cc = kmalloc(sizeof(struct ccrypt_tx), GFP_KERNEL);
44 memset(new_cc, 0, sizeof(struct ccrypt_tx));
45 return new_cc;
48 /**
49 * Free ccrypt_rx.
51 * Caller must hold ccrypt_rx_lock.
53 static void ccrypt_rx_free(struct ccrypt_rx* cc_rx)
55 unsigned int key_no;
56 unsigned int iv_no;
58 for (key_no = 0; key_no < 2; key_no++) {
59 if (cc_rx->tfms[key_no]) {
60 crypto_free_blkcipher(cc_rx->tfms[key_no]);
61 cc_rx->tfms[key_no] = 0;
64 for (iv_no = 0; iv_no < 2; iv_no++) {
65 if (cc_rx->last_recv_iv[key_no][iv_no]) {
66 kfree(cc_rx->last_recv_iv[key_no][iv_no]);
67 cc_rx->last_recv_iv[key_no][iv_no] = 0;
73 /**
74 * Free ccrypt_tx.
76 * Caller must hold ccrypt_tx_lock.
78 void ccrypt_tx_free(struct ccrypt_tx* cc_tx)
80 if (cc_tx->last_sent_iv) {
81 kfree(cc_tx->last_sent_iv);
82 cc_tx->last_sent_iv = 0;
85 if (cc_tx->tfm) {
86 crypto_free_blkcipher(cc_tx->tfm);
87 cc_tx->tfm = 0;
91 /**
92 * For key switching unification.
94 typedef int key_switch_f(struct net_device* dev, char* algorithm,
95 u8* key, unsigned int keylen);
97 /**
98 * Switch key in ccrypt_tx.
100 * Returns:
101 * 0 on success
103 * Caller must hold ccrypt_tx_lock.
105 static
106 int ccrypt_tx_switch_key(struct ccrypt_tx* cc_tx, char* algorithm,
107 u8* key, unsigned int keylen)
109 struct crypto_blkcipher* new_tfm;
110 u8* new_iv;
111 unsigned int new_iv_size;
112 int res;
114 new_tfm = crypto_alloc_blkcipher(algorithm, 0, 0);
116 if (!new_tfm) {
117 return -EINVAL;
120 res = crypto_blkcipher_setkey(new_tfm, key, keylen);
122 if (res) {
123 crypto_free_blkcipher(new_tfm);
124 return res;
127 new_iv_size = crypto_blkcipher_ivsize(new_tfm);
129 if (new_iv_size != crypto_blkcipher_blocksize(new_tfm)) {
130 printk(KERN_ERR "ccrypt: iv_len != bsize - strange\n");
131 crypto_free_blkcipher(new_tfm);
132 return -EINVAL;
135 /* allocate new iv_vectors for new key */
136 new_iv = kmalloc(new_iv_size, GFP_KERNEL);
138 if (!new_iv) {
139 kfree(new_iv);
140 printk(KERN_ERR "couldn't allocate %d bytes", new_iv_size);
142 crypto_free_blkcipher(new_tfm);
143 return -ENOMEM;
146 memset(new_iv, 0, new_iv_size);
147 if (cc_tx->last_sent_iv) {
148 kfree(cc_tx->last_sent_iv);
151 cc_tx->last_sent_iv = new_iv;
153 if (cc_tx->tfm)
154 crypto_free_blkcipher(cc_tx->tfm);
156 cc_tx->tfm = new_tfm;
158 return 0;
162 * Switch key in ccrypt_rx.
164 * Returns:
165 * 0 on success
167 * Caller must hold ccrypt_rx_lock.
169 static
170 int ccrypt_rx_switch_key(struct ccrypt_rx* cc_rx, char* algorithm,
171 u8* key, unsigned int keylen)
173 struct crypto_blkcipher* new_tfm;
174 u8* new_iv[2];
175 int res;
176 unsigned int new_iv_size;
177 unsigned int cur_iv_no;
179 new_tfm = crypto_alloc_blkcipher(algorithm, 0, 0);
181 if (!new_tfm) {
182 return -EINVAL;
185 res = crypto_blkcipher_setkey(new_tfm, key, keylen);
187 if (res) {
188 crypto_free_blkcipher(new_tfm);
189 return res;
192 new_iv_size = crypto_blkcipher_ivsize(new_tfm);
194 /* allocate new iv_vectors for new key */
195 new_iv[0] = kmalloc(new_iv_size, GFP_KERNEL);
196 new_iv[1] = kmalloc(new_iv_size, GFP_KERNEL);
198 if (!new_iv[0] || !new_iv[1]) {
199 if (new_iv[0]) {
200 kfree(new_iv[0]);
203 if (new_iv[1]) {
204 kfree(new_iv[1]);
207 crypto_free_blkcipher(new_tfm);
208 printk(KERN_ERR "ccrypt: kmalloc(%d) failed.\n",
209 new_iv_size);
210 return -ENOMEM;
213 /* zero new ivs and free old ones, then replace them */
214 for (cur_iv_no = 0; cur_iv_no < 2; ++cur_iv_no) {
215 memset(new_iv[cur_iv_no], '\0', new_iv_size);
217 if (cc_rx->last_recv_iv[1][cur_iv_no]) {
218 kfree(cc_rx->last_recv_iv[1][cur_iv_no]);
221 cc_rx->last_recv_iv[1][cur_iv_no] =
222 cc_rx->last_recv_iv[0][cur_iv_no];
224 cc_rx->last_recv_iv[0][cur_iv_no] = new_iv[cur_iv_no];
227 if (cc_rx->tfms[1]) {
228 crypto_free_blkcipher(cc_rx->tfms[1]);
231 cc_rx->tfms[1] = cc_rx->tfms[0];
232 cc_rx->tfms[0] = new_tfm;
234 cc_rx->last_recv_iv_matched[1] =
235 cc_rx->last_recv_iv_matched[0];
236 cc_rx->last_recv_iv_matched[0] = 1;
238 cc_rx->after_switch = 1;
240 return 0;
244 * Reset rx key. Stop using rx encryption.
246 void ccrypt_rx_reset(struct net_device* dev)
248 spin_lock(&dev->ccrypt_rx_lock);
249 if (dev->ccrypt_rx) {
250 ccrypt_rx_free(dev->ccrypt_rx);
251 dev->ccrypt_rx = 0;
253 spin_unlock(&dev->ccrypt_rx_lock);
257 * Reset tx key. Stop using tx encryption.
259 void ccrypt_tx_reset(struct net_device* dev)
261 spin_lock(&dev->ccrypt_tx_lock);
262 if (dev->ccrypt_tx) {
263 ccrypt_tx_free(dev->ccrypt_tx);
264 dev->ccrypt_tx = 0;
266 spin_unlock(&dev->ccrypt_tx_lock);
270 * Called from user context.
272 static
273 int rx_switch(struct net_device* dev, char* algorithm,
274 u8* key, unsigned int keylen)
276 int res;
278 if (strcmp(algorithm, "null") == 0) {
279 ccrypt_rx_reset(dev);
280 return 0;
283 spin_lock(&dev->ccrypt_rx_lock);
284 if (!dev->ccrypt_rx) {
285 dev->ccrypt_rx = ccrypt_rx_alloc();
286 if (!dev->ccrypt_rx) {
287 spin_unlock(&dev->ccrypt_rx_lock);
288 return -ENOMEM;
291 res = ccrypt_rx_switch_key(dev->ccrypt_rx, algorithm, key, keylen);
292 spin_unlock(&dev->ccrypt_rx_lock);
294 return res;
298 * Called from user context.
300 static
301 int tx_switch(struct net_device* dev, char* algorithm,
302 u8* key, unsigned int keylen)
304 int res;
306 if (strcmp(algorithm, "null") == 0) {
307 ccrypt_tx_reset(dev);
308 return 0;
311 spin_lock(&dev->ccrypt_tx_lock);
312 if (!dev->ccrypt_tx) {
313 dev->ccrypt_tx = ccrypt_tx_alloc();
314 if (!dev->ccrypt_tx) {
315 spin_unlock(&dev->ccrypt_tx_lock);
316 return -ENOMEM;
319 res = ccrypt_tx_switch_key(dev->ccrypt_tx, algorithm, key, keylen);
320 spin_unlock(&dev->ccrypt_tx_lock);
322 return res;
326 * Handle key writes - both rx and tx.
328 * Check permissions, copy data from user, parse it, call appropriate
329 * switch handler.
331 * Returns 0 on success.
333 static
334 int ccrypt_key_store_handle(struct net_device* dev,
335 const char __user *user_buffer,
336 unsigned long count,
337 key_switch_f switch_handler)
339 const unsigned int max_alg_len = CRYPTO_MAX_ALG_NAME;
341 /* key length in bytes */
342 const unsigned int max_key_len = 64;
344 /* key length as string */
345 const unsigned int max_key_string_len = max_key_len * 2;
347 /* alg + ':' + keystr + '\0' */
348 const unsigned int max_buffer_len =
349 max_alg_len + 1 + max_key_string_len + 1;
351 unsigned int a, b;
352 unsigned int i, j;
353 unsigned int key_len;
354 u8 alg_string_ok;
355 int res;
357 char buffer[max_buffer_len];
358 char alg_string[max_alg_len];
359 u8 key[max_key_len];
361 if (!capable(CAP_NET_ADMIN))
362 return -EACCES;
364 if (count > max_buffer_len - 1) {
365 return -EINVAL;
368 memcpy(buffer, user_buffer, count);
369 buffer[count] = '\0';
371 alg_string_ok = 0;
372 for (i = 0; i < max_alg_len && i <= count; ++i) {
373 if (buffer[i] == ':' || buffer[i] == '\0') {
374 alg_string[i] = '\0';
375 alg_string_ok = 1;
376 if (buffer[i] == ':')
377 i++;
378 break;
380 alg_string[i] = buffer[i];
383 if (!alg_string_ok) {
384 return -EINVAL;
387 j = i;
388 key_len = 0;
389 for (i = 0; i < max_key_len; i++, key_len++, j+= 2) {
390 if (buffer[j] == 0) {
391 break;
394 if (buffer[j] >= '0' && buffer[j] <= '9') {
395 a = buffer[j] - '0';
397 else if (buffer[j] >= 'a' && buffer[j] <= 'f') {
398 a = buffer[j] - 'a' + 10;
399 } else {
400 return -EINVAL;
403 if (buffer[j + 1] >= '0' && buffer[j + 1] <= '9') {
404 b = buffer[j + 1] - '0';
406 else if (buffer[j + 1] >= 'a' && buffer[j + 1] <= 'f') {
407 b = buffer[j + 1] - 'a' + 10;
408 } else {
409 return -EINVAL;
412 key[i] = b * 16 + a;
415 res = switch_handler(dev, alg_string, key, key_len);
417 /* errors */
418 if (res < 0) {
419 return res;
422 /* ok */
423 if (res == 0) {
424 return count;
427 printk(KERN_ERR "Error: ccrypt error - should not be here\n");
428 return -EINVAL;
431 ssize_t ccrypt_rx_store(struct device *dev, struct device_attribute *attr,
432 const char *buf, size_t len)
434 return ccrypt_key_store_handle(to_net_dev(dev), buf, len, rx_switch);
437 ssize_t ccrypt_tx_store(struct device *dev, struct device_attribute *attr,
438 const char *buf, size_t len)
440 return ccrypt_key_store_handle(to_net_dev(dev), buf, len, tx_switch);
443 ssize_t ccrypt_tx_show(struct device *dev, struct device_attribute *attr,
444 char *buf)
446 return -EINVAL; /* not implemented yet */
449 ssize_t ccrypt_rx_show(struct device *dev, struct device_attribute *attr,
450 char *buf)
452 return -EINVAL; /* not implemented yet */
456 * Check if buffer has right ipv4 structures.
458 static
459 inline int is_valid_ipv4(struct iphdr* hdr, int len)
461 u16 tmp_check;
463 if (len < sizeof(struct iphdr)) {
464 return 0;
467 if (hdr->ihl < 5 || hdr->ihl > 15) {
468 return 0;
471 if (len < sizeof(struct iphdr) + hdr->ihl * 4) {
472 return 0;
475 tmp_check = hdr->check;
476 hdr->check = 0; /* required by ip_fast_csum */
478 if (tmp_check != ip_fast_csum((unsigned char *)hdr, hdr->ihl)) {
479 return 0;
482 hdr->check = tmp_check;
484 return 1;
488 * IP validation.
490 static
491 inline int is_valid_ip(struct iphdr* hdr, int len)
493 if (len < sizeof(struct iphdr)) {
494 return 0;
497 if (hdr->version == 4) {
498 return is_valid_ipv4(hdr, len);
501 return 0;
505 * ARP validation.
507 static inline int is_valid_arp(struct arphdr* hdr, int len)
509 if (len < 4) {
510 return 0;
513 switch (hdr->ar_hrd) {
514 /* supported hardware layers */
515 case __constant_htons(ARPHRD_ETHER):
516 break;
517 default:
518 return 0;
521 switch (hdr->ar_pro) {
522 /* supported protocols */
523 case __constant_htons(ETH_P_IP): /* ipv4 */
524 break;
525 default:
526 return 0;
529 /* hardware address length
530 * as we support only Ethernet ... */
531 if (hdr->ar_hln != 6) {
532 return 0;
535 return 1;
539 * PPPoE validation.
541 int is_valid_pppoe(u16 ethertype, struct pppoe_hdr* hdr, int len)
543 if (len < sizeof(struct pppoe_hdr)) {
544 return 0;
547 if (hdr->type != 1) {
548 return 0;
551 if (hdr->ver != 1) {
552 return 0;
555 switch (hdr->code) {
556 case PADI_CODE:
557 case PADO_CODE:
558 case PADR_CODE:
559 case PADS_CODE:
560 case PADT_CODE:
561 if (ethertype != ETH_P_PPP_DISC) {
562 return 0;
564 break;
565 case 0:
566 if (ethertype != ETH_P_PPP_SES) {
567 return 0;
569 break;
570 default:
571 return 0;
573 return 1;
577 * Check if decoded buffer is right in needed places.
579 * Ethertype should be after htons().
581 static
582 int is_decoded_buffer_valid(u16 ethertype, u8* buffer, int len)
584 /* TODO: add more protocols */
585 /* XXX: keep documentation in sync */
586 switch (ethertype) {
587 case ETH_P_IP:
588 /* IP */
589 if (!is_valid_ip((struct iphdr*)buffer, len)) {
590 return 0;
592 break;
593 case ETH_P_ARP:
594 /* arp */
595 if (!is_valid_arp((struct arphdr*)buffer, len)) {
596 return 0;
598 break;
599 case ETH_P_PPP_DISC:
600 case ETH_P_PPP_SES:
601 /* pppoe */
602 if (!is_valid_pppoe(ethertype, (struct pppoe_hdr*)buffer, len)) {
603 return 0;
605 break;
606 default:
607 return 0;
609 return 1;
613 * Save received iv vector in appropriate place.
615 static
616 inline void save_recv_iv(struct ccrypt_rx* cc_rx,
617 unsigned int key_no, unsigned int iv_no,
618 u8* src_buffer, unsigned int len, unsigned int iv_len)
620 if (likely(len >= iv_len)) {
621 memcpy(cc_rx->last_recv_iv[key_no][iv_no],
622 src_buffer, iv_len);
624 else {
625 memset(cc_rx->last_recv_iv[key_no][iv_no] + len,
626 '\0', iv_len - len);
627 memcpy(cc_rx->last_recv_iv[key_no][iv_no],
628 src_buffer, len);
633 * Try to decode incoming packet using skb->dev->ccrypt_rx group.
635 * Returns 0 on success.
636 * -EINVAL on standard "drop it".
638 * Caller must hold ccrypt_rx_lock.
640 int ccrypt_decrypt(struct sk_buff **pskb)
642 struct ccrypt_rx* cc_rx;
643 struct crypto_blkcipher* tfm = 0;
644 struct blkcipher_desc desc;
645 struct sk_buff* skb = 0;
646 int res;
647 u16 len;
648 unsigned int aligned_len, unaligned_len;
649 unsigned int bsize;
650 struct scatterlist sg_out;
651 struct scatterlist sg_residual;
652 struct scatterlist sg;
653 unsigned int iv_len;
654 int i;
655 u8* iv;
656 u8 key_no_org;
657 u8 key_no, iv_no;
658 u8* decode_buffer;
659 u16 ethertype;
660 u8* data;
662 skb = *pskb;
663 cc_rx = skb->dev->ccrypt_rx;
664 cc_rx->valid_counter++;
665 len = skb->len;
667 if (len < ETH_ZLEN - sizeof(struct ethhdr) - VLAN_HLEN) {
668 /* if shorter - it couldn't have been sent by ccrypt_encode */
669 return -EINVAL;
671 data = skb->data;
673 ethertype = htons(*((u16*)(skb->data - 2)));
675 if (ethertype == ETHERTYPE_CCRYPTEDIP) {
676 ethertype = ETH_P_IP;
677 *((u16*)(data - 2)) = __constant_htons(ETH_P_IP);
678 skb->protocol = __constant_htons(ETH_P_IP);
679 } else if (ethertype == ETH_P_8021Q) {
680 len -= VLAN_HLEN;
681 data += VLAN_HLEN;
682 ethertype = htons(*((u16*)(data - 2)));
686 * original stays in data, all tries will
687 * be validated in decode_buffer
689 decode_buffer = kmalloc(sizeof(u8) * len, GFP_ATOMIC);
691 if (!decode_buffer) {
692 if (net_ratelimit())
693 printk(KERN_ERR "ccrypt_decrypt: kmalloc failed.\n");
694 return -ENOMEM;
697 sg_set_buf(&sg_out, decode_buffer, len);
700 * be warned: fancy logic ahead
702 for (key_no_org = 0; key_no_org < 2; ++key_no_org) {
704 /* if we are right after key switch, use key 2 first
705 * until you get first msg encoded with new key */
706 if (cc_rx->after_switch) {
707 key_no = 1 - key_no_org;
709 else {
710 key_no = key_no_org;
713 if (!cc_rx->after_switch && key_no == 1) {
714 /* if sender used new key once - it should
715 * not use old key anymore */
716 continue;
719 tfm = cc_rx->tfms[key_no];
720 if (!tfm) {
721 continue;
723 memset(&desc, 0, sizeof(desc));
724 desc.tfm = tfm;
725 desc.flags = 0;
727 bsize = crypto_blkcipher_blocksize(tfm);
728 unaligned_len = len % bsize;
729 aligned_len = len - unaligned_len;
730 iv_len = crypto_blkcipher_ivsize(tfm);
732 for (iv_no = 0; iv_no < 2; ++iv_no) {
733 if (cc_rx->last_recv_iv_matched[key_no] && iv_no == 1) {
734 /* skip if there is no point trying
735 * because there is no iv from "wrong packet"
736 * to try */
737 continue;
740 iv = cc_rx->last_recv_iv[key_no][iv_no];
742 if (!iv) {
743 continue;
746 sg_set_buf(&sg, data, aligned_len);
747 crypto_blkcipher_set_iv(tfm, iv, iv_len);
748 res = crypto_blkcipher_decrypt(&desc, &sg_out, &sg, aligned_len);
750 if (res) {
751 printk(KERN_ERR "cipher_decrypt_iv() failed flags=%x\n",
752 tfm->base.crt_flags);
753 return res;
756 if (unaligned_len) {
757 u8 residual_block[bsize];
758 sg_set_buf(&sg_residual, residual_block, bsize);
760 if (unlikely(aligned_len < bsize * 2)) {
761 sg_set_buf(&sg, iv, bsize);
763 else {
764 sg_set_buf(&sg, data, bsize);
767 res = crypto_blkcipher_encrypt(&desc,
768 &sg_residual, &sg, bsize);
770 if (res) {
771 printk(KERN_ERR "cipher_encrypt_iv() failed flags=%x\n",
772 tfm->base.crt_flags);
773 return res;
776 for (i = 0; i < unaligned_len; ++i) {
777 decode_buffer[aligned_len + i] =
778 residual_block[i] ^ data[aligned_len + i];
782 /* it's a kind of magic ... magic ... magic ... */
783 if (is_decoded_buffer_valid(ethertype, decode_buffer, len)) {
784 if (key_no == 0) {
785 cc_rx->after_switch = 0;
788 cc_rx->last_recv_iv_matched[key_no] = 1;
789 save_recv_iv(cc_rx, key_no, 0, data, len, iv_len);
790 goto finish_match;
795 /* there was no match for both ivs for key - save "wrong iv" */
796 cc_rx->last_recv_iv_matched[key_no] = 0;
797 save_recv_iv(cc_rx, key_no, 1, data, len, iv_len);
800 /* finish_no_match: */
801 kfree(decode_buffer);
803 if (cc_rx->valid_counter < 1000) {
804 if (++cc_rx->invalid_counter > 10) {
805 if (net_ratelimit()) {
806 printk(KERN_INFO "ccrypt_rx on %s detected frequent "
807 "invalid packets\n",
808 skb->dev->name);
810 cc_rx->invalid_counter = 0;
813 cc_rx->valid_counter = 0;
814 return -EINVAL;
816 finish_match:
817 memcpy(data, decode_buffer, len);
818 kfree(decode_buffer);
819 return 0;
823 * Encode sk_buff.
825 * Returns 0 on success.
827 * Caller must hold ccrypt_tx_lock.
829 * Assumptions:
830 * (*pskb)->data points at the start of frame,
831 * (where mac.raw should point)
832 * (*pskb)->len is overall packet len
833 * *pskb is linearized
835 int ccrypt_encrypt(struct sk_buff **pskb)
837 struct crypto_blkcipher* tfm = 0;
838 struct blkcipher_desc desc;
839 struct sk_buff* skb = 0;
840 struct sk_buff* nskb = 0;
841 int res;
842 unsigned int len;
843 unsigned int aligned_len, unaligned_len;
844 unsigned int bsize;
845 struct scatterlist sg;
846 struct scatterlist sg_residual;
847 unsigned int iv_len;
848 unsigned int i;
849 unsigned int expand;
850 u8* iv;
851 u8* data;
852 unsigned int old_len;
853 struct ccrypt_tx* cc_tx = 0;
855 skb = *pskb;
857 cc_tx = skb->dev->ccrypt_tx;
859 tfm = cc_tx->tfm;
861 memset(&desc, 0, sizeof(desc));
862 desc.tfm = tfm;
863 desc.flags = 0;
865 if (!tfm) {
866 return -EINVAL;
870 * we can't let packet be expanded in the future
871 * do it now so the Ethernet device wouldn't have to
873 if (skb->len < ETH_ZLEN) {
874 if (skb_shared(skb)) {
875 nskb = skb_clone(skb, GFP_ATOMIC);
876 if (!skb) {
877 if (net_ratelimit()) {
878 printk(KERN_ERR "ccrypt_tx: "
879 "couldn't unshare tiny packet\n");
881 return -ENOMEM;
883 skb = nskb;
884 *pskb = nskb;
886 old_len = skb->len;
887 expand = ETH_ZLEN - old_len;
888 if (skb_tailroom(skb) < expand) {
889 res = pskb_expand_head(skb, 0, expand, GFP_ATOMIC);
890 if (res) {
891 if (net_ratelimit()) {
892 printk(KERN_ERR "ccrypt_tx: "
893 "couldn't expand tiny packet\n");
895 return res;
898 skb_put(skb, expand);
899 memset(skb->data + old_len, 0, expand);
902 data = skb->data + sizeof(struct ethhdr);
903 len = skb->len - sizeof(struct ethhdr);
905 switch (((struct ethhdr*)(skb->data))->h_proto) {
906 case __constant_htons(ETH_P_8021Q):
907 data += VLAN_HLEN;
908 len -= VLAN_HLEN;
909 break;
910 case __constant_htons(ETH_P_IP):
911 ((struct ethhdr*)(skb->data))->h_proto
912 = __constant_htons(ETHERTYPE_CCRYPTEDIP);
915 bsize = crypto_blkcipher_blocksize(tfm);
916 unaligned_len = len % bsize;
917 aligned_len = len - unaligned_len;
918 iv_len = crypto_blkcipher_ivsize(tfm);
919 sg_set_buf(&sg, data, aligned_len);
920 iv = cc_tx->last_sent_iv;
922 crypto_blkcipher_set_iv(tfm, iv, iv_len);
924 res = crypto_blkcipher_encrypt(&desc, &sg, &sg, aligned_len);
926 if (res) {
927 printk(KERN_ERR "cipher_encrypt_iv() failed flags=%x\n",
928 tfm->base.crt_flags);
929 return res;
932 /* do residual block termination */
933 if (unaligned_len) {
934 u8 residual_block[bsize];
935 sg_set_buf(&sg_residual, residual_block, bsize);
937 if (unlikely(aligned_len < bsize * 2)) {
938 sg_set_buf(&sg, iv, bsize);
940 else {
941 sg_set_buf(&sg, data, bsize);
944 res = crypto_blkcipher_encrypt(&desc, &sg_residual, &sg, bsize);
946 if (res) {
947 printk(KERN_ERR "cipher_encrypt_iv() failed flags=%x\n",
948 tfm->base.crt_flags);
949 return res;
952 for (i = 0; i < unaligned_len; ++i) {
953 data[aligned_len + i] ^= residual_block[i];
957 if (likely(len >= iv_len)) {
958 memcpy(iv, data, iv_len);
960 else {
961 memset(iv + len, 0, iv_len - len);
962 memcpy(iv, data, len);
965 return 0;
968 EXPORT_SYMBOL(ccrypt_tx_store);
969 EXPORT_SYMBOL(ccrypt_rx_store);
970 EXPORT_SYMBOL(ccrypt_rx_reset);
971 EXPORT_SYMBOL(ccrypt_tx_reset);
972 EXPORT_SYMBOL(ccrypt_tx_show);
973 EXPORT_SYMBOL(ccrypt_rx_show);
974 EXPORT_SYMBOL(ccrypt_decrypt);
975 EXPORT_SYMBOL(ccrypt_encrypt);