/*
 *  linux/include/asm-arm/checksum.h
 *
 * IP checksum routines
 *
 * Copyright (C) Original authors of ../asm-i386/checksum.h
 * Copyright (C) 1996-1999 Russell King
 */
#ifndef __ASM_ARM_CHECKSUM_H
#define __ASM_ARM_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
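
/*
 * Usage sketch (illustrative only): checksumming a buffer in two
 * fragments, feeding the intermediate 32-bit sum back in and folding
 * the final result:
 *
 *	__wsum sum;
 *	__sum16 folded;
 *
 *	sum    = csum_partial(buf, first_len, 0);
 *	sum    = csum_partial(buf + first_len, rest_len, sum);
 *	folded = csum_fold(sum);
 *
 * buf, first_len and rest_len are hypothetical; every fragment except
 * the last must have even length, as noted above.
 */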
/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);

__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 */
static inline __sum16
ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum, tmp1;

	__asm__ __volatile__(
	"ldr	%0, [%1], #4		@ ip_fast_csum		\n\
	ldr	%3, [%1], #4					\n\
	sub	%2, %2, #5					\n\
	adds	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
	adcs	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
1:	adcs	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
	tst	%2, #15			@ do this carefully	\n\
	subne	%2, %2, #1		@ without destroying	\n\
	bne	1b			@ the carry flag	\n\
	adcs	%0, %0, %3					\n\
	adc	%0, %0, #0					\n\
	adds	%0, %0, %0, lsl #16				\n\
	addcs	%0, %0, #0x10000				\n\
	mvn	%0, %0						\n\
	mov	%0, %0, lsr #16"
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
	: "1" (iph), "2" (ihl)
	: "cc");
	return (__force __sum16)sum;
}
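
/*
 * Usage sketch (illustrative only): validating a received IPv4 header
 * whose checksum field is already filled in; a correct header sums to
 * zero:
 *
 *	struct iphdr *ip = ip_hdr(skb);
 *
 *	if (ip_fast_csum(ip, ip->ihl))
 *		goto drop;
 *
 * ihl is the header length in 32-bit words (5 when there are no
 * options), matching the 4-octet granularity assumed above; skb and
 * the drop label stand in for the caller's context.
 */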
/*
 * Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum sum)
{
	__asm__(
	"adds	%0, %1, %1, lsl #16	@ csum_fold		\n\
	addcs	%0, %0, #0x10000"
	: "=r" (sum)
	: "r" (sum)
	: "cc");
	return (__force __sum16)(~(__force u32)sum >> 16);
}
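
/*
 * computes a 32-bit, unfolded checksum of the TCP/UDP pseudo-header
 * (source address, destination address, length and protocol) on top
 * of the partial checksum passed in "sum"; fold the result with
 * csum_fold, or let csum_tcpudp_magic below do both steps at once
 */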
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		   unsigned short proto, __wsum sum)
{
	__asm__(
	"adds	%0, %1, %2		@ csum_tcpudp_nofold	\n\
	adcs	%0, %0, %3					\n\
	adcs	%0, %0, %4					\n\
	adcs	%0, %0, %5					\n\
	adc	%0, %0, #0"
	: "=&r"(sum)
	: "r" (sum), "r" (daddr), "r" (saddr), "r" (htons(len)), "Ir" (htons(proto))
	: "cc");
	return sum;
}
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	__asm__(
	"adds	%0, %1, %2		@ csum_tcpudp_magic	\n\
	adcs	%0, %0, %3					\n\
	adcs	%0, %0, %4					\n\
	adcs	%0, %0, %5					\n\
	adc	%0, %0, #0					\n\
	adds	%0, %0, %0, lsl #16				\n\
	addcs	%0, %0, #0x10000				\n\
	mvn	%0, %0"
	: "=&r"(sum)
	: "r" (sum), "r" (daddr), "r" (saddr), "r" (htons(len)), "Ir" (htons(proto))
	: "cc");
	return (__force __sum16)((__force u32)sum >> 16);
}
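
/*
 * Usage sketch (illustrative only): verifying a received UDP datagram.
 * The UDP header and payload (checksum field left in place) are summed
 * first, then the pseudo-header is added and the sum folded in one
 * step; a valid datagram yields zero:
 *
 *	__wsum sum = csum_partial(uh, ulen, 0);
 *
 *	if (csum_tcpudp_magic(saddr, daddr, ulen, IPPROTO_UDP, sum))
 *		goto csum_error;
 *
 * uh, ulen, saddr, daddr and the csum_error label are hypothetical
 * caller context.
 */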
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16
ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
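
/*
 * Usage sketch (illustrative only): filling in an ICMP checksum, which
 * covers the whole message and has no pseudo-header:
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, msg_len);
 *
 * icmph and msg_len are hypothetical placeholders.
 */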
#define _HAVE_ARCH_IPV6_CSUM
extern __wsum
__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
		__be32 proto, __wsum sum);

static inline __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len,
		unsigned short proto, __wsum sum)
{
	return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
					   htonl(proto), sum));
}
#endif