/*
 * Scrape artifacts from the gitweb blob view, preserved as a comment:
 *   [POWERPC] Handle alignment faults on SPE load/store instructions
 *   [linux-2.6/mini2440.git] / include/asm-arm/checksum.h
 *   blob eaa0efd8d0d47e39ce6b5087a9eb60d1bc61811b
 */
1 /*
2 * linux/include/asm-arm/checksum.h
4 * IP checksum routines
6 * Copyright (C) Original authors of ../asm-i386/checksum.h
7 * Copyright (C) 1996-1999 Russell King
8 */
9 #ifndef __ASM_ARM_CHECKSUM_H
10 #define __ASM_ARM_CHECKSUM_H
12 #include <linux/in6.h>
/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
26 __wsum csum_partial(const void *buff, int len, __wsum sum);
/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
36 __wsum
37 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
39 __wsum
40 csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
/*
 * Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum sum)
{
	/*
	 * Add the 32-bit sum to itself rotated right by 16: the high
	 * halfword of the result then holds hi16 + lo16 plus the carry
	 * out of the low halfword, i.e. the one's-complement fold.
	 */
	__asm__(
	"add	%0, %1, %1, ror #16	@ csum_fold"
	: "=r" (sum)
	: "r" (sum)
	: "cc");
	/* Folded value lives in the top 16 bits; complement and extract it. */
	return (__force __sum16)(~(__force u32)sum >> 16);
}
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 */
static inline __sum16
ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int tmp1;
	__wsum sum;

	/*
	 * Sum the IP header one 32-bit word at a time, accumulating the
	 * inter-word carries with adcs.  ihl is the header length in
	 * 32-bit words (minimum 5); the first four words are summed
	 * straight-line and the remaining ihl-4 words in the loop at 1:.
	 * The loop control uses tst/subne/bne — per the original
	 * comments, chosen so the carry flag accumulated by adcs is not
	 * destroyed between iterations.  The trailing adc #0 folds the
	 * final carry back in (end-around carry).
	 */
	__asm__ __volatile__(
	"ldr	%0, [%1], #4	@ ip_fast_csum		\n\
	ldr	%3, [%1], #4				\n\
	sub	%2, %2, #5				\n\
	adds	%0, %0, %3				\n\
	ldr	%3, [%1], #4				\n\
	adcs	%0, %0, %3				\n\
	ldr	%3, [%1], #4				\n\
1:	adcs	%0, %0, %3				\n\
	ldr	%3, [%1], #4				\n\
	tst	%2, #15			@ do this carefully \n\
	subne	%2, %2, #1		@ without destroying \n\
	bne	1b			@ the carry flag \n\
	adcs	%0, %0, %3				\n\
	adc	%0, %0, #0"
	/* iph and ihl are consumed (matched to inputs "1"/"2" below). */
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
	: "1" (iph), "2" (ihl)
	: "cc", "memory");
	return csum_fold(sum);
}
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		   unsigned short proto, __wsum sum)
{
	/*
	 * Accumulate the pseudo-header fields into the running checksum
	 * with end-around carry (adcs chain + final adc #0).
	 *
	 * proto is converted with htons() before the asm.  len is added
	 * as "len, lsl #8": on little-endian its high byte lands in bits
	 * 16-23, which the later 16-bit fold wraps back into bits 0-7 —
	 * equivalent to adding htons(len).  On big-endian (__ARMEB__)
	 * no shift is needed.
	 */
	__asm__(
	"adds	%0, %1, %2	@ csum_tcpudp_nofold	\n\
	adcs	%0, %0, %3				\n"
#ifdef __ARMEB__
	"adcs	%0, %0, %4				\n"
#else
	"adcs	%0, %0, %4, lsl #8			\n"
#endif
	"adcs	%0, %0, %5				\n\
	adc	%0, %0, #0"
	: "=&r"(sum)
	: "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
	: "cc");
	return sum;
}
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
109 static inline __sum16
110 csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
111 unsigned short proto, __wsum sum)
113 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
121 static inline __sum16
122 ip_compute_csum(const void *buff, int len)
124 return csum_fold(csum_partial(buff, len, 0));
127 #define _HAVE_ARCH_IPV6_CSUM
128 extern __wsum
129 __csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
130 __be32 proto, __wsum sum);
132 static inline __sum16
133 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len,
134 unsigned short proto, __wsum sum)
136 return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
137 htonl(proto), sum));
139 #endif