dragonfly.git: sys/netinet/in_cksum.c
/*
 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/in_cksum.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>

#include <machine/endian.h>
/*
 * Return the 16 bit 1's complement checksum in network byte order.  Devolve
 * the mbuf into 32 bit aligned segments that we can pass to assembly and
 * do the rest manually.  Even though we return a 16 bit unsigned value,
 * we declare it as a 32 bit unsigned value to reduce unnecessary assembly
 * conversions.
 *
 * Byte ordering issues.  Note two things.  First, no secondary carry occurs,
 * and second, a one's complement checksum is endian-independent.  If we are
 * given a data buffer in network byte order, our checksum will be in network
 * byte order.
 *
 * 0xffff + 0xffff = 0xfffe + C = 0xffff (so no second carry occurs).
 *
 * 0x8142 + 0x8243 = 0x0385 + C = 0x0386 (checksum is in same byte order
 * 0x4281 + 0x4382 = 0x8603              as the data regardless of arch)
 *
 * This works with 16, 32, 64, etc... bits as long as we deal with the
 * carry when collapsing it back down to 16 bits.
 */
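/*
 * For example, summing two 16 bit words in a 32 bit accumulator and then
 * folding the carry gives the same result as a pure 16 bit one's complement
 * add:
 *
 *	16 bit: 0xffff + 0x0003 = 0x0002 + C          = 0x0003
 *	32 bit: 0xffff + 0x0003 = 0x00010002 -> 0x0001 + 0x0002 = 0x0003
 */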
uint32_t
in_cksum_range(const struct mbuf *m, int nxt, int offset, int bytes)
{
	const uint8_t *ptr;
	uint32_t sum0;
	uint32_t sum1;
	int n;
	int flip;

	sum0 = 0;
	sum1 = 0;
	flip = 0;
	if (nxt != 0) {
		uint32_t sum32;
		struct ipovly ipov;

		/* pseudo header */
		if (offset < sizeof(struct ipovly))
			panic("in_cksum_range: offset too short");
		if (m->m_len < sizeof(struct ip))
			panic("in_cksum_range: bad mbuf chain");
		bzero(&ipov, sizeof ipov);
		ipov.ih_len = htons(bytes);
		ipov.ih_pr = nxt;
		ipov.ih_src = mtod(m, const struct ip *)->ip_src;
		ipov.ih_dst = mtod(m, const struct ip *)->ip_dst;
		ptr = (const uint8_t *)&ipov;
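
		/*
		 * asm_ones32() is presumably the machine-dependent primitive
		 * that sums the given number of 32 bit words into a 32 bit
		 * accumulator; its partial result is folded once before being
		 * accumulated into sum0 or sum1.
		 */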
		sum32 = asm_ones32(ptr, sizeof(ipov) / 4);
		sum32 = (sum32 >> 16) + (sum32 & 0xffff);
		if (flip)
			sum1 += sum32;
		else
			sum0 += sum32;
	}
	/*
	 * Skip fully engulfed mbufs.  Branch predict optimal.
	 */
	while (m && offset >= m->m_len) {
		offset -= m->m_len;
		m = m->m_next;
	}

	/*
	 * Process the checksum for each segment.  Note that the code below is
	 * branch-predict optimal, so it's faster than you might otherwise
	 * believe.  When we are buffer-aligned but also odd-byte-aligned from
	 * the point of view of the IP packet, we accumulate to sum1 instead of
	 * sum0.
	 *
	 * Initial offsets do not pre-set flip (assert that offset is even?)
	 */
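	/*
	 * Roughly speaking: once an odd number of packet bytes has been
	 * consumed, the 16 bit loads below straddle the packet's word
	 * boundaries and their bytes land in swapped lanes.  Those loads are
	 * collected in sum1, and the final "sum0 += sum1 << 8" swaps them
	 * back.
	 */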
	while (bytes > 0 && m) {
		/*
		 * Calculate pointer base and number of bytes to snarf, account
		 * for snarfed bytes.
		 */
		ptr = mtod(m, const uint8_t *) + offset;
		if ((n = m->m_len - offset) > bytes)
			n = bytes;
		bytes -= n;

		/*
		 * First 16-bit-align our buffer by eating a byte if necessary,
		 * then 32-bit-align our buffer by eating a word if necessary.
		 *
		 * We are endian-sensitive when chomping a byte.  WARNING!  Be
		 * careful optimizing this!  16 and 32 bit words must be
		 * aligned for this to be generic code.
		 */
		if (((intptr_t)ptr & 1) && n) {
#if BYTE_ORDER == LITTLE_ENDIAN
			if (flip)
				sum1 += ptr[0];
			else
				sum0 += ptr[0];
#else
			if (flip)
				sum0 += ptr[0];
			else
				sum1 += ptr[0];
#endif
			++ptr;
			--n;
			flip = 1 - flip;
		}
		if (((intptr_t)ptr & 2) && n > 1) {
			if (flip)
				sum1 += *(const uint16_t *)ptr;
			else
				sum0 += *(const uint16_t *)ptr;
			ptr += 2;
			n -= 2;
		}
		/*
		 * Process a 32-bit aligned data buffer and accumulate the
		 * result in sum0 or sum1.  Allow only one 16 bit overflow
		 * carry.
		 */
		if (n >= 4) {
			uint32_t sum32;

			sum32 = asm_ones32((const void *)ptr, n >> 2);
			sum32 = (sum32 >> 16) + (sum32 & 0xffff);
			if (flip)
				sum1 += sum32;
			else
				sum0 += sum32;
			ptr += n & ~3;
			/* n &= 3;	dontcare */
		}
		/*
		 * Handle oddly-sized buffers.  Handle word issues first while
		 * ptr is still aligned.
		 */
		if (n & 2) {
			if (flip)
				sum1 += *(const uint16_t *)ptr;
			else
				sum0 += *(const uint16_t *)ptr;
			ptr += 2;
			/* n -= 2;	dontcare */
		}
		if (n & 1) {
#if BYTE_ORDER == LITTLE_ENDIAN
			if (flip)
				sum1 += ptr[0];
			else
				sum0 += ptr[0];
#else
			if (flip)
				sum0 += ptr[0];
			else
				sum1 += ptr[0];
#endif
			/* ++ptr;	dontcare */
			/* --n;		dontcare */
			flip = 1 - flip;
		}
		m = m->m_next;
		offset = 0;
	}
	/*
	 * Due to byte aligned or oddly-sized buffers we may have a checksum
	 * in sum1 which needs to be shifted and added to our main sum.  There
	 * is a presumption here that no more than 255 overflows occurred,
	 * which is 255/3 byte-aligned mbufs in the worst case.
	 */
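	/*
	 * The shift plus the fold below amounts to byte swapping sum1 before
	 * adding it in, e.g. sum1 == 0x1234 becomes 0x123400, which folds to
	 * 0x0012 + 0x3400 = 0x3412.
	 */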
	sum0 += sum1 << 8;
	sum0 = (sum0 >> 16) + (sum0 & 0xffff);
	if (sum0 > 0xffff)
		++sum0;
	return(~sum0 & 0xffff);
}
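
/*
 * Illustrative user-space sketch (not part of the kernel source above): the
 * same 16 bit one's complement checksum computed over a flat buffer, without
 * the mbuf walking, the alignment handling or the pseudo header.  It can
 * serve as a simple reference when testing; note that in_cksum_range() keeps
 * the sum in the byte order of the data, so on a little endian machine its
 * result is the byte-swapped form of the value computed here.
 */
#include <stdint.h>
#include <stddef.h>

static uint16_t
cksum_flat(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	/* Sum the data as big endian 16 bit words. */
	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}

	/* An odd trailing byte is padded with a zero low byte. */
	if (len)
		sum += (uint32_t)p[0] << 8;

	/* Fold the carries back in (end-around carry), then complement. */
	while (sum > 0xffff)
		sum = (sum >> 16) + (sum & 0xffff);
	return (uint16_t)(~sum & 0xffff);
}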