2 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/netinet/in_cksum.c,v 1.9 2005/01/06 09:14:13 hsu Exp $
37 #include <sys/param.h>
38 #include <sys/systm.h>
40 #include <sys/in_cksum.h>
42 #include <netinet/in.h>
43 #include <netinet/in_systm.h>
44 #include <netinet/ip.h>
45 #include <netinet/ip_var.h>
47 #include <machine/endian.h>
50 * Return the 16 bit 1's complement checksum in network byte order. Devolve
51 * the mbuf into 32 bit aligned segments that we can pass to assembly and
52 * do the rest manually. Even though we return a 16 bit unsigned value,
53 * we declare it as a 32 bit unsigned value to reduce unnecessary assembly
56 * Byte ordering issues. Note two things. First, no secondary carry occurs,
57 * and second, a one's complement checksum is endian-independent. If we are
58 * given a data buffer in network byte order, our checksum will be in network
61 * 0xffff + 0xffff = 0xfffe + C = 0xffff (so no second carry occurs).
63 * 0x8142 + 0x8243 = 0x0385 + C = 0x0386 (checksum is in same byte order
64 * 0x4281 + 0x4382 = 0x8603 as the data regardless of arch)
66 * This works with 16, 32, 64, etc... bits as long as we deal with the
67 * carry when collapsing it back down to 16 bits.
71 in_cksum_range(struct mbuf
*m
, int nxt
, int offset
, int bytes
)
88 if (offset
< sizeof(struct ipovly
))
89 panic("in_cksum_range: offset too short");
90 if (m
->m_len
< sizeof(struct ip
))
91 panic("in_cksum_range: bad mbuf chain");
92 bzero(&ipov
, sizeof ipov
);
93 ipov
.ih_len
= htons(bytes
);
95 ipov
.ih_src
= mtod(m
, struct ip
*)->ip_src
;
96 ipov
.ih_dst
= mtod(m
, struct ip
*)->ip_dst
;
97 ptr
= (uint8_t *)&ipov
;
99 sum32
= asm_ones32(ptr
, sizeof(ipov
) / 4);
100 sum32
= (sum32
>> 16) + (sum32
& 0xffff);
108 * Skip fully engulfed mbufs. Branch predict optimal.
110 while (m
&& offset
>= m
->m_len
) {
116 * Process the checksum for each segment. Note that the code below is
117 * branch-predict optimal, so it's faster then you might otherwise
118 * believe. When we are buffer-aligned but also odd-byte-aligned from
119 * the point of view of the IP packet, we accumulate to sum1 instead of
122 * Initial offsets do not pre-set flip (assert that offset is even?)
124 while (bytes
> 0 && m
) {
126 * Calculate pointer base and number of bytes to snarf, account
129 ptr
= mtod(m
, __uint8_t
*) + offset
;
130 if ((n
= m
->m_len
- offset
) > bytes
)
135 * First 16-bit-align our buffer by eating a byte if necessary,
136 * then 32-bit-align our buffer by eating a word if necessary.
138 * We are endian-sensitive when chomping a byte. WARNING! Be
139 * careful optimizing this! 16 ane 32 bit words must be aligned
140 * for this to be generic code.
142 if (((intptr_t)ptr
& 1) && n
) {
143 #if BYTE_ORDER == LITTLE_ENDIAN
158 if (((intptr_t)ptr
& 2) && n
> 1) {
160 sum1
+= *(__uint16_t
*)ptr
;
162 sum0
+= *(__uint16_t
*)ptr
;
168 * Process a 32-bit aligned data buffer and accumulate the result
169 * in sum0 or sum1. Allow only one 16 bit overflow carry.
174 sum32
= asm_ones32((void *)ptr
, n
>> 2);
175 sum32
= (sum32
>> 16) + (sum32
& 0xffff);
181 /* n &= 3; dontcare */
185 * Handle oddly-sized buffers. Handle word issues first while
186 * ptr is still aligned.
190 sum1
+= *(__uint16_t
*)ptr
;
192 sum0
+= *(__uint16_t
*)ptr
;
194 /* n -= 2; dontcare */
197 #if BYTE_ORDER == LITTLE_ENDIAN
208 /* ++ptr; dontcare */
217 * Due to byte aligned or oddly-sized buffers we may have a checksum
218 * in sum1 which needs to be shifted and added to our main sum. There
219 * is a presumption here that no more then 255 overflows occured which
220 * is 255/3 byte aligned mbufs in the worst case.
223 sum0
= (sum0
>> 16) + (sum0
& 0xffff);
226 return(~sum0
& 0xffff);