/*
 * QEMU network structures definitions and helper functions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "net/tap.h"
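
/*
 * Tag the frame headed by @ehdr with VLAN tag @vlan_tag, using
 * @vlan_ethtype as the tag's EtherType.  If the frame already carries a
 * VLAN header only the TCI is rewritten; *is_new tells the caller
 * whether a new header was inserted.
 */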
void eth_setup_vlan_headers_ex(struct eth_header *ehdr, uint16_t vlan_tag,
                               uint16_t vlan_ethtype, bool *is_new)
{
    struct vlan_header *vhdr = PKT_GET_VLAN_HDR(ehdr);

    switch (be16_to_cpu(ehdr->h_proto)) {
    case ETH_P_VLAN:
    case ETH_P_DVLAN:
        /* vlan hdr exists */
        *is_new = false;
        break;

    default:
        /* No VLAN header, put a new one */
        vhdr->h_proto = ehdr->h_proto;
        ehdr->h_proto = cpu_to_be16(vlan_ethtype);
        *is_new = true;
        break;
    }
    vhdr->h_tci = cpu_to_be16(vlan_tag);
}
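
/*
 * Map an L3/L4 protocol pair to the matching VIRTIO_NET_HDR_GSO_* type
 * for a virtio-net header, OR-ing in VIRTIO_NET_HDR_GSO_ECN when the
 * packet is marked ECN-CE.  Unknown combinations are logged and yield
 * VIRTIO_NET_HDR_GSO_NONE.
 */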
uint8_t
eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto)
{
    uint8_t ecn_state = 0;

    if (l3_proto == ETH_P_IP) {
        struct ip_header *iphdr = (struct ip_header *) l3_hdr;

        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            if (IPTOS_ECN(iphdr->ip_tos) == IPTOS_ECN_CE) {
                ecn_state = VIRTIO_NET_HDR_GSO_ECN;
            }
            if (l4proto == IP_PROTO_TCP) {
                return VIRTIO_NET_HDR_GSO_TCPV4 | ecn_state;
            } else if (l4proto == IP_PROTO_UDP) {
                return VIRTIO_NET_HDR_GSO_UDP | ecn_state;
            }
        }
    } else if (l3_proto == ETH_P_IPV6) {
        struct ip6_header *ip6hdr = (struct ip6_header *) l3_hdr;

        if (IP6_ECN(ip6hdr->ip6_ecn_acc) == IP6_ECN_CE) {
            ecn_state = VIRTIO_NET_HDR_GSO_ECN;
        }

        if (l4proto == IP_PROTO_TCP) {
            return VIRTIO_NET_HDR_GSO_TCPV6 | ecn_state;
        }
    }
    qemu_log_mask(LOG_UNIMP, "%s: probably not GSO frame, "
                  "unknown L3 protocol: 0x%04"PRIx16"\n", __func__, l3_proto);

    return VIRTIO_NET_HDR_GSO_NONE | ecn_state;
}
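
/*
 * Return the EtherType found at the end of an L2 header of @l2hdr_len
 * bytes, or ETH_P_UNKNOWN if the iovec is too short to contain it.
 */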
uint16_t
eth_get_l3_proto(const struct iovec *l2hdr_iov, int iovcnt, size_t l2hdr_len)
{
    uint16_t proto;
    size_t copied;
    size_t size = iov_size(l2hdr_iov, iovcnt);
    size_t proto_offset = l2hdr_len - sizeof(proto);

    if (size < proto_offset) {
        return ETH_P_UNKNOWN;
    }

    copied = iov_to_buf(l2hdr_iov, iovcnt, proto_offset,
                        &proto, sizeof(proto));

    return (copied == sizeof(proto)) ? be16_to_cpu(proto) : ETH_P_UNKNOWN;
}
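
/*
 * Copy @length bytes at @offset from the iovec into @buffer; false
 * means the requested range is not fully contained in the input.
 */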
static bool
_eth_copy_chunk(size_t input_size,
                const struct iovec *iov, int iovcnt,
                size_t offset, size_t length,
                void *buffer)
{
    size_t copied;

    if (input_size < offset) {
        return false;
    }

    copied = iov_to_buf(iov, iovcnt, offset, buffer, length);

    if (copied < length) {
        return false;
    }

    return true;
}
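
/*
 * Return true if the TCP segment carries payload bytes, i.e. the L4
 * length derived from the IPv4 total length or the IPv6 payload length
 * exceeds the TCP data offset.
 */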
static bool
_eth_tcp_has_data(bool is_ip4,
                  const struct ip_header *ip4_hdr,
                  const struct ip6_header *ip6_hdr,
                  size_t full_ip6hdr_len,
                  const struct tcp_header *tcp)
{
    uint32_t l4len;

    if (is_ip4) {
        l4len = be16_to_cpu(ip4_hdr->ip_len) - IP_HDR_GET_LEN(ip4_hdr);
    } else {
        size_t opts_len = full_ip6hdr_len - sizeof(struct ip6_header);
        l4len = be16_to_cpu(ip6_hdr->ip6_ctlun.ip6_un1.ip6_un1_plen) - opts_len;
    }

    return l4len > TCP_HEADER_DATA_OFFSET(tcp);
}
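
/*
 * Parse the L3 and L4 headers of the frame in @iov.  On return *hasip4
 * or *hasip6 reports a valid IPv4/IPv6 header, the *_off arguments give
 * the offsets of the L3, L4 and L5 data, and the info structures are
 * filled in.  L4 parsing is skipped for fragmented packets.
 */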
void eth_get_protocols(const struct iovec *iov, int iovcnt,
                       bool *hasip4, bool *hasip6,
                       size_t *l3hdr_off,
                       size_t *l4hdr_off,
                       size_t *l5hdr_off,
                       eth_ip6_hdr_info *ip6hdr_info,
                       eth_ip4_hdr_info *ip4hdr_info,
                       eth_l4_hdr_info *l4hdr_info)
{
    int proto;
    bool fragment = false;
    size_t l2hdr_len = eth_get_l2_hdr_length_iov(iov, iovcnt);
    size_t input_size = iov_size(iov, iovcnt);
    size_t copied;
    uint8_t ip_p;

    *hasip4 = *hasip6 = false;
    l4hdr_info->proto = ETH_L4_HDR_PROTO_INVALID;

    proto = eth_get_l3_proto(iov, iovcnt, l2hdr_len);

    *l3hdr_off = l2hdr_len;

    if (proto == ETH_P_IP) {
        struct ip_header *iphdr = &ip4hdr_info->ip4_hdr;

        if (input_size < l2hdr_len) {
            return;
        }

        copied = iov_to_buf(iov, iovcnt, l2hdr_len, iphdr, sizeof(*iphdr));
        if (copied < sizeof(*iphdr) ||
            IP_HEADER_VERSION(iphdr) != IP_HEADER_VERSION_4) {
            return;
        }

        *hasip4 = true;
        ip_p = iphdr->ip_p;
        ip4hdr_info->fragment = IP4_IS_FRAGMENT(iphdr);
        *l4hdr_off = l2hdr_len + IP_HDR_GET_LEN(iphdr);

        fragment = ip4hdr_info->fragment;
    } else if (proto == ETH_P_IPV6) {
        if (!eth_parse_ipv6_hdr(iov, iovcnt, l2hdr_len, ip6hdr_info)) {
            return;
        }

        *hasip6 = true;
        ip_p = ip6hdr_info->l4proto;
        *l4hdr_off = l2hdr_len + ip6hdr_info->full_hdr_len;
        fragment = ip6hdr_info->fragment;
    } else {
        return;
    }

    if (fragment) {
        return;
    }

    switch (ip_p) {
    case IP_PROTO_TCP:
        if (_eth_copy_chunk(input_size,
                            iov, iovcnt,
                            *l4hdr_off, sizeof(l4hdr_info->hdr.tcp),
                            &l4hdr_info->hdr.tcp)) {
            l4hdr_info->proto = ETH_L4_HDR_PROTO_TCP;
            *l5hdr_off = *l4hdr_off +
                TCP_HEADER_DATA_OFFSET(&l4hdr_info->hdr.tcp);

            l4hdr_info->has_tcp_data =
                _eth_tcp_has_data(proto == ETH_P_IP,
                                  &ip4hdr_info->ip4_hdr,
                                  &ip6hdr_info->ip6_hdr,
                                  *l4hdr_off - *l3hdr_off,
                                  &l4hdr_info->hdr.tcp);
        }
        break;

    case IP_PROTO_UDP:
        if (_eth_copy_chunk(input_size,
                            iov, iovcnt,
                            *l4hdr_off, sizeof(l4hdr_info->hdr.udp),
                            &l4hdr_info->hdr.udp)) {
            l4hdr_info->proto = ETH_L4_HDR_PROTO_UDP;
            *l5hdr_off = *l4hdr_off + sizeof(l4hdr_info->hdr.udp);
        }
        break;
    }
}
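
/*
 * Strip the outermost VLAN tag of the frame at @iovoff.  The rebuilt
 * header (plus the remaining inner VLAN header of a double-tagged
 * frame) is written to @new_ehdr_buf and its size returned; *tci
 * receives the stripped TCI and *payload_offset the offset of the data
 * behind the copied headers.  Returns 0 for untagged or truncated
 * frames.
 */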
size_t
eth_strip_vlan(const struct iovec *iov, int iovcnt, size_t iovoff,
               uint8_t *new_ehdr_buf,
               uint16_t *payload_offset, uint16_t *tci)
{
    struct vlan_header vlan_hdr;
    struct eth_header *new_ehdr = (struct eth_header *) new_ehdr_buf;

    size_t copied = iov_to_buf(iov, iovcnt, iovoff,
                               new_ehdr, sizeof(*new_ehdr));

    if (copied < sizeof(*new_ehdr)) {
        return 0;
    }

    switch (be16_to_cpu(new_ehdr->h_proto)) {
    case ETH_P_VLAN:
    case ETH_P_DVLAN:
        copied = iov_to_buf(iov, iovcnt, iovoff + sizeof(*new_ehdr),
                            &vlan_hdr, sizeof(vlan_hdr));

        if (copied < sizeof(vlan_hdr)) {
            return 0;
        }

        new_ehdr->h_proto = vlan_hdr.h_proto;

        *tci = be16_to_cpu(vlan_hdr.h_tci);
        *payload_offset = iovoff + sizeof(*new_ehdr) + sizeof(vlan_hdr);

        if (be16_to_cpu(new_ehdr->h_proto) == ETH_P_VLAN) {

            copied = iov_to_buf(iov, iovcnt, *payload_offset,
                                PKT_GET_VLAN_HDR(new_ehdr), sizeof(vlan_hdr));

            if (copied < sizeof(vlan_hdr)) {
                return 0;
            }

            *payload_offset += sizeof(vlan_hdr);

            return sizeof(struct eth_header) + sizeof(struct vlan_header);
        } else {
            return sizeof(struct eth_header);
        }
    default:
        return 0;
    }
}
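
/*
 * Like eth_strip_vlan(), but strips only a tag whose EtherType matches
 * @vet and does not look at a possible inner tag.
 */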
size_t
eth_strip_vlan_ex(const struct iovec *iov, int iovcnt, size_t iovoff,
                  uint16_t vet, uint8_t *new_ehdr_buf,
                  uint16_t *payload_offset, uint16_t *tci)
{
    struct vlan_header vlan_hdr;
    struct eth_header *new_ehdr = (struct eth_header *) new_ehdr_buf;

    size_t copied = iov_to_buf(iov, iovcnt, iovoff,
                               new_ehdr, sizeof(*new_ehdr));

    if (copied < sizeof(*new_ehdr)) {
        return 0;
    }

    if (be16_to_cpu(new_ehdr->h_proto) == vet) {
        copied = iov_to_buf(iov, iovcnt, iovoff + sizeof(*new_ehdr),
                            &vlan_hdr, sizeof(vlan_hdr));

        if (copied < sizeof(vlan_hdr)) {
            return 0;
        }

        new_ehdr->h_proto = vlan_hdr.h_proto;

        *tci = be16_to_cpu(vlan_hdr.h_tci);
        *payload_offset = iovoff + sizeof(*new_ehdr) + sizeof(vlan_hdr);
        return sizeof(struct eth_header);
    }

    return 0;
}
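
/* Recompute the IPv4 header checksum of @l3hdr (@l3hdr_len bytes) in place. */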
void
eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len)
{
    struct ip_header *iphdr = (struct ip_header *) l3hdr;
    iphdr->ip_sum = 0;
    iphdr->ip_sum = cpu_to_be16(net_raw_checksum(l3hdr, l3hdr_len));
}
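
/*
 * Compute the checksum of the IPv4 pseudo header for a payload of @csl
 * bytes; *cso receives the size of the pseudo header that was summed.
 */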
uint32_t
eth_calc_ip4_pseudo_hdr_csum(struct ip_header *iphdr,
                             uint16_t csl,
                             uint32_t *cso)
{
    struct ip_pseudo_header ipph;
    ipph.ip_src = iphdr->ip_src;
    ipph.ip_dst = iphdr->ip_dst;
    ipph.ip_payload = cpu_to_be16(csl);
    ipph.ip_proto = iphdr->ip_p;
    ipph.zeros = 0;
    *cso = sizeof(ipph);
    return net_checksum_add(*cso, (uint8_t *) &ipph);
}
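
/*
 * As above, but for the IPv6 pseudo header, with @l4_proto as the
 * pseudo header's next-header value.
 */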
uint32_t
eth_calc_ip6_pseudo_hdr_csum(struct ip6_header *iphdr,
                             uint16_t csl,
                             uint8_t l4_proto,
                             uint32_t *cso)
{
    struct ip6_pseudo_header ipph;
    ipph.ip6_src = iphdr->ip6_src;
    ipph.ip6_dst = iphdr->ip6_dst;
    ipph.len = cpu_to_be16(csl);
    ipph.zero[0] = 0;
    ipph.zero[1] = 0;
    ipph.zero[2] = 0;
    ipph.next_hdr = l4_proto;
    *cso = sizeof(ipph);
    return net_checksum_add(*cso, (uint8_t *)&ipph);
}
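
/* Return true if @hdr_type denotes an IPv6 extension header. */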
static bool
eth_is_ip6_extension_header_type(uint8_t hdr_type)
{
    switch (hdr_type) {
    case IP6_HOP_BY_HOP:
    case IP6_ROUTING:
    case IP6_FRAGMENT:
    case IP6_AUTHENTICATION:
    case IP6_DESTINATON:
    case IP6_MOBILITY:
        return true;
    default:
        return false;
    }
}
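
/*
 * Fetch the final destination address from a type 2 routing extension
 * header (Mobile IPv6) for RSS.  Fails unless the header has routing
 * type 2, one segment left, and the address fits in the packet.
 */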
static bool
_eth_get_rss_ex_dst_addr(const struct iovec *pkt, int pkt_frags,
                         size_t ext_hdr_offset,
                         struct ip6_ext_hdr *ext_hdr,
                         struct in6_address *dst_addr)
{
    struct ip6_ext_hdr_routing rt_hdr;
    size_t input_size = iov_size(pkt, pkt_frags);
    size_t bytes_read;

    if (input_size < ext_hdr_offset + sizeof(rt_hdr) + sizeof(*dst_addr)) {
        return false;
    }

    bytes_read = iov_to_buf(pkt, pkt_frags, ext_hdr_offset,
                            &rt_hdr, sizeof(rt_hdr));
    assert(bytes_read == sizeof(rt_hdr));
    if ((rt_hdr.rtype != 2) || (rt_hdr.segleft != 1)) {
        return false;
    }
    bytes_read = iov_to_buf(pkt, pkt_frags, ext_hdr_offset + sizeof(rt_hdr),
                            dst_addr, sizeof(*dst_addr));
    assert(bytes_read == sizeof(*dst_addr));

    return true;
}
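
/*
 * Scan a destination options extension header for a home address
 * option (Mobile IPv6) and extract its address for RSS.  Fails when
 * the option is absent or truncated.
 */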
static bool
_eth_get_rss_ex_src_addr(const struct iovec *pkt, int pkt_frags,
                         size_t dsthdr_offset,
                         struct ip6_ext_hdr *ext_hdr,
                         struct in6_address *src_addr)
{
    size_t bytes_left = (ext_hdr->ip6r_len + 1) * 8 - sizeof(*ext_hdr);
    struct ip6_option_hdr opthdr;
    size_t opt_offset = dsthdr_offset + sizeof(*ext_hdr);

    while (bytes_left > sizeof(opthdr)) {
        size_t input_size = iov_size(pkt, pkt_frags);
        size_t bytes_read, optlen;

        if (input_size < opt_offset) {
            return false;
        }

        bytes_read = iov_to_buf(pkt, pkt_frags, opt_offset,
                                &opthdr, sizeof(opthdr));

        if (bytes_read != sizeof(opthdr)) {
            return false;
        }

        optlen = (opthdr.type == IP6_OPT_PAD1) ? 1
                 : (opthdr.len + sizeof(opthdr));

        if (optlen > bytes_left) {
            return false;
        }

        if (opthdr.type == IP6_OPT_HOME) {
            size_t input_size = iov_size(pkt, pkt_frags);

            if (input_size < opt_offset + sizeof(opthdr)) {
                return false;
            }

            bytes_read = iov_to_buf(pkt, pkt_frags,
                                    opt_offset + sizeof(opthdr),
                                    src_addr, sizeof(*src_addr));

            return bytes_read == sizeof(*src_addr);
        }

        opt_offset += optlen;
        bytes_left -= optlen;
    }

    return false;
}
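
/*
 * Parse the IPv6 header at @ip6hdr_off, walking the extension header
 * chain to determine the L4 protocol, the full header length, the
 * fragmentation state and any Mobile IPv6 addresses of interest for
 * RSS.  Returns false on truncated input.
 */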
bool eth_parse_ipv6_hdr(const struct iovec *pkt, int pkt_frags,
                        size_t ip6hdr_off, eth_ip6_hdr_info *info)
{
    struct ip6_ext_hdr ext_hdr;
    size_t bytes_read;
    uint8_t curr_ext_hdr_type;
    size_t input_size = iov_size(pkt, pkt_frags);

    info->rss_ex_dst_valid = false;
    info->rss_ex_src_valid = false;
    info->fragment = false;

    if (input_size < ip6hdr_off) {
        return false;
    }

    bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off,
                            &info->ip6_hdr, sizeof(info->ip6_hdr));
    if (bytes_read < sizeof(info->ip6_hdr)) {
        return false;
    }

    info->full_hdr_len = sizeof(struct ip6_header);

    curr_ext_hdr_type = info->ip6_hdr.ip6_nxt;

    if (!eth_is_ip6_extension_header_type(curr_ext_hdr_type)) {
        info->l4proto = info->ip6_hdr.ip6_nxt;
        info->has_ext_hdrs = false;
        return true;
    }

    info->has_ext_hdrs = true;

    do {
        if (input_size < ip6hdr_off + info->full_hdr_len) {
            return false;
        }

        bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off + info->full_hdr_len,
                                &ext_hdr, sizeof(ext_hdr));

        if (bytes_read < sizeof(ext_hdr)) {
            return false;
        }

        if (curr_ext_hdr_type == IP6_ROUTING) {
            if (ext_hdr.ip6r_len == sizeof(struct in6_address) / 8) {
                info->rss_ex_dst_valid =
                    _eth_get_rss_ex_dst_addr(pkt, pkt_frags,
                                             ip6hdr_off + info->full_hdr_len,
                                             &ext_hdr, &info->rss_ex_dst);
            }
        } else if (curr_ext_hdr_type == IP6_DESTINATON) {
            info->rss_ex_src_valid =
                _eth_get_rss_ex_src_addr(pkt, pkt_frags,
                                         ip6hdr_off + info->full_hdr_len,
                                         &ext_hdr, &info->rss_ex_src);
        } else if (curr_ext_hdr_type == IP6_FRAGMENT) {
            info->fragment = true;
        }

        info->full_hdr_len += (ext_hdr.ip6r_len + 1) * IP6_EXT_GRANULARITY;
        curr_ext_hdr_type = ext_hdr.ip6r_nxt;
    } while (eth_is_ip6_extension_header_type(curr_ext_hdr_type));

    info->l4proto = ext_hdr.ip6r_nxt;
    return true;
}
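
/*
 * Zero-pad a frame shorter than the minimum Ethernet frame length
 * (ETH_ZLEN) into @padded_pkt.  Returns false when the frame is long
 * enough already and nothing was copied.  A typical call site looks
 * roughly like this (sketch, local names hypothetical):
 *
 *     uint8_t min_pkt[ETH_ZLEN];
 *     size_t min_pktsz = sizeof(min_pkt);
 *
 *     if (eth_pad_short_frame(min_pkt, &min_pktsz, buf, size)) {
 *         buf = min_pkt;
 *         size = min_pktsz;
 *     }
 */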
bool eth_pad_short_frame(uint8_t *padded_pkt, size_t *padded_buflen,
                         const void *pkt, size_t pkt_size)
{
    assert(padded_buflen && *padded_buflen >= ETH_ZLEN);

    if (pkt_size >= ETH_ZLEN) {
        return false;
    }

    /* pad to minimum Ethernet frame length */
    memcpy(padded_pkt, pkt, pkt_size);
    memset(&padded_pkt[pkt_size], 0, ETH_ZLEN - pkt_size);
    *padded_buflen = ETH_ZLEN;

    return true;
}