net/eth.c
/*
 * QEMU network structures definitions and helper functions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "net/eth.h"
#include "net/checksum.h"
#include "qemu-common.h"
#include "net/tap.h"
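
/*
 * Write an 802.1Q VLAN header for the frame whose Ethernet header is at
 * ehdr.  If the frame already carries a VLAN or double-VLAN EtherType,
 * only the TCI is rewritten and *is_new is set to false; otherwise the
 * original EtherType is moved into the new VLAN header, the Ethernet
 * EtherType becomes ETH_P_VLAN, and *is_new is set to true.
 */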
void eth_setup_vlan_headers(struct eth_header *ehdr, uint16_t vlan_tag,
    bool *is_new)
{
    struct vlan_header *vhdr = PKT_GET_VLAN_HDR(ehdr);

    switch (be16_to_cpu(ehdr->h_proto)) {
    case ETH_P_VLAN:
    case ETH_P_DVLAN:
        /* vlan hdr exists */
        *is_new = false;
        break;

    default:
        /* No VLAN header, put a new one */
        vhdr->h_proto = ehdr->h_proto;
        ehdr->h_proto = cpu_to_be16(ETH_P_VLAN);
        *is_new = true;
        break;
    }
    vhdr->h_tci = cpu_to_be16(vlan_tag);
}
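
/*
 * Map an L3/L4 protocol pair onto the matching VIRTIO_NET_HDR_GSO_* type,
 * OR-ing in VIRTIO_NET_HDR_GSO_ECN when the IP header reports ECN
 * congestion experienced (CE).  Combinations with no matching GSO type
 * fall through to g_assert_not_reached().
 */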
uint8_t
eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto)
{
    uint8_t ecn_state = 0;

    if (l3_proto == ETH_P_IP) {
        struct ip_header *iphdr = (struct ip_header *) l3_hdr;

        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            if (IPTOS_ECN(iphdr->ip_tos) == IPTOS_ECN_CE) {
                ecn_state = VIRTIO_NET_HDR_GSO_ECN;
            }
            if (l4proto == IP_PROTO_TCP) {
                return VIRTIO_NET_HDR_GSO_TCPV4 | ecn_state;
            } else if (l4proto == IP_PROTO_UDP) {
                return VIRTIO_NET_HDR_GSO_UDP | ecn_state;
            }
        }
    } else if (l3_proto == ETH_P_IPV6) {
        struct ip6_header *ip6hdr = (struct ip6_header *) l3_hdr;

        if (IP6_ECN(ip6hdr->ip6_ecn_acc) == IP6_ECN_CE) {
            ecn_state = VIRTIO_NET_HDR_GSO_ECN;
        }

        if (l4proto == IP_PROTO_TCP) {
            return VIRTIO_NET_HDR_GSO_TCPV6 | ecn_state;
        }
    }

    /* Unsupported offload */
    g_assert_not_reached();

    return VIRTIO_NET_HDR_GSO_NONE | ecn_state;
}
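
/*
 * Inspect the packet headers in 'headers' (hdr_length bytes) and report
 * which protocols are present: IPv4 or IPv6 at L3, TCP or UDP at L4.
 * For IPv6, the extension-header chain is walked via eth_parse_ipv6_hdr()
 * to locate the real upper-layer protocol.
 */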
void eth_get_protocols(const uint8_t *headers,
                       uint32_t hdr_length,
                       bool *isip4, bool *isip6,
                       bool *isudp, bool *istcp)
{
    int proto;
    size_t l2hdr_len = eth_get_l2_hdr_length(headers);
    assert(hdr_length >= eth_get_l2_hdr_length(headers));
    *isip4 = *isip6 = *isudp = *istcp = false;

    proto = eth_get_l3_proto(headers, l2hdr_len);
    if (proto == ETH_P_IP) {
        *isip4 = true;

        struct ip_header *iphdr;

        assert(hdr_length >=
            eth_get_l2_hdr_length(headers) + sizeof(struct ip_header));

        iphdr = PKT_GET_IP_HDR(headers);

        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            if (iphdr->ip_p == IP_PROTO_TCP) {
                *istcp = true;
            } else if (iphdr->ip_p == IP_PROTO_UDP) {
                *isudp = true;
            }
        }
    } else if (proto == ETH_P_IPV6) {
        uint8_t l4proto;
        size_t full_ip6hdr_len;

        struct iovec hdr_vec;
        hdr_vec.iov_base = (void *) headers;
        hdr_vec.iov_len = hdr_length;

        *isip6 = true;
        if (eth_parse_ipv6_hdr(&hdr_vec, 1, l2hdr_len,
                               &l4proto, &full_ip6hdr_len)) {
            if (l4proto == IP_PROTO_TCP) {
                *istcp = true;
            } else if (l4proto == IP_PROTO_UDP) {
                *isudp = true;
            }
        }
    }
}
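
/*
 * Patch an IPv4 header for use in a fragment: encode frag_offset (which
 * must be a multiple of IP_FRAG_UNIT_SIZE) into ip_off, set or clear the
 * "more fragments" flag, and update ip_len to cover this fragment's
 * payload.  Non-IPv4 packets are left untouched.
 */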
void
eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
                            void *l3hdr, size_t l3hdr_len,
                            size_t l3payload_len,
                            size_t frag_offset, bool more_frags)
{
    if (eth_get_l3_proto(l2hdr, l2hdr_len) == ETH_P_IP) {
        uint16_t orig_flags;
        struct ip_header *iphdr = (struct ip_header *) l3hdr;
        uint16_t frag_off_units = frag_offset / IP_FRAG_UNIT_SIZE;
        uint16_t new_ip_off;

        assert(frag_offset % IP_FRAG_UNIT_SIZE == 0);
        assert((frag_off_units & ~IP_OFFMASK) == 0);

        orig_flags = be16_to_cpu(iphdr->ip_off) & ~(IP_OFFMASK|IP_MF);
        new_ip_off = frag_off_units | orig_flags | (more_frags ? IP_MF : 0);
        iphdr->ip_off = cpu_to_be16(new_ip_off);
        iphdr->ip_len = cpu_to_be16(l3payload_len + l3hdr_len);
    }
}
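
/*
 * Recompute the IPv4 header checksum over l3hdr_len bytes and store it
 * back into the header.
 */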
void
eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len)
{
    struct ip_header *iphdr = (struct ip_header *) l3hdr;
    iphdr->ip_sum = 0;
    iphdr->ip_sum = cpu_to_be16(net_raw_checksum(l3hdr, l3hdr_len));
}
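
/*
 * Return the partial checksum over the IPv4 pseudo header (source and
 * destination addresses, protocol, and payload length csl), as used when
 * computing TCP/UDP checksums.
 */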
uint32_t
eth_calc_pseudo_hdr_csum(struct ip_header *iphdr, uint16_t csl)
{
    struct ip_pseudo_header ipph;
    ipph.ip_src = iphdr->ip_src;
    ipph.ip_dst = iphdr->ip_dst;
    ipph.ip_payload = cpu_to_be16(csl);
    ipph.ip_proto = iphdr->ip_p;
    ipph.zeros = 0;
    return net_checksum_add(sizeof(ipph), (uint8_t *) &ipph);
}
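
/*
 * Return true if hdr_type is one of the IPv6 extension header types that
 * eth_parse_ipv6_hdr() knows how to skip over.
 */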
static bool
eth_is_ip6_extension_header_type(uint8_t hdr_type)
{
    switch (hdr_type) {
    case IP6_HOP_BY_HOP:
    case IP6_ROUTING:
    case IP6_FRAGMENT:
    case IP6_ESP:
    case IP6_AUTHENTICATION:
    case IP6_DESTINATON:
    case IP6_MOBILITY:
        return true;
    default:
        return false;
    }
}
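
/*
 * Parse the IPv6 header starting at ip6hdr_off within the iovec 'pkt',
 * skipping any extension headers.  On success *l4proto receives the
 * upper-layer protocol number and *full_hdr_len the total length of the
 * IPv6 header plus extension headers; returns false if the fixed header
 * cannot be read.
 */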
bool eth_parse_ipv6_hdr(struct iovec *pkt, int pkt_frags,
                        size_t ip6hdr_off, uint8_t *l4proto,
                        size_t *full_hdr_len)
{
    struct ip6_header ip6_hdr;
    struct ip6_ext_hdr ext_hdr;
    size_t bytes_read;

    bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off,
                            &ip6_hdr, sizeof(ip6_hdr));
    if (bytes_read < sizeof(ip6_hdr)) {
        return false;
    }

    *full_hdr_len = sizeof(struct ip6_header);

    if (!eth_is_ip6_extension_header_type(ip6_hdr.ip6_nxt)) {
        *l4proto = ip6_hdr.ip6_nxt;
        return true;
    }

    do {
        bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off + *full_hdr_len,
                                &ext_hdr, sizeof(ext_hdr));
        *full_hdr_len += (ext_hdr.ip6r_len + 1) * IP6_EXT_GRANULARITY;
    } while (eth_is_ip6_extension_header_type(ext_hdr.ip6r_nxt));

    *l4proto = ext_hdr.ip6r_nxt;
    return true;
}