/*
 * QEMU network structures definitions and helper functions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "qemu-common.h"
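
/*
 * Set up an 802.1Q VLAN header for the frame pointed to by ehdr.
 * If the frame does not already carry a VLAN header, a new one is inserted
 * right after the Ethernet addresses; in both cases the TCI field is set
 * to vlan_tag.
 */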
void eth_setup_vlan_headers(struct eth_header *ehdr, uint16_t vlan_tag,
    bool *is_new)
{
    struct vlan_header *vhdr = PKT_GET_VLAN_HDR(ehdr);

    switch (be16_to_cpu(ehdr->h_proto)) {
    case ETH_P_VLAN:
    case ETH_P_DVLAN:
        /* vlan hdr exists */
        *is_new = false;
        break;

    default:
        /* No VLAN header, put a new one */
        vhdr->h_proto = ehdr->h_proto;
        ehdr->h_proto = cpu_to_be16(ETH_P_VLAN);
        *is_new = true;
        break;
    }

    vhdr->h_tci = cpu_to_be16(vlan_tag);
}
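
/*
 * Map an L3/L4 protocol pair to the VIRTIO_NET_HDR_GSO_* type expected in
 * a virtio-net header, OR-ing in VIRTIO_NET_HDR_GSO_ECN when the IP header
 * signals ECN Congestion Experienced. Aborts on protocol combinations that
 * have no corresponding GSO type.
 */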
uint8_t
eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto)
{
    uint8_t ecn_state = 0;

    if (l3_proto == ETH_P_IP) {
        struct ip_header *iphdr = (struct ip_header *) l3_hdr;

        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            if (IPTOS_ECN(iphdr->ip_tos) == IPTOS_ECN_CE) {
                ecn_state = VIRTIO_NET_HDR_GSO_ECN;
            }
            if (l4proto == IP_PROTO_TCP) {
                return VIRTIO_NET_HDR_GSO_TCPV4 | ecn_state;
            } else if (l4proto == IP_PROTO_UDP) {
                return VIRTIO_NET_HDR_GSO_UDP | ecn_state;
            }
        }
    } else if (l3_proto == ETH_P_IPV6) {
        struct ip6_header *ip6hdr = (struct ip6_header *) l3_hdr;

        if (IP6_ECN(ip6hdr->ip6_ecn_acc) == IP6_ECN_CE) {
            ecn_state = VIRTIO_NET_HDR_GSO_ECN;
        }

        if (l4proto == IP_PROTO_TCP) {
            return VIRTIO_NET_HDR_GSO_TCPV6 | ecn_state;
        }
    }

    /* Unsupported offload */
    g_assert_not_reached();

    return VIRTIO_NET_HDR_GSO_NONE | ecn_state;
}
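
/*
 * Classify a packet whose headers (at least hdr_length bytes) are contiguous
 * in memory: detect whether it is IPv4 or IPv6 and whether the L4 protocol
 * is TCP or UDP. All flags are cleared first, so at most one L3 flag and one
 * L4 flag end up set.
 */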
void eth_get_protocols(const uint8_t *headers,
                       uint32_t hdr_length,
                       bool *isip4, bool *isip6,
                       bool *isudp, bool *istcp)
{
    int proto;
    size_t l2hdr_len = eth_get_l2_hdr_length(headers);
    assert(hdr_length >= eth_get_l2_hdr_length(headers));
    *isip4 = *isip6 = *isudp = *istcp = false;

    proto = eth_get_l3_proto(headers, l2hdr_len);
    if (proto == ETH_P_IP) {
        *isip4 = true;

        struct ip_header *iphdr;

        assert(hdr_length >=
               eth_get_l2_hdr_length(headers) + sizeof(struct ip_header));

        iphdr = PKT_GET_IP_HDR(headers);

        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            if (iphdr->ip_p == IP_PROTO_TCP) {
                *istcp = true;
            } else if (iphdr->ip_p == IP_PROTO_UDP) {
                *isudp = true;
            }
        }
    } else if (proto == ETH_P_IPV6) {
        uint8_t l4proto;
        size_t full_ip6hdr_len;

        struct iovec hdr_vec;
        hdr_vec.iov_base = (void *) headers;
        hdr_vec.iov_len = hdr_length;

        *isip6 = true;
        if (eth_parse_ipv6_hdr(&hdr_vec, 1, l2hdr_len,
                               &l4proto, &full_ip6hdr_len)) {
            if (l4proto == IP_PROTO_TCP) {
                *istcp = true;
            } else if (l4proto == IP_PROTO_UDP) {
                *isudp = true;
            }
        }
    }
}
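
/*
 * Rewrite the IPv4 fragmentation fields of a fragment: encode frag_offset
 * in 8-byte units into ip_off, set or clear the More Fragments bit, and
 * update ip_len to cover this fragment's header plus payload. Non-IPv4
 * frames are left untouched.
 */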
void
eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
                            void *l3hdr, size_t l3hdr_len,
                            size_t l3payload_len,
                            size_t frag_offset, bool more_frags)
{
    if (eth_get_l3_proto(l2hdr, l2hdr_len) == ETH_P_IP) {
        uint16_t orig_flags;
        struct ip_header *iphdr = (struct ip_header *) l3hdr;
        uint16_t frag_off_units = frag_offset / IP_FRAG_UNIT_SIZE;
        uint16_t new_ip_off;

        assert(frag_offset % IP_FRAG_UNIT_SIZE == 0);
        assert((frag_off_units & ~IP_OFFMASK) == 0);

        orig_flags = be16_to_cpu(iphdr->ip_off) & ~(IP_OFFMASK|IP_MF);
        new_ip_off = frag_off_units | orig_flags | (more_frags ? IP_MF : 0);
        iphdr->ip_off = cpu_to_be16(new_ip_off);
        iphdr->ip_len = cpu_to_be16(l3payload_len + l3hdr_len);
    }
}
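
/*
 * Recompute the IPv4 header checksum in place: the ip_sum field is zeroed
 * and then refilled with the checksum of the whole l3hdr_len-byte header.
 */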
void
eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len)
{
    struct ip_header *iphdr = (struct ip_header *) l3hdr;
    iphdr->ip_sum = 0;
    iphdr->ip_sum = cpu_to_be16(net_raw_checksum(l3hdr, l3hdr_len));
}
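
/*
 * Build the IPv4 pseudo header (source and destination addresses, protocol
 * and payload length csl) and return its unfolded checksum, as needed when
 * computing TCP/UDP checksums.
 */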
uint32_t
eth_calc_pseudo_hdr_csum(struct ip_header *iphdr, uint16_t csl)
{
    struct ip_pseudo_header ipph;
    ipph.ip_src = iphdr->ip_src;
    ipph.ip_dst = iphdr->ip_dst;
    ipph.ip_payload = cpu_to_be16(csl);
    ipph.ip_proto = iphdr->ip_p;

    return net_checksum_add(sizeof(ipph), (uint8_t *) &ipph);
}
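
/*
 * Return true if hdr_type denotes an IPv6 extension header that may precede
 * the upper-layer protocol and therefore has to be skipped while searching
 * for the L4 header.
 */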
static bool
eth_is_ip6_extension_header_type(uint8_t hdr_type)
{
    switch (hdr_type) {
    case IP6_HOP_BY_HOP:
    case IP6_ROUTING:
    case IP6_FRAGMENT:
    case IP6_ESP:
    case IP6_AUTHENTICATION:
    case IP6_DESTINATON:
    case IP6_MOBILITY:
        return true;
    default:
        return false;
    }
}
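
/*
 * Walk the IPv6 header chain of a packet stored in an iovec, starting at
 * ip6hdr_off. On success *l4proto receives the first non-extension next
 * header value and *full_hdr_len the total length of the IPv6 header plus
 * all extension headers. Returns false if the fixed header cannot be read.
 */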
bool eth_parse_ipv6_hdr(struct iovec *pkt, int pkt_frags,
                        size_t ip6hdr_off, uint8_t *l4proto,
                        size_t *full_hdr_len)
{
    struct ip6_header ip6_hdr;
    struct ip6_ext_hdr ext_hdr;
    size_t bytes_read;

    bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off,
                            &ip6_hdr, sizeof(ip6_hdr));
    if (bytes_read < sizeof(ip6_hdr)) {
        return false;
    }

    *full_hdr_len = sizeof(struct ip6_header);

    if (!eth_is_ip6_extension_header_type(ip6_hdr.ip6_nxt)) {
        *l4proto = ip6_hdr.ip6_nxt;
        return true;
    }

    do {
        bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off + *full_hdr_len,
                                &ext_hdr, sizeof(ext_hdr));
        *full_hdr_len += (ext_hdr.ip6r_len + 1) * IP6_EXT_GRANULARITY;
    } while (eth_is_ip6_extension_header_type(ext_hdr.ip6r_nxt));

    *l4proto = ext_hdr.ip6r_nxt;
    return true;
}