/*
 * QEMU network structures definitions and helper functions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "net/eth.h"
#include "net/checksum.h"
#include "qemu-common.h"

void eth_setup_vlan_headers(struct eth_header *ehdr, uint16_t vlan_tag,
    bool *is_new)
{
    struct vlan_header *vhdr = PKT_GET_VLAN_HDR(ehdr);

    switch (be16_to_cpu(ehdr->h_proto)) {
    case ETH_P_VLAN:
    case ETH_P_DVLAN:
        /* VLAN header already present */
        *is_new = false;
        break;

    default:
        /* No VLAN header, put a new one */
        vhdr->h_proto = ehdr->h_proto;
        ehdr->h_proto = cpu_to_be16(ETH_P_VLAN);
        *is_new = true;
        break;
    }

    vhdr->h_tci = cpu_to_be16(vlan_tag);
}
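
/*
 * Illustrative usage of eth_setup_vlan_headers() (a sketch; "frame" is a
 * hypothetical pointer to an Ethernet header that already has room for the
 * 4-byte 802.1Q tag right behind it):
 *
 *     bool is_new;
 *     eth_setup_vlan_headers(frame, 100, &is_new);
 *     if (is_new) {
 *         // the Ethertype was rewritten to ETH_P_VLAN and a fresh tag added
 *     }
 */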

uint8_t
eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto)
{
    uint8_t ecn_state = 0;

    if (l3_proto == ETH_P_IP) {
        struct ip_header *iphdr = (struct ip_header *) l3_hdr;

        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            if (IPTOS_ECN(iphdr->ip_tos) == IPTOS_ECN_CE) {
                ecn_state = VIRTIO_NET_HDR_GSO_ECN;
            }
            if (l4proto == IP_PROTO_TCP) {
                return VIRTIO_NET_HDR_GSO_TCPV4 | ecn_state;
            } else if (l4proto == IP_PROTO_UDP) {
                return VIRTIO_NET_HDR_GSO_UDP | ecn_state;
            }
        }
    } else if (l3_proto == ETH_P_IPV6) {
        struct ip6_header *ip6hdr = (struct ip6_header *) l3_hdr;

        if (IP6_ECN(ip6hdr->ip6_ecn_acc) == IP6_ECN_CE) {
            ecn_state = VIRTIO_NET_HDR_GSO_ECN;
        }

        if (l4proto == IP_PROTO_TCP) {
            return VIRTIO_NET_HDR_GSO_TCPV6 | ecn_state;
        }
    }

    /* Unsupported offload */
    g_assert_not_reached();

    return VIRTIO_NET_HDR_GSO_NONE | ecn_state;
}
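
/*
 * Illustrative mapping performed by eth_get_gso_type() (a sketch; "buf" is a
 * hypothetical pointer to a complete Ethernet frame carrying TCP over IPv4):
 *
 *     uint16_t l3_proto = eth_get_l3_proto(buf, eth_get_l2_hdr_length(buf));
 *     uint8_t gso = eth_get_gso_type(l3_proto,
 *                                    (uint8_t *) PKT_GET_IP_HDR(buf),
 *                                    IP_PROTO_TCP);
 *     // gso == VIRTIO_NET_HDR_GSO_TCPV4, with VIRTIO_NET_HDR_GSO_ECN or-ed
 *     // in when the IP header marks ECN CE
 */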

void eth_get_protocols(const uint8_t *headers,
                       uint32_t hdr_length,
                       bool *isip4, bool *isip6,
                       bool *isudp, bool *istcp)
{
    int proto;
    size_t l2hdr_len = eth_get_l2_hdr_length(headers);
    assert(hdr_length >= eth_get_l2_hdr_length(headers));
    *isip4 = *isip6 = *isudp = *istcp = false;

    proto = eth_get_l3_proto(headers, l2hdr_len);
    if (proto == ETH_P_IP) {
        struct ip_header *iphdr;

        *isip4 = true;

        assert(hdr_length >=
            eth_get_l2_hdr_length(headers) + sizeof(struct ip_header));

        iphdr = PKT_GET_IP_HDR(headers);

        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            if (iphdr->ip_p == IP_PROTO_TCP) {
                *istcp = true;
            } else if (iphdr->ip_p == IP_PROTO_UDP) {
                *isudp = true;
            }
        }
    } else if (proto == ETH_P_IPV6) {
        uint8_t l4proto;
        size_t full_ip6hdr_len;

        struct iovec hdr_vec;
        hdr_vec.iov_base = (void *) headers;
        hdr_vec.iov_len = hdr_length;

        *isip6 = true;
        if (eth_parse_ipv6_hdr(&hdr_vec, 1, l2hdr_len,
                               &l4proto, &full_ip6hdr_len)) {
            if (l4proto == IP_PROTO_TCP) {
                *istcp = true;
            } else if (l4proto == IP_PROTO_UDP) {
                *isudp = true;
            }
        }
    }
}
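
/*
 * Illustrative use of eth_get_protocols() (a sketch; "pkt" and "len" are
 * hypothetical, and "len" must cover at least the L2 header plus the fixed
 * part of the L3 header, as the asserts above require):
 *
 *     bool isip4, isip6, isudp, istcp;
 *     eth_get_protocols(pkt, len, &isip4, &isip6, &isudp, &istcp);
 *     if (istcp) {
 *         // the frame carries a TCP segment over IPv4 or IPv6
 *     }
 */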

void
eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
                            void *l3hdr, size_t l3hdr_len,
                            size_t l3payload_len,
                            size_t frag_offset, bool more_frags)
{
    if (eth_get_l3_proto(l2hdr, l2hdr_len) == ETH_P_IP) {
        uint16_t orig_flags;
        uint16_t new_ip_off;
        struct ip_header *iphdr = (struct ip_header *) l3hdr;
        uint16_t frag_off_units = frag_offset / IP_FRAG_UNIT_SIZE;

        /* IPv4 fragment offsets are expressed in IP_FRAG_UNIT_SIZE units */
        assert(frag_offset % IP_FRAG_UNIT_SIZE == 0);
        assert((frag_off_units & ~IP_OFFMASK) == 0);

        orig_flags = be16_to_cpu(iphdr->ip_off) & ~(IP_OFFMASK | IP_MF);
        new_ip_off = frag_off_units | orig_flags | (more_frags ? IP_MF : 0);
        iphdr->ip_off = cpu_to_be16(new_ip_off);
        iphdr->ip_len = cpu_to_be16(l3payload_len + l3hdr_len);
    }
}
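
/*
 * Worked example for eth_setup_ip4_fragmentation(): assuming
 * IP_FRAG_UNIT_SIZE is 8, a fragment starting at payload offset 1480 yields
 * frag_off_units == 185, so the fragment-offset field of the IPv4 header is
 * set to 185 and IP_MF is or-ed in whenever more_frags is true. The numbers
 * are only meant to show the arithmetic.
 */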

void
eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len)
{
    struct ip_header *iphdr = (struct ip_header *) l3hdr;

    /* The checksum field must be zero while the sum is computed */
    iphdr->ip_sum = 0;
    iphdr->ip_sum = cpu_to_be16(net_raw_checksum(l3hdr, l3hdr_len));
}
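
/*
 * eth_fix_ip4_checksum() recomputes the IPv4 header checksum in place, so it
 * is typically called after other header fields have been patched. A sketch
 * ("frame" and "new_total_len" are hypothetical, and the header is assumed
 * to carry no IP options, i.e. to be sizeof(struct ip_header) bytes long):
 *
 *     struct ip_header *ip = PKT_GET_IP_HDR(frame);
 *     ip->ip_len = cpu_to_be16(new_total_len);
 *     eth_fix_ip4_checksum(ip, sizeof(struct ip_header));
 */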

uint32_t
eth_calc_pseudo_hdr_csum(struct ip_header *iphdr, uint16_t csl)
{
    struct ip_pseudo_header ipph;
    ipph.ip_src = iphdr->ip_src;
    ipph.ip_dst = iphdr->ip_dst;
    ipph.ip_payload = cpu_to_be16(csl);
    ipph.ip_proto = iphdr->ip_p;
    ipph.zeros = 0;
    return net_checksum_add(sizeof(ipph), (uint8_t *) &ipph);
}
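
/*
 * The value returned by eth_calc_pseudo_hdr_csum() is a partial sum, not a
 * finished checksum. A sketch of completing a TCP checksum with it
 * ("tcp_hdr" and "tcp_len" are hypothetical; the helpers come from
 * net/checksum.h, already included above):
 *
 *     uint32_t csum = eth_calc_pseudo_hdr_csum(iphdr, tcp_len);
 *     csum += net_checksum_add(tcp_len, (uint8_t *) tcp_hdr);
 *     uint16_t tcp_csum = net_checksum_finish(csum);
 */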

static bool
eth_is_ip6_extension_header_type(uint8_t hdr_type)
{
    switch (hdr_type) {
    case IP6_HOP_BY_HOP:
    case IP6_ROUTING:
    case IP6_FRAGMENT:
    case IP6_ESP:
    case IP6_AUTHENTICATION:
    case IP6_DESTINATON:
    case IP6_MOBILITY:
        return true;
    default:
        return false;
    }
}

bool eth_parse_ipv6_hdr(struct iovec *pkt, int pkt_frags,
                        size_t ip6hdr_off, uint8_t *l4proto,
                        size_t *full_hdr_len)
{
    struct ip6_header ip6_hdr;
    struct ip6_ext_hdr ext_hdr;
    size_t bytes_read;

    bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off,
                            &ip6_hdr, sizeof(ip6_hdr));
    if (bytes_read < sizeof(ip6_hdr)) {
        return false;
    }

    *full_hdr_len = sizeof(struct ip6_header);

    if (!eth_is_ip6_extension_header_type(ip6_hdr.ip6_nxt)) {
        *l4proto = ip6_hdr.ip6_nxt;
        return true;
    }

    /* Walk the extension header chain until a non-extension type is found */
    do {
        bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off + *full_hdr_len,
                                &ext_hdr, sizeof(ext_hdr));
        *full_hdr_len += (ext_hdr.ip6r_len + 1) * IP6_EXT_GRANULARITY;
    } while (eth_is_ip6_extension_header_type(ext_hdr.ip6r_nxt));

    *l4proto = ext_hdr.ip6r_nxt;
    return true;
}
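
/*
 * Illustrative call of eth_parse_ipv6_hdr() with a single contiguous buffer
 * wrapped in one iovec (a sketch; "frame" and "frame_len" are hypothetical):
 *
 *     struct iovec iov = { .iov_base = (void *) frame, .iov_len = frame_len };
 *     uint8_t l4proto;
 *     size_t ip6_hdr_len;
 *
 *     if (eth_parse_ipv6_hdr(&iov, 1, eth_get_l2_hdr_length(frame),
 *                            &l4proto, &ip6_hdr_len)) {
 *         // l4proto holds the first non-extension next-header value and
 *         // ip6_hdr_len covers the base header plus all extension headers
 *     }
 */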