/*
 * QEMU RX packets abstractions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "net_rx_pkt.h"
#include "net/checksum.h"
#include "net/tap.h"

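/*
 * NetRxPkt wraps a received frame and caches the result of parsing it
 * (IPv4/IPv6/TCP/UDP detection, header offsets and header copies), so
 * that emulated NICs can query protocol information, compute RSS hashes
 * and validate or fix checksums without re-parsing the frame.
 *
 * Illustrative usage sketch (the iov/iovcnt variables and the surrounding
 * device code are assumed to come from the caller, not from this file):
 *
 *     struct NetRxPkt *rx_pkt;
 *     bool csum_ok;
 *
 *     net_rx_pkt_init(&rx_pkt, true);
 *     net_rx_pkt_attach_iovec(rx_pkt, iov, iovcnt, 0, true);
 *     if (net_rx_pkt_validate_l4_csum(rx_pkt, &csum_ok)) {
 *         ... report csum_ok to the guest ...
 *     }
 *     net_rx_pkt_uninit(rx_pkt);
 */
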
struct NetRxPkt {
    struct virtio_net_hdr virt_hdr;
    uint8_t ehdr_buf[sizeof(struct eth_header) + sizeof(struct vlan_header)];
    struct iovec *vec;
    uint16_t vec_len_total;
    uint16_t vec_len;
    uint32_t tot_len;
    uint16_t tci;
    size_t ehdr_buf_len;
    bool has_virt_hdr;
    eth_pkt_types_e packet_type;

    /* Analysis results */
    bool isip4;
    bool isip6;
    bool isudp;
    bool istcp;

    size_t l3hdr_off;
    size_t l4hdr_off;
    size_t l5hdr_off;

    eth_ip6_hdr_info ip6hdr_info;
    eth_ip4_hdr_info ip4hdr_info;
    eth_l4_hdr_info  l4hdr_info;
};

void net_rx_pkt_init(struct NetRxPkt **pkt, bool has_virt_hdr)
{
    struct NetRxPkt *p = g_malloc0(sizeof *p);
    p->has_virt_hdr = has_virt_hdr;
    p->vec = NULL;
    p->vec_len_total = 0;
    *pkt = p;
}

void net_rx_pkt_uninit(struct NetRxPkt *pkt)
{
    if (pkt->vec_len_total != 0) {
        g_free(pkt->vec);
    }

    g_free(pkt);
}

struct virtio_net_hdr *net_rx_pkt_get_vhdr(struct NetRxPkt *pkt)
{
    assert(pkt);
    return &pkt->virt_hdr;
}

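/*
 * Make sure pkt->vec can hold at least new_iov_len entries. The old
 * contents are not preserved: callers refill the vector right after.
 */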
static inline void
net_rx_pkt_iovec_realloc(struct NetRxPkt *pkt,
                         int new_iov_len)
{
    if (pkt->vec_len_total < new_iov_len) {
        g_free(pkt->vec);
        pkt->vec = g_malloc(sizeof(*pkt->vec) * new_iov_len);
        pkt->vec_len_total = new_iov_len;
    }
}

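/*
 * Copy the frame data starting at ploff into pkt->vec. If a VLAN tag was
 * stripped, the saved Ethernet header in ehdr_buf becomes iovec entry 0.
 * Protocol analysis is then re-run over the assembled packet.
 */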
static void
net_rx_pkt_pull_data(struct NetRxPkt *pkt,
                     const struct iovec *iov, int iovcnt,
                     size_t ploff)
{
    uint32_t pllen = iov_size(iov, iovcnt) - ploff;

    if (pkt->ehdr_buf_len) {
        net_rx_pkt_iovec_realloc(pkt, iovcnt + 1);

        pkt->vec[0].iov_base = pkt->ehdr_buf;
        pkt->vec[0].iov_len = pkt->ehdr_buf_len;

        pkt->tot_len = pllen + pkt->ehdr_buf_len;
        pkt->vec_len = iov_copy(pkt->vec + 1, pkt->vec_len_total - 1,
                                iov, iovcnt, ploff, pllen) + 1;
    } else {
        net_rx_pkt_iovec_realloc(pkt, iovcnt);

        pkt->tot_len = pllen;
        pkt->vec_len = iov_copy(pkt->vec, pkt->vec_len_total,
                                iov, iovcnt, ploff, pkt->tot_len);
    }

    eth_get_protocols(pkt->vec, pkt->vec_len, &pkt->isip4, &pkt->isip6,
                      &pkt->isudp, &pkt->istcp,
                      &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off,
                      &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info);

    trace_net_rx_pkt_parsed(pkt->isip4, pkt->isip6, pkt->isudp, pkt->istcp,
                            pkt->l3hdr_off, pkt->l4hdr_off, pkt->l5hdr_off);
}

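/*
 * Attach a received frame, optionally stripping a VLAN tag; the stripped
 * TCI is stored in pkt->tci for later retrieval.
 */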
void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt,
                             const struct iovec *iov, int iovcnt,
                             size_t iovoff, bool strip_vlan)
{
    uint16_t tci = 0;
    uint16_t ploff = iovoff;
    assert(pkt);

    if (strip_vlan) {
        pkt->ehdr_buf_len = eth_strip_vlan(iov, iovcnt, iovoff, pkt->ehdr_buf,
                                           &ploff, &tci);
    } else {
        pkt->ehdr_buf_len = 0;
    }

    pkt->tci = tci;

    net_rx_pkt_pull_data(pkt, iov, iovcnt, ploff);
}

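/*
 * Same as net_rx_pkt_attach_iovec(), but strips the VLAN header that
 * matches the caller-supplied ethertype 'vet'.
 */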
void net_rx_pkt_attach_iovec_ex(struct NetRxPkt *pkt,
                                const struct iovec *iov, int iovcnt,
                                size_t iovoff, bool strip_vlan,
                                uint16_t vet)
{
    uint16_t tci = 0;
    uint16_t ploff = iovoff;
    assert(pkt);

    if (strip_vlan) {
        pkt->ehdr_buf_len = eth_strip_vlan_ex(iov, iovcnt, iovoff, vet,
                                              pkt->ehdr_buf,
                                              &ploff, &tci);
    } else {
        pkt->ehdr_buf_len = 0;
    }

    pkt->tci = tci;

    net_rx_pkt_pull_data(pkt, iov, iovcnt, ploff);
}

void net_rx_pkt_dump(struct NetRxPkt *pkt)
{
#ifdef NET_RX_PKT_DEBUG
    assert(pkt);

    printf("RX PKT: tot_len: %d, ehdr_buf_len: %lu, vlan_tag: %d\n",
           pkt->tot_len, pkt->ehdr_buf_len, pkt->tci);
#endif
}

void net_rx_pkt_set_packet_type(struct NetRxPkt *pkt,
    eth_pkt_types_e packet_type)
{
    assert(pkt);

    pkt->packet_type = packet_type;
}

eth_pkt_types_e net_rx_pkt_get_packet_type(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->packet_type;
}

size_t net_rx_pkt_get_total_len(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->tot_len;
}

void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, const void *data,
                              size_t len)
{
    const struct iovec iov = {
        .iov_base = (void *)data,
        .iov_len = len
    };

    assert(pkt);

    eth_get_protocols(&iov, 1, &pkt->isip4, &pkt->isip6,
                      &pkt->isudp, &pkt->istcp,
                      &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off,
                      &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info);
}

void net_rx_pkt_get_protocols(struct NetRxPkt *pkt,
                              bool *isip4, bool *isip6,
                              bool *isudp, bool *istcp)
{
    assert(pkt);

    *isip4 = pkt->isip4;
    *isip6 = pkt->isip6;
    *isudp = pkt->isudp;
    *istcp = pkt->istcp;
}

size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt)
{
    assert(pkt);
    return pkt->l3hdr_off;
}

size_t net_rx_pkt_get_l4_hdr_offset(struct NetRxPkt *pkt)
{
    assert(pkt);
    return pkt->l4hdr_off;
}

size_t net_rx_pkt_get_l5_hdr_offset(struct NetRxPkt *pkt)
{
    assert(pkt);
    return pkt->l5hdr_off;
}

eth_ip6_hdr_info *net_rx_pkt_get_ip6_info(struct NetRxPkt *pkt)
{
    return &pkt->ip6hdr_info;
}

eth_ip4_hdr_info *net_rx_pkt_get_ip4_info(struct NetRxPkt *pkt)
{
    return &pkt->ip4hdr_info;
}

eth_l4_hdr_info *net_rx_pkt_get_l4_info(struct NetRxPkt *pkt)
{
    return &pkt->l4hdr_info;
}

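/*
 * The _net_rx_rss_prepare_*() helpers below assemble the RSS hash input:
 * each appends the relevant header fields (addresses or ports) to
 * rss_input and advances *bytes_written.
 */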
static inline void
_net_rx_rss_add_chunk(uint8_t *rss_input, size_t *bytes_written,
                      void *ptr, size_t size)
{
    memcpy(&rss_input[*bytes_written], ptr, size);
    trace_net_rx_pkt_rss_add_chunk(ptr, size, *bytes_written);
    *bytes_written += size;
}

static inline void
_net_rx_rss_prepare_ip4(uint8_t *rss_input,
                        struct NetRxPkt *pkt,
                        size_t *bytes_written)
{
    struct ip_header *ip4_hdr = &pkt->ip4hdr_info.ip4_hdr;

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &ip4_hdr->ip_src, sizeof(uint32_t));

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &ip4_hdr->ip_dst, sizeof(uint32_t));
}

static inline void
_net_rx_rss_prepare_ip6(uint8_t *rss_input,
                        struct NetRxPkt *pkt,
                        bool ipv6ex, size_t *bytes_written)
{
    eth_ip6_hdr_info *ip6info = &pkt->ip6hdr_info;

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          (ipv6ex && ip6info->rss_ex_src_valid)
                              ? &ip6info->rss_ex_src
                              : &ip6info->ip6_hdr.ip6_src,
                          sizeof(struct in6_address));

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          (ipv6ex && ip6info->rss_ex_dst_valid)
                              ? &ip6info->rss_ex_dst
                              : &ip6info->ip6_hdr.ip6_dst,
                          sizeof(struct in6_address));
}

static inline void
_net_rx_rss_prepare_tcp(uint8_t *rss_input,
                        struct NetRxPkt *pkt,
                        size_t *bytes_written)
{
    struct tcp_header *tcphdr = &pkt->l4hdr_info.hdr.tcp;

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &tcphdr->th_sport, sizeof(uint16_t));

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &tcphdr->th_dport, sizeof(uint16_t));
}

static inline void
_net_rx_rss_prepare_udp(uint8_t *rss_input,
                        struct NetRxPkt *pkt,
                        size_t *bytes_written)
{
    struct udp_header *udphdr = &pkt->l4hdr_info.hdr.udp;

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &udphdr->uh_sport, sizeof(uint16_t));

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &udphdr->uh_dport, sizeof(uint16_t));
}

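/*
 * Compute the Toeplitz RSS hash for the packet. rss_input is sized for
 * the largest input: IPv6 source + destination (2 * 16 bytes) plus
 * TCP/UDP source + destination ports (2 * 2 bytes) = 36 bytes.
 */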
uint32_t
net_rx_pkt_calc_rss_hash(struct NetRxPkt *pkt,
                         NetRxPktRssType type,
                         uint8_t *key)
{
    uint8_t rss_input[36];
    size_t rss_length = 0;
    uint32_t rss_hash = 0;
    net_toeplitz_key key_data;

    switch (type) {
    case NetPktRssIpV4:
        assert(pkt->isip4);
        trace_net_rx_pkt_rss_ip4();
        _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV4Tcp:
        assert(pkt->isip4);
        assert(pkt->istcp);
        trace_net_rx_pkt_rss_ip4_tcp();
        _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
        _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6Tcp:
        assert(pkt->isip6);
        assert(pkt->istcp);
        trace_net_rx_pkt_rss_ip6_tcp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
        _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6:
        assert(pkt->isip6);
        trace_net_rx_pkt_rss_ip6();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
        break;
    case NetPktRssIpV6Ex:
        assert(pkt->isip6);
        trace_net_rx_pkt_rss_ip6_ex();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
        break;
    case NetPktRssIpV6TcpEx:
        assert(pkt->isip6);
        assert(pkt->istcp);
        trace_net_rx_pkt_rss_ip6_ex_tcp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
        _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV4Udp:
        assert(pkt->isip4);
        assert(pkt->isudp);
        trace_net_rx_pkt_rss_ip4_udp();
        _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
        _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6Udp:
        assert(pkt->isip6);
        assert(pkt->isudp);
        trace_net_rx_pkt_rss_ip6_udp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
        _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6UdpEx:
        assert(pkt->isip6);
        assert(pkt->isudp);
        trace_net_rx_pkt_rss_ip6_ex_udp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
        _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
        break;
    default:
        assert(false);
        break;
    }

    net_toeplitz_key_init(&key_data, key);
    net_toeplitz_add(&rss_hash, rss_input, rss_length, &key_data);

    trace_net_rx_pkt_rss_hash(rss_length, rss_hash);

    return rss_hash;
}

uint16_t net_rx_pkt_get_ip_id(struct NetRxPkt *pkt)
{
    assert(pkt);

    if (pkt->isip4) {
        return be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_id);
    }

    return 0;
}

bool net_rx_pkt_is_tcp_ack(struct NetRxPkt *pkt)
{
    assert(pkt);

    if (pkt->istcp) {
        return TCP_HEADER_FLAGS(&pkt->l4hdr_info.hdr.tcp) & TCP_FLAG_ACK;
    }

    return false;
}

bool net_rx_pkt_has_tcp_data(struct NetRxPkt *pkt)
{
    assert(pkt);

    if (pkt->istcp) {
        return pkt->l4hdr_info.has_tcp_data;
    }

    return false;
}

struct iovec *net_rx_pkt_get_iovec(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->vec;
}

uint16_t net_rx_pkt_get_iovec_len(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->vec_len;
}

void net_rx_pkt_set_vhdr(struct NetRxPkt *pkt,
                         struct virtio_net_hdr *vhdr)
{
    assert(pkt);

    memcpy(&pkt->virt_hdr, vhdr, sizeof pkt->virt_hdr);
}

void net_rx_pkt_set_vhdr_iovec(struct NetRxPkt *pkt,
                               const struct iovec *iov, int iovcnt)
{
    assert(pkt);

    iov_to_buf(iov, iovcnt, 0, &pkt->virt_hdr, sizeof pkt->virt_hdr);
}

bool net_rx_pkt_is_vlan_stripped(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->ehdr_buf_len ? true : false;
}

bool net_rx_pkt_has_virt_hdr(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->has_virt_hdr;
}

uint16_t net_rx_pkt_get_vlan_tag(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->tci;
}

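/*
 * Verify the IPv4 header checksum. Returns false when the packet is not
 * IPv4 (nothing to validate); otherwise stores the verdict in *csum_valid
 * and returns true.
 */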
bool net_rx_pkt_validate_l3_csum(struct NetRxPkt *pkt, bool *csum_valid)
{
    uint32_t cntr;
    uint16_t csum;
    uint32_t csl;

    trace_net_rx_pkt_l3_csum_validate_entry();

    if (!pkt->isip4) {
        trace_net_rx_pkt_l3_csum_validate_not_ip4();
        return false;
    }

    csl = pkt->l4hdr_off - pkt->l3hdr_off;

    cntr = net_checksum_add_iov(pkt->vec, pkt->vec_len,
                                pkt->l3hdr_off,
                                csl, 0);

    csum = net_checksum_finish(cntr);

    *csum_valid = (csum == 0);

    trace_net_rx_pkt_l3_csum_validate_csum(pkt->l3hdr_off, csl,
                                           cntr, csum, *csum_valid);

    return true;
}

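/*
 * Compute the TCP/UDP checksum over the pseudo-header and the L4 payload,
 * handling both IPv4 and IPv6 packets.
 */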
static uint16_t
_net_rx_pkt_calc_l4_csum(struct NetRxPkt *pkt)
{
    uint32_t cntr;
    uint16_t csum;
    uint16_t csl;
    uint32_t cso;

    trace_net_rx_pkt_l4_csum_calc_entry();

    if (pkt->isip4) {
        if (pkt->isudp) {
            csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen);
            trace_net_rx_pkt_l4_csum_calc_ip4_udp();
        } else {
            csl = be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_len) -
                  IP_HDR_GET_LEN(&pkt->ip4hdr_info.ip4_hdr);
            trace_net_rx_pkt_l4_csum_calc_ip4_tcp();
        }

        cntr = eth_calc_ip4_pseudo_hdr_csum(&pkt->ip4hdr_info.ip4_hdr,
                                            csl, &cso);
        trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl);
    } else {
        if (pkt->isudp) {
            csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen);
            trace_net_rx_pkt_l4_csum_calc_ip6_udp();
        } else {
            struct ip6_header *ip6hdr = &pkt->ip6hdr_info.ip6_hdr;
            size_t full_ip6hdr_len = pkt->l4hdr_off - pkt->l3hdr_off;
            size_t ip6opts_len = full_ip6hdr_len - sizeof(struct ip6_header);

            csl = be16_to_cpu(ip6hdr->ip6_ctlun.ip6_un1.ip6_un1_plen) -
                  ip6opts_len;
            trace_net_rx_pkt_l4_csum_calc_ip6_tcp();
        }

        cntr = eth_calc_ip6_pseudo_hdr_csum(&pkt->ip6hdr_info.ip6_hdr, csl,
                                            pkt->ip6hdr_info.l4proto, &cso);
        trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl);
    }

    cntr += net_checksum_add_iov(pkt->vec, pkt->vec_len,
                                 pkt->l4hdr_off, csl, cso);

    csum = net_checksum_finish_nozero(cntr);

    trace_net_rx_pkt_l4_csum_calc_csum(pkt->l4hdr_off, csl, cntr, csum);

    return csum;
}

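/*
 * Validate the TCP/UDP checksum. Returns false when validation does not
 * apply (not TCP/UDP, UDP with a zero checksum, or an IPv4 fragment);
 * otherwise stores the verdict in *csum_valid and returns true. The
 * checksum is accepted when the recomputed sum is 0 or 0xFFFF.
 */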
bool net_rx_pkt_validate_l4_csum(struct NetRxPkt *pkt, bool *csum_valid)
{
    uint16_t csum;

    trace_net_rx_pkt_l4_csum_validate_entry();

    if (!pkt->istcp && !pkt->isudp) {
        trace_net_rx_pkt_l4_csum_validate_not_xxp();
        return false;
    }

    if (pkt->isudp && (pkt->l4hdr_info.hdr.udp.uh_sum == 0)) {
        trace_net_rx_pkt_l4_csum_validate_udp_with_no_checksum();
        return false;
    }

    if (pkt->isip4 && pkt->ip4hdr_info.fragment) {
        trace_net_rx_pkt_l4_csum_validate_ip4_fragment();
        return false;
    }

    csum = _net_rx_pkt_calc_l4_csum(pkt);

    *csum_valid = ((csum == 0) || (csum == 0xFFFF));

    trace_net_rx_pkt_l4_csum_validate_csum(*csum_valid);

    return true;
}

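/*
 * Recompute and patch the TCP/UDP checksum in place: zero the checksum
 * field, recalculate over pseudo-header plus payload, and write the
 * result back into the packet data.
 */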
bool net_rx_pkt_fix_l4_csum(struct NetRxPkt *pkt)
{
    uint16_t csum = 0;
    uint32_t l4_cso;

    trace_net_rx_pkt_l4_csum_fix_entry();

    if (pkt->istcp) {
        l4_cso = offsetof(struct tcp_header, th_sum);
        trace_net_rx_pkt_l4_csum_fix_tcp(l4_cso);
    } else if (pkt->isudp) {
        if (pkt->l4hdr_info.hdr.udp.uh_sum == 0) {
            trace_net_rx_pkt_l4_csum_fix_udp_with_no_checksum();
            return false;
        }
        l4_cso = offsetof(struct udp_header, uh_sum);
        trace_net_rx_pkt_l4_csum_fix_udp(l4_cso);
    } else {
        trace_net_rx_pkt_l4_csum_fix_not_xxp();
        return false;
    }

    if (pkt->isip4 && pkt->ip4hdr_info.fragment) {
        trace_net_rx_pkt_l4_csum_fix_ip4_fragment();
        return false;
    }

    /* Set zero to checksum word */
    iov_from_buf(pkt->vec, pkt->vec_len,
                 pkt->l4hdr_off + l4_cso,
                 &csum, sizeof(csum));

    /* Calculate L4 checksum */
    csum = cpu_to_be16(_net_rx_pkt_calc_l4_csum(pkt));

    /* Set calculated checksum to checksum word */
    iov_from_buf(pkt->vec, pkt->vec_len,
                 pkt->l4hdr_off + l4_cso,
                 &csum, sizeof(csum));

    trace_net_rx_pkt_l4_csum_fix_csum(pkt->l4hdr_off + l4_cso, csum);

    return true;
}