/*
 * QEMU TX packets abstractions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
18 #include "qemu/osdep.h"
19 #include "net_tx_pkt.h"
21 #include "net/checksum.h"
24 #include "hw/pci/pci.h"
enum {
    NET_TX_PKT_VHDR_FRAG = 0,
    NET_TX_PKT_L2HDR_FRAG,
    NET_TX_PKT_L3HDR_FRAG,
    NET_TX_PKT_PL_START_FRAG
};
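
/*
 * The vec array in the packet context is laid out according to the enum
 * above: slot 0 holds the virtio-net header, slots 1 and 2 hold the cached
 * L2 and L3 headers, and payload fragments start at NET_TX_PKT_PL_START_FRAG.
 */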

/* TX packet private context */
struct NetTxPkt {
    PCIDevice *pci_dev;

    struct virtio_net_hdr virt_hdr;
    bool has_virt_hdr;

    struct iovec *raw;
    uint32_t raw_frags;
    uint32_t max_raw_frags;

    struct iovec *vec;

    uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN];
    uint8_t l3_hdr[ETH_MAX_IP_DGRAM_LEN];

    uint32_t payload_len;

    uint32_t payload_frags;
    uint32_t max_payload_frags;

    uint16_t hdr_len;
    eth_pkt_types_e packet_type;
    uint8_t l4proto;

    bool is_loopback;
};
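
/*
 * A typical transmit path in a device model might drive this API roughly as
 * follows (illustrative sketch only; "s->tx_pkt", "desc_addr", "desc_len",
 * "tso", "csum", "gso_size" and "nc" are hypothetical caller-side names, not
 * part of this file):
 *
 *     net_tx_pkt_reset(s->tx_pkt);
 *     net_tx_pkt_add_raw_fragment(s->tx_pkt, desc_addr, desc_len);
 *     if (net_tx_pkt_parse(s->tx_pkt)) {
 *         net_tx_pkt_build_vheader(s->tx_pkt, tso, csum, gso_size);
 *         net_tx_pkt_send(s->tx_pkt, nc);
 *     }
 */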

void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
    uint32_t max_frags, bool has_virt_hdr)
{
    struct NetTxPkt *p = g_malloc0(sizeof *p);

    p->pci_dev = pci_dev;

    p->vec = g_malloc((sizeof *p->vec) *
        (max_frags + NET_TX_PKT_PL_START_FRAG));

    p->raw = g_malloc((sizeof *p->raw) * max_frags);

    p->max_payload_frags = max_frags;
    p->max_raw_frags = max_frags;
    p->has_virt_hdr = has_virt_hdr;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_len =
        p->has_virt_hdr ? sizeof p->virt_hdr : 0;
    p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr;
    p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = &p->l3_hdr;

    *pkt = p;
}

void net_tx_pkt_uninit(struct NetTxPkt *pkt)
{
    if (pkt) {
        g_free(pkt->vec);
        g_free(pkt->raw);
        g_free(pkt);
    }
}

void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt)
{
    uint16_t csum;
    struct ip_header *ip_hdr;

    assert(pkt);

    ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    ip_hdr->ip_len = cpu_to_be16(pkt->payload_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);

    ip_hdr->ip_sum = 0;
    csum = net_raw_checksum((uint8_t *)ip_hdr,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);
    ip_hdr->ip_sum = cpu_to_be16(csum);
}
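
/*
 * For GSO packets the final L4 checksum cannot be computed here, so the
 * checksum field is seeded with the complemented pseudo header sum; whoever
 * eventually segments the packet completes the sum over the payload data.
 * Other gso_type values are left untouched.
 */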
void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt)
{
    uint16_t csum;
    uint32_t cntr, cso;
    uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
    void *ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    if (pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len >
        ETH_MAX_IP_DGRAM_LEN) {
        return;
    }

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
        gso_type == VIRTIO_NET_HDR_GSO_UDP) {
        /* Calculate IP header checksum */
        net_tx_pkt_update_ip_hdr_checksum(pkt);

        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip4_pseudo_hdr_csum(ip_hdr, pkt->payload_len, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else if (gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip6_pseudo_hdr_csum(ip_hdr, pkt->payload_len,
                                            pkt->l4proto, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else {
        return;
    }

    iov_from_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
                 pkt->virt_hdr.csum_offset, &csum, sizeof(csum));
}

static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt)
{
    pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;
}
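
/*
 * Extracts the L2 and L3 headers from the raw (guest-mapped) fragments into
 * the packet's local l2_hdr/l3_hdr buffers, classifies the packet (type, L3
 * protocol, L4 protocol) and validates the header lengths. Returns false if
 * the headers cannot be read or are malformed.
 */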
static bool net_tx_pkt_parse_headers(struct NetTxPkt *pkt)
{
    struct iovec *l2_hdr, *l3_hdr;
    size_t bytes_read;
    size_t full_ip6hdr_len;
    uint16_t l3_proto;

    assert(pkt);

    l2_hdr = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    l3_hdr = &pkt->vec[NET_TX_PKT_L3HDR_FRAG];

    bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base,
                            ETH_MAX_L2_HDR_LEN);
    if (bytes_read < sizeof(struct eth_header)) {
        l2_hdr->iov_len = 0;
        return false;
    }

    l2_hdr->iov_len = sizeof(struct eth_header);
    switch (be16_to_cpu(PKT_GET_ETH_HDR(l2_hdr->iov_base)->h_proto)) {
    case ETH_P_VLAN:
        l2_hdr->iov_len += sizeof(struct vlan_header);
        break;
    case ETH_P_DVLAN:
        l2_hdr->iov_len += 2 * sizeof(struct vlan_header);
        break;
    }

    if (bytes_read < l2_hdr->iov_len) {
        l2_hdr->iov_len = 0;
        l3_hdr->iov_len = 0;
        pkt->packet_type = ETH_PKT_UCAST;
        return false;
    } else {
        l2_hdr->iov_len = ETH_MAX_L2_HDR_LEN;
        l2_hdr->iov_len = eth_get_l2_hdr_length(l2_hdr->iov_base);
        pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base);
    }

    l3_proto = eth_get_l3_proto(l2_hdr, 1, l2_hdr->iov_len);

    switch (l3_proto) {
    case ETH_P_IP:
        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, sizeof(struct ip_header));

        if (bytes_read < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base);

        if (l3_hdr->iov_len < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = ((struct ip_header *) l3_hdr->iov_base)->ip_p;

        if (IP_HDR_GET_LEN(l3_hdr->iov_base) != sizeof(struct ip_header)) {
            /* copy optional IPv4 header data if any */
            bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags,
                                    l2_hdr->iov_len + sizeof(struct ip_header),
                                    l3_hdr->iov_base + sizeof(struct ip_header),
                                    l3_hdr->iov_len - sizeof(struct ip_header));
            if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) {
                l3_hdr->iov_len = 0;
                return false;
            }
        }

        break;

    case ETH_P_IPV6:
    {
        eth_ip6_hdr_info hdrinfo;

        if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                &hdrinfo)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = hdrinfo.l4proto;
        full_ip6hdr_len = hdrinfo.full_hdr_len;

        if (full_ip6hdr_len > ETH_MAX_IP_DGRAM_LEN) {
            l3_hdr->iov_len = 0;
            return false;
        }

        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, full_ip6hdr_len);

        if (bytes_read < full_ip6hdr_len) {
            l3_hdr->iov_len = 0;
            return false;
        }

        l3_hdr->iov_len = full_ip6hdr_len;
        break;
    }

    default:
        l3_hdr->iov_len = 0;
        break;
    }

    net_tx_pkt_calculate_hdr_len(pkt);
    return true;
}

static void net_tx_pkt_rebuild_payload(struct NetTxPkt *pkt)
{
    pkt->payload_len = iov_size(pkt->raw, pkt->raw_frags) - pkt->hdr_len;
    pkt->payload_frags = iov_copy(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
                                  pkt->max_payload_frags,
                                  pkt->raw, pkt->raw_frags,
                                  pkt->hdr_len, pkt->payload_len);
}

bool net_tx_pkt_parse(struct NetTxPkt *pkt)
{
    if (net_tx_pkt_parse_headers(pkt)) {
        net_tx_pkt_rebuild_payload(pkt);
        return true;
    }

    return false;
}

struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt)
{
    assert(pkt);
    return &pkt->virt_hdr;
}

static uint8_t net_tx_pkt_get_gso_type(struct NetTxPkt *pkt,
                                       bool tso_enable)
{
    uint8_t rc = VIRTIO_NET_HDR_GSO_NONE;
    uint16_t l3_proto;

    l3_proto = eth_get_l3_proto(&pkt->vec[NET_TX_PKT_L2HDR_FRAG], 1,
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len);

    if (!tso_enable) {
        return rc;
    }

    rc = eth_get_gso_type(l3_proto, pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
                          pkt->l4proto);

    return rc;
}
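
/*
 * Populates the virtio-net header from the parsed packet state: gso_type is
 * derived from the L3/L4 protocols, hdr_len covers L2+L3 plus the L4 header
 * for segmented packets, and the csum_start/csum_offset pair points at the
 * TCP or UDP checksum field when checksum offload is requested.
 */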
void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
    bool csum_enable, uint32_t gso_size)
{
    struct tcp_hdr l4hdr;
    assert(pkt);

    /* csum has to be enabled if tso is. */
    assert(csum_enable || !tso_enable);

    pkt->virt_hdr.gso_type = net_tx_pkt_get_gso_type(pkt, tso_enable);

    switch (pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
    case VIRTIO_NET_HDR_GSO_NONE:
        pkt->virt_hdr.hdr_len = 0;
        pkt->virt_hdr.gso_size = 0;
        break;

    case VIRTIO_NET_HDR_GSO_UDP:
        pkt->virt_hdr.gso_size = gso_size;
        pkt->virt_hdr.hdr_len = pkt->hdr_len + sizeof(struct udp_header);
        break;

    case VIRTIO_NET_HDR_GSO_TCPV4:
    case VIRTIO_NET_HDR_GSO_TCPV6:
        iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
                   0, &l4hdr, sizeof(l4hdr));
        pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t);
        pkt->virt_hdr.gso_size = gso_size;
        break;

    default:
        g_assert_not_reached();
    }

    if (csum_enable) {
        switch (pkt->l4proto) {
        case IP_PROTO_TCP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum);
            break;
        case IP_PROTO_UDP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum);
            break;
        default:
            break;
        }
    }
}
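
/*
 * Inserts a VLAN tag with the given ethertype into the cached L2 header; if
 * a tag was not already present, the L2 header (and the total header length)
 * grows by sizeof(struct vlan_header).
 */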
void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
    uint16_t vlan, uint16_t vlan_ethtype)
{
    bool is_new;
    assert(pkt);

    eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base,
                              vlan, vlan_ethtype, &is_new);

    /* update l2hdrlen */
    if (is_new) {
        pkt->hdr_len += sizeof(struct vlan_header);
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +=
            sizeof(struct vlan_header);
    }
}

bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
    size_t len)
{
    hwaddr mapped_len = 0;
    struct iovec *ventry;
    assert(pkt);
    assert(pkt->max_raw_frags > pkt->raw_frags);

    if (!len) {
        return true;
    }

    ventry = &pkt->raw[pkt->raw_frags];
    mapped_len = len;

    ventry->iov_base = pci_dma_map(pkt->pci_dev, pa,
                                   &mapped_len, DMA_DIRECTION_TO_DEVICE);

    if ((ventry->iov_base != NULL) && (len == mapped_len)) {
        ventry->iov_len = mapped_len;
        pkt->raw_frags++;
        return true;
    } else {
        return false;
    }
}

bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt)
{
    return pkt->raw_frags > 0;
}

eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->packet_type;
}

size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->hdr_len + pkt->payload_len;
}

void net_tx_pkt_dump(struct NetTxPkt *pkt)
{
#ifdef NET_TX_PKT_DEBUG
    assert(pkt);

    printf("TX PKT: hdr_len: %d, pkt_type: 0x%X, l2hdr_len: %zu, "
        "l3hdr_len: %zu, payload_len: %u\n", pkt->hdr_len, pkt->packet_type,
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len, pkt->payload_len);
#endif
}

void net_tx_pkt_reset(struct NetTxPkt *pkt)
{
    int i;

    /* no assert, as reset can be called before tx_pkt_init */
    if (!pkt) {
        return;
    }

    memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));

    assert(pkt->vec);

    pkt->payload_len = 0;
    pkt->payload_frags = 0;

    assert(pkt->raw);
    for (i = 0; i < pkt->raw_frags; i++) {
        assert(pkt->raw[i].iov_base);
        pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base, pkt->raw[i].iov_len,
                      DMA_DIRECTION_TO_DEVICE, 0);
        pkt->raw[i].iov_len = 0;
    }
    pkt->raw_frags = 0;

    pkt->hdr_len = 0;
    pkt->l4proto = 0;
}
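
/*
 * Software L4 checksum fallback for backends without virtio-net header
 * support: zero the checksum field, sum the IPv4 pseudo header and the L4
 * data, then write the finished checksum back into the packet.
 */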
static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt)
{
    struct iovec *iov = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    uint32_t csum_cntr;
    uint16_t csum = 0;
    uint32_t cso;
    /* num of iovec without vhdr */
    uint32_t iov_len = pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1;
    uint16_t csl;
    struct ip_header *iphdr;
    size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset;

    /* Put zero to checksum field */
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);

    /* Calculate L4 TCP/UDP checksum */
    csl = pkt->payload_len;

    /* add pseudo header to csum */
    iphdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
    csum_cntr = eth_calc_ip4_pseudo_hdr_csum(iphdr, csl, &cso);

    /* data checksum */
    csum_cntr +=
        net_checksum_add_iov(iov, iov_len, pkt->virt_hdr.csum_start, csl, cso);

    /* Put the checksum obtained into the packet */
    csum = cpu_to_be16(net_checksum_finish(csum_cntr));
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
}

enum {
    NET_TX_PKT_FRAGMENT_L2_HDR_POS = 0,
    NET_TX_PKT_FRAGMENT_L3_HDR_POS,
    NET_TX_PKT_FRAGMENT_HEADER_NUM
};

#define NET_MAX_FRAG_SG_LIST (64)
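
/*
 * Gathers up to gso_size bytes of payload (rounded per IP_FRAG_ALIGN_SIZE)
 * from the packet's payload iovec into dst, starting after the two header
 * slots. src_idx/src_offset keep the walk position across calls; returns the
 * number of bytes gathered.
 */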
static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt,
    int *src_idx, size_t *src_offset, struct iovec *dst, int *dst_idx)
{
    size_t fetched = 0;
    struct iovec *src = pkt->vec;

    *dst_idx = NET_TX_PKT_FRAGMENT_HEADER_NUM;

    while (fetched < IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size)) {

        /* no more place in fragment iov */
        if (*dst_idx == NET_MAX_FRAG_SG_LIST) {
            break;
        }

        /* no more data in iovec */
        if (*src_idx == (pkt->payload_frags + NET_TX_PKT_PL_START_FRAG)) {
            break;
        }

        dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset;
        dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset,
            IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size) - fetched);

        *src_offset += dst[*dst_idx].iov_len;
        fetched += dst[*dst_idx].iov_len;

        if (*src_offset == src[*src_idx].iov_len) {
            *src_offset = 0;
            (*src_idx)++;
        }

        (*dst_idx)++;
    }

    return fetched;
}

static inline void net_tx_pkt_sendv(struct NetTxPkt *pkt,
    NetClientState *nc, const struct iovec *iov, int iov_cnt)
{
    if (pkt->is_loopback) {
        nc->info->receive_iov(nc, iov, iov_cnt);
    } else {
        qemu_sendv_packet(nc, iov, iov_cnt);
    }
}
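
/*
 * Software IPv4 fragmentation: the L2/L3 headers are shared by every
 * fragment, so each iteration rewrites the fragment offset and
 * more-fragments bit in place, refreshes the IPv4 header checksum and sends
 * the header slots plus the gathered payload slice.
 */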
static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
    NetClientState *nc)
{
    struct iovec fragment[NET_MAX_FRAG_SG_LIST];
    size_t fragment_len = 0;
    bool more_frags = false;

    /* some pointers for shorter code */
    void *l2_iov_base, *l3_iov_base;
    size_t l2_iov_len, l3_iov_len;
    int src_idx = NET_TX_PKT_PL_START_FRAG, dst_idx;
    size_t src_offset = 0;
    size_t fragment_offset = 0;

    l2_iov_base = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base;
    l2_iov_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len;
    l3_iov_base = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
    l3_iov_len = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;

    /* Copy headers */
    fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_base = l2_iov_base;
    fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_len = l2_iov_len;
    fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_base = l3_iov_base;
    fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_len = l3_iov_len;

    /* Put as much data as possible and send */
    do {
        fragment_len = net_tx_pkt_fetch_fragment(pkt, &src_idx, &src_offset,
            fragment, &dst_idx);

        more_frags = (fragment_offset + fragment_len < pkt->payload_len);

        eth_setup_ip4_fragmentation(l2_iov_base, l2_iov_len, l3_iov_base,
            l3_iov_len, fragment_len, fragment_offset, more_frags);

        eth_fix_ip4_checksum(l3_iov_base, l3_iov_len);

        net_tx_pkt_sendv(pkt, nc, fragment, dst_idx);

        fragment_offset += fragment_len;

    } while (more_frags);

    return true;
}
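
/*
 * Send path decision tree: apply software checksum when the peer has no
 * virtio-net header support, drop GSO packets whose resulting datagram would
 * exceed 64K, pass the packet through unchanged when the peer can handle it
 * (or no GSO is needed), and otherwise fall back to software fragmentation.
 */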
bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
{
    assert(pkt);

    if (!pkt->has_virt_hdr &&
        pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
        net_tx_pkt_do_sw_csum(pkt);
    }

    /*
     * Since the underlying infrastructure does not support IP datagrams
     * longer than 64K, we should drop such packets and not even try to send
     * them.
     */
    if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) {
        if (pkt->payload_len >
            ETH_MAX_IP_DGRAM_LEN -
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) {
            return false;
        }
    }

    if (pkt->has_virt_hdr ||
        pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
        net_tx_pkt_sendv(pkt, nc, pkt->vec,
            pkt->payload_frags + NET_TX_PKT_PL_START_FRAG);
        return true;
    }

    return net_tx_pkt_do_sw_fragmentation(pkt, nc);
}

bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc)
{
    bool res;

    pkt->is_loopback = true;
    res = net_tx_pkt_send(pkt, nc);
    pkt->is_loopback = false;

    return res;
}