/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"
/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx)		((_efx)->txq_entries / 2u)
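/* For example, with a 1024-entry TX ring EFX_TXQ_THRESHOLD() evaluates to
 * 512, so a stopped netif queue is only re-woken once fewer than 512
 * descriptors are outstanding (see efx_xmit_done() below).
 */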
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}
/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);

static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);
static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}
static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
			    struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;
	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;
		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				netif_tx_stop_queue(tx_queue->core_txq);
				/* This memory barrier protects the
				 * change of queue state from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0)) {
					rc = NETDEV_TX_BUSY;
					goto unwind;
				}
				smp_mb();
				netif_tx_start_queue(tx_queue->core_txq);
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);
		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;
 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}
	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}
/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
				    skb->ip_summed == CHECKSUM_PARTIAL ?
				    EFX_TXQ_TYPE_OFFLOAD : 0);

	return efx_enqueue_skb(tx_queue, skb);
}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of the
	 * queue state. */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
			netif_tx_wake_queue(tx_queue->core_txq);
		}
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int i, rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= tx_queue->ptr_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);
}
void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);
}
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}
/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */
/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128
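/* A header fits a free-list block whenever TSOH_SIZE(hdr_len) <=
 * TSOH_STD_SIZE, i.e. hdr_len <= TSOH_STD_SIZE - TSOH_OFFSET -
 * sizeof(struct efx_tso_header); longer headers are allocated from the
 * heap via efx_tsoh_heap_alloc() (see tso_start_new_packet()).
 */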
#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
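/* Example values: for an untagged IPv4/TCP frame with no IP options,
 * ETH_HDR_LEN(skb) == 14, SKB_IPV4_OFF(skb) == 14 and SKB_TCP_OFF(skb) == 34;
 * an 802.1Q tag (as handled in efx_tso_check_protocol()) shifts each of
 * these by a further 4 bytes.
 */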
/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};
/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}
/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}
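/* Assuming 4 KiB pages, each call to efx_tsoh_block_alloc() above adds
 * PAGE_SIZE / TSOH_STD_SIZE == 32 standard-sized headers to the
 * tso_headers_free list.
 */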
/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}
static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}
static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}
/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			netif_tx_stop_queue(tx_queue->core_txq);
			/* This memory barrier protects the change of
			 * queue state from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				ACCESS_ONCE(tx_queue->read_count);
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			netif_tx_start_queue(tx_queue->core_txq);
		}

		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}
/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}
/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}
/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}
static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}
/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}
/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc)) {
			rc2 = NETDEV_TX_BUSY;
			goto unwind;
		}

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}
/*
 * Free up all TSO data structures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= tx_queue->ptr_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}