/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 ****************************************************************************/

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "tx.h"
#include "efx.h"
#include "falcon.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)

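/* Worked example (ring size assumed for illustration only): with a
 * 1024-entry descriptor ring, EFX_TXQ_MASK is 1023 and the threshold
 * works out to 511, so a stopped queue is only restarted once fewer
 * than 511 descriptors remain outstanding.  The real ring size comes
 * from EFX_TXQ_SIZE/EFX_TXQ_MASK defined elsewhere in the driver.
 */
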
/* We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_stop_queue(struct efx_nic *efx)
{
	spin_lock_bh(&efx->netif_stop_lock);
	EFX_TRACE(efx, "stop TX queue\n");

	atomic_inc(&efx->netif_stop_count);
	netif_stop_queue(efx->net_dev);

	spin_unlock_bh(&efx->netif_stop_lock);
}

/* Wake netif's TX queue
 * We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_wake_queue(struct efx_nic *efx)
{
	local_bh_disable();
	if (atomic_dec_and_lock(&efx->netif_stop_count,
				&efx->netif_stop_lock)) {
		EFX_TRACE(efx, "waking TX queue\n");
		netif_wake_queue(efx->net_dev);
		spin_unlock(&efx->netif_stop_lock);
	}
	local_bh_enable();
}

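/* Example of the nesting behaviour: if two channels each call
 * efx_stop_queue(), netif_stop_count reaches 2 and the netif queue
 * stays stopped until both have called efx_wake_queue(); only the
 * final call, which drops the count back to zero, actually wakes the
 * queue.
 */
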
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
			  "complete\n", tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped, and
 * the queue's insert pointer will be restored to its original value.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
				   struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo((struct sk_buff *)skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = EFX_TXQ_MASK - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				++tx_queue->stopped;
				/* This memory barrier protects the
				 * change of stopped from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					*(volatile unsigned *)
					&tx_queue->read_count;
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = EFX_TXQ_MASK - 1 - fill_level;
				if (unlikely(q_space-- <= 0))
					goto stop;
				smp_mb();
				--tx_queue->stopped;
			}

			insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
			if (likely(dma_len > len))
				dma_len = len;

			misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
			if (misalign && dma_len + misalign > 512)
				dma_len = 512 - misalign;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	falcon_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d "
		   "fragments for DMA\n", tx_queue->queue, skb->len,
		   skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any((struct sk_buff *)skb);
	goto unwind;

 stop:
	rc = NETDEV_TX_BUSY;

	if (tx_queue->stopped == 1)
		efx_stop_queue(efx);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & EFX_TXQ_MASK;
	read_ptr = tx_queue->read_count & EFX_TXQ_MASK;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
				"completion id %x\n", tx_queue->queue,
				read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
	}
}

/* Initiate a packet transmission on the specified TX queue.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.  It is
 * therefore a non-static inline, so as not to penalise performance
 * for non-loopback transmissions.
 *
 * Context: netif_tx_lock held
 */
inline netdev_tx_t efx_xmit(struct efx_nic *efx,
			    struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	/* Map fragments for DMA and add to TX queue */
	return efx_enqueue_skb(tx_queue, skb);
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
	else
		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];

	return efx_xmit(efx, tx_queue, skb);
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			netif_tx_lock(efx->net_dev);
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(efx);
			}
			netif_tx_unlock(efx->net_dev);
		}
	}
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int txq_size;
	int i, rc;

	EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);

	/* Allocate software ring */
	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= EFX_TXQ_MASK; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = falcon_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	BUG_ON(tx_queue->stopped);

	/* Set up TX descriptor ring */
	falcon_init_tx(tx_queue);
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	falcon_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);

	/* Release queue's stop on port, if any */
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;
		efx_wake_queue(tx_queue->efx);
	}
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
	falcon_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET 0
#else
#define TSOH_OFFSET NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE 128

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)

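/* Worked example (figures assumed for illustration): a typical
 * Ethernet + IPv4 + TCP header block is 14 + 20 + 20 = 54 bytes, so
 * TSOH_SIZE(54) is sizeof(struct efx_tso_header) + TSOH_OFFSET + 54,
 * comfortably below TSOH_STD_SIZE; such headers are served from the
 * free list rather than the heap.
 */
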
/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	unsigned header_len;
	int full_packet_size;
};

/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.
 */
static void efx_tso_check_safe(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
	}

	EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP));
	EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));
}

/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
			" headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}

/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr, misalign;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = EFX_TXQ_MASK - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			++tx_queue->stopped;
			/* This memory barrier protects the change of
			 * stopped from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				*(volatile unsigned *)&tx_queue->read_count;
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = EFX_TXQ_MASK - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			--tx_queue->stopped;
		}

		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >
				    EFX_TXQ_MASK);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		/* Ensure we do not cross a boundary unsupported by H/W */
		dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;

		misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
		if (misalign && dma_len + misalign > 512)
			dma_len = 512 - misalign;

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		buffer->continuation = true;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}

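/* Note on the "- 1" in the q_space calculation above: insert_count and
 * read_count are compared modulo the ring size, so a completely full
 * ring would look identical to an empty one; keeping at least one
 * entry unused avoids that ambiguity.
 */
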
/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}

/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   EFX_TXQ_MASK];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		buffer->len = 0;
		buffer->continuation = true;
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
			buffer->unmap_single = false;
		}
	}
}

/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	st->ipv4_id = ntohs(ip_hdr(skb)->id);
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}

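/* Worked example (numbers assumed for illustration): for a TCP/IPv4 skb
 * with a 14-byte Ethernet header, 20-byte IP header, 20-byte TCP header
 * and gso_size of 1448, tso_start() computes header_len = 54 and
 * full_packet_size = 1502, so each outgoing segment carries 1448 bytes
 * of payload behind a freshly built 54-byte header.
 */
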
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}

/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct iphdr *tsoh_iph;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
	tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}
	tsoh_iph->tot_len = htons(ip_length);

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	tsoh_iph->id = htons(st->ipv4_id);
	st->ipv4_id++;

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}

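/* Example of the per-segment rewrite above (values assumed for
 * illustration): with gso_size 1448 and an initial sequence number of
 * 1000, successive segments are emitted with seq 1000, 2448, 3896, ...
 * and IPv4 IDs stepping by one, while only the final segment copies the
 * original FIN/PSH flags.
 */
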
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Verify TSO is safe - these checks should never fail. */
	efx_tso_check_safe(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc))
			goto stop;

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	falcon_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any((struct sk_buff *)skb);
	goto unwind;

 stop:
	rc2 = NETDEV_TX_BUSY;

	/* Stop the queue if it wasn't stopped before. */
	if (tx_queue->stopped == 1)
		efx_stop_queue(efx);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}

/*
 * Free up all TSO datastructures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= EFX_TXQ_MASK; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}