/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 ****************************************************************************/
#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "workarounds.h"
/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
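
/* Editor's illustrative example (not part of the original source): assuming
 * a ring of 1024 entries, EFX_TXQ_THRESHOLD() is 512, so a stopped queue is
 * only restarted once fewer than half of the descriptors are in flight:
 *
 *	fill_level = tx_queue->insert_count - tx_queue->read_count;
 *	if (fill_level < EFX_TXQ_THRESHOLD(efx))
 *		efx_wake_queue(tx_queue->channel);
 *
 * which mirrors the check made in efx_xmit_done() below.
 */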
/* We need to be able to nest calls to netif_tx_stop_queue(), partly
 * because of the 2 hardware queues associated with each core queue,
 * but also so that we can inhibit TX for reasons other than a full
 * hardware queue. */
void efx_stop_queue(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);

	if (!tx_queue)
		return;

	spin_lock_bh(&channel->tx_stop_lock);
	netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");

	atomic_inc(&channel->tx_stop_count);
	netif_tx_stop_queue(
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES));

	spin_unlock_bh(&channel->tx_stop_lock);
}
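
/* Editor's illustrative sketch (not part of the original source): the stop
 * count lets callers nest stop/wake pairs, so TX is only re-enabled once
 * every reason for stopping has been cleared, e.g.:
 *
 *	efx_stop_queue(channel);	(tx_stop_count 0 -> 1, queue stopped)
 *	efx_stop_queue(channel);	(tx_stop_count 1 -> 2)
 *	efx_wake_queue(channel);	(tx_stop_count 2 -> 1, still stopped)
 *	efx_wake_queue(channel);	(tx_stop_count 1 -> 0, queue woken)
 */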
/* Decrement core TX queue stop count and wake it if the count is 0 */
void efx_wake_queue(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);

	if (!tx_queue)
		return;

	local_bh_disable();
	if (atomic_dec_and_lock(&channel->tx_stop_count,
				&channel->tx_stop_lock)) {
		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
		netif_tx_wake_queue(
			netdev_get_tx_queue(efx->net_dev,
					    tx_queue->queue / EFX_TXQ_TYPES));
		spin_unlock(&channel->tx_stop_lock);
	}
	local_bh_enable();
}
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}
/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	struct efx_tso_header *next;

	dma_addr_t dma_addr;
	unsigned unmap_len;
};
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);
static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}
static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
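
/* Editor's worked example (illustrative, not part of the original source):
 * for a dma_addr whose low 12 bits are 0xf40, (~dma_addr & 0xfff) + 1 =
 * 0x0bf + 1 = 0xc0 (192 bytes), which is exactly the distance to the next
 * 4K boundary.  A 4K-aligned address yields the full 0x1000 (4096) bytes,
 * so no descriptor produced here ever crosses a 4K boundary.
 */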
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				++tx_queue->stopped;
				/* This memory barrier protects the
				 * change of stopped from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0))
					goto stop;
				smp_mb();
				--tx_queue->stopped;
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc = NETDEV_TX_BUSY;

	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->channel);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}
/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
				    skb->ip_summed == CHECKSUM_PARTIAL ?
				    EFX_TXQ_TYPE_OFFLOAD : 0);

	return efx_enqueue_skb(tx_queue, skb);
}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct netdev_queue *queue;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			queue = netdev_get_tx_queue(
				efx->net_dev,
				tx_queue->queue / EFX_TXQ_TYPES);
			__netif_tx_lock(queue, smp_processor_id());
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(tx_queue->channel);
			}
			__netif_tx_unlock(queue);
		}
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int i, rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= tx_queue->ptr_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	BUG_ON(tx_queue->stopped);

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);
}
void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);

	/* Release queue's stop on port, if any */
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;
		efx_wake_queue(tx_queue->channel);
	}
}
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}
/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET 0
#else
#define TSOH_OFFSET NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE 128
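
/* Editor's illustrative sketch (not part of the original source): each
 * free-list block is laid out as the struct, then TSOH_OFFSET bytes of
 * padding, then the header bytes, so TSOH_BUFFER() simply skips past the
 * struct and padding:
 *
 *	struct efx_tso_header *tsoh = tx_queue->tso_headers_free;
 *	u8 *hdr = TSOH_BUFFER(tsoh);	(first byte of the copied headers)
 *
 * TSOH_SIZE(hdr_len) gives the bytes needed for that layout; headers for
 * which it exceeds TSOH_STD_SIZE (128) fall back to the heap path in
 * efx_tsoh_heap_alloc() below.
 */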
#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};
/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}

	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}
/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}
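
/* Editor's worked example (illustrative, assumes 4K pages): each call above
 * carves one DMA-coherent page into PAGE_SIZE / TSOH_STD_SIZE = 4096 / 128
 * = 32 efx_tso_header blocks and pushes them all onto
 * tx_queue->tso_headers_free.
 */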
/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}
static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}
/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			++tx_queue->stopped;
			/* This memory barrier protects the change of
			 * stopped from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				ACCESS_ONCE(tx_queue->read_count);
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			--tx_queue->stopped;
		}

		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}
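
/* Editor's illustrative note (not part of the original source): because
 * efx_max_tx_len() caps each descriptor at the next 4K boundary, a single
 * call such as
 *
 *	rc = efx_tx_queue_insert(tx_queue, dma_addr, 9000, &buffer);
 *
 * may consume three or four ring entries depending on the alignment of
 * dma_addr; only the final entry (returned in *final_buffer) later takes
 * ownership of the skb and of the DMA unmapping information.
 */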
/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}
/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}
/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}
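
/* Editor's worked example (illustrative values, not from the original
 * source): for an untagged IPv4/TCP skb with 20-byte IP and TCP headers and
 * gso_size = 1460, header_len = 14 + 20 + 20 = 54 and full_packet_size =
 * 54 + 1460 = 1514, so every emitted segment except possibly the last
 * carries 1460 bytes of payload.
 */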
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}
static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}
/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}
/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc))
			goto stop;

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc2 = NETDEV_TX_BUSY;

	/* Stop the queue if it wasn't stopped before. */
	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->channel);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}
/*
 * Free up all TSO data structures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= tx_queue->ptr_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}