/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "rx.h"
#include "efx.h"
#include "falcon.h"
#include "workarounds.h"
/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u
/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When LRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for LRO is less
 *     than the performance hit of using page-based allocation for non-LRO
 *     traffic, so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
 *                      RX_ALLOC_FACTOR_SKB)
 *
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;

#define RX_ALLOC_LEVEL_LRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_LRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
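
/* Illustration of the hysteresis above (not part of the heuristic itself):
 * with RX_ALLOC_FACTOR_LRO = 1 and RX_ALLOC_FACTOR_SKB = -2, roughly two
 * LRO-merged receives are needed to cancel out each non-LRO receive, and
 * rx_alloc_level must climb above RX_ALLOC_LEVEL_LRO (0x2000) before a
 * channel switches to page-based allocation.
 */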
/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2
/* Macros for zero-order pages (potentially) containing multiple RX buffers */
#define RX_DATA_OFFSET(_data)				\
	(((unsigned long) (_data)) & (PAGE_SIZE-1))
#define RX_BUF_OFFSET(_rx_buf)				\
	RX_DATA_OFFSET((_rx_buf)->data)

#define RX_PAGE_SIZE(_efx)				\
	(PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
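
/* For illustration only (hypothetical numbers): with 4 KB pages and an
 * rx_buffer_len that rounds up to 2 KB at the 512-byte boundaries used in
 * efx_init_rx_buffer_page(), a zero-order page holds two RX buffers, so
 * RX_BUF_OFFSET() is what distinguishes buffers sharing the same page.
 */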
/**************************************************************************
 *
 * Linux generic LRO handling
 *
 **************************************************************************
 */
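
/* get_skb_header() callback for the kernel inet_lro manager: locate the
 * IPv4 and TCP headers in an skb-backed receive and nudge the channel's
 * allocation hysteresis accordingly.
 */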
static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
			       void **tcpudp_hdr, u64 *hdr_flags, void *priv)
{
	struct efx_channel *channel = (struct efx_channel *)priv;
	struct tcphdr *th;
	struct iphdr *iph;

	iph = (struct iphdr *)skb->data;
	if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
		goto fail;

	th = (struct tcphdr *)(skb->data + iph->ihl * 4);

	*tcpudp_hdr = th;
	*ip_hdr = iph;
	*hdr_flags = LRO_IPV4 | LRO_TCP;

	channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
	return 0;
fail:
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	return -1;
}
static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
			    void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
			    void *priv)
{
	struct efx_channel *channel = (struct efx_channel *)priv;
	struct ethhdr *eh;
	struct iphdr *iph;

	/* We support EtherII and VLAN encapsulated IPv4 */
	eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
	*mac_hdr = eh;

	if (eh->h_proto == htons(ETH_P_IP)) {
		iph = (struct iphdr *)(eh + 1);
	} else {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
		if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
			goto fail;

		iph = (struct iphdr *)(veh + 1);
	}
	*ip_hdr = iph;

	/* We can only do LRO over TCP */
	if (iph->protocol != IPPROTO_TCP)
		goto fail;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);

	channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
	return 0;
fail:
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	return -1;
}
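
/* Set up the generic LRO manager for one channel.  The descriptor array is
 * allocated here and released again in efx_lro_fini().
 */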
int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
{
	size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
	struct net_lro_desc *lro_arr;

	/* Allocate the LRO descriptors structure */
	lro_arr = kzalloc(s, GFP_KERNEL);
	if (lro_arr == NULL)
		return -ENOMEM;

	lro_mgr->lro_arr = lro_arr;
	lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
	lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
	lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;

	lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
	lro_mgr->get_frag_header = efx_get_frag_hdr;
	lro_mgr->dev = efx->net_dev;

	lro_mgr->features = LRO_F_NAPI;

	/* We can pass packets up with the checksum intact */
	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;

	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;

	return 0;
}
void efx_lro_fini(struct net_lro_mgr *lro_mgr)
{
	kfree(lro_mgr->lro_arr);
	lro_mgr->lro_arr = NULL;
}
/**
 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
 *
 * @rx_queue:		Efx RX queue
 * @rx_buf:		RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Return a negative error code or 0 on success.
 */
static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
					 struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	int skb_len = efx->rx_buffer_len;

	rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
	if (unlikely(!rx_buf->skb))
		return -ENOMEM;

	/* Adjust the SKB for padding and checksum */
	skb_reserve(rx_buf->skb, NET_IP_ALIGN);
	rx_buf->len = skb_len - NET_IP_ALIGN;
	rx_buf->data = (char *)rx_buf->skb->data;
	rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;

	rx_buf->dma_addr = pci_map_single(efx->pci_dev,
					  rx_buf->data, rx_buf->len,
					  PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
		dev_kfree_skb_any(rx_buf->skb);
		rx_buf->skb = NULL;
		return -EIO;
	}

	return 0;
}
/**
 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
 *
 * @rx_queue:		Efx RX queue
 * @rx_buf:		RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Return a negative error code or 0 on success.
 */
static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
					  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = rx_queue->efx;
	int bytes, space, offset;

	bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;

	/* If there is space left in the previously allocated page,
	 * then use it. Otherwise allocate a new one */
	rx_buf->page = rx_queue->buf_page;
	if (rx_buf->page == NULL) {
		dma_addr_t dma_addr;

		rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
					   efx->rx_buffer_order);
		if (unlikely(rx_buf->page == NULL))
			return -ENOMEM;

		dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
					0, RX_PAGE_SIZE(efx),
					PCI_DMA_FROMDEVICE);

		if (unlikely(pci_dma_mapping_error(dma_addr))) {
			__free_pages(rx_buf->page, efx->rx_buffer_order);
			rx_buf->page = NULL;
			return -EIO;
		}

		rx_queue->buf_page = rx_buf->page;
		rx_queue->buf_dma_addr = dma_addr;
		rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
				      EFX_PAGE_IP_ALIGN);
	}

	offset = RX_DATA_OFFSET(rx_queue->buf_data);
	rx_buf->len = bytes;
	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
	rx_buf->data = rx_queue->buf_data;

	/* Try to pack multiple buffers per page */
	if (efx->rx_buffer_order == 0) {
		/* The next buffer starts on the next 512 byte boundary */
		rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
		offset += ((bytes + 0x1ff) & ~0x1ff);

		space = RX_PAGE_SIZE(efx) - offset;
		if (space >= bytes) {
			/* Refs dropped on kernel releasing each skb */
			get_page(rx_queue->buf_page);
			goto out;
		}
	}

	/* This is the final RX buffer for this page, so mark it for
	 * unmapping */
	rx_queue->buf_page = NULL;
	rx_buf->unmap_addr = rx_queue->buf_dma_addr;

 out:
	return 0;
}
/* This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.
 */
static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *new_rx_buf)
{
	int rc = 0;

	if (rx_queue->channel->rx_alloc_push_pages) {
		new_rx_buf->skb = NULL;
		rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
		rx_queue->alloc_page_count++;
	} else {
		new_rx_buf->page = NULL;
		rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
		rx_queue->alloc_skb_count++;
	}

	if (unlikely(rc < 0))
		EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
			   rx_queue->queue, rc);
	return rc;
}
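
/* Unmap the DMA mapping behind an RX buffer.  Page-backed buffers share a
 * mapping, so only the final buffer on a page (the one with unmap_addr set)
 * actually unmaps it; skb-backed buffers always own their own mapping.
 */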
static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
					struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		EFX_BUG_ON_PARANOID(rx_buf->skb);
		if (rx_buf->unmap_addr) {
			pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
				       RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
			rx_buf->unmap_addr = 0;
		}
	} else if (likely(rx_buf->skb)) {
		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
				 rx_buf->len, PCI_DMA_FROMDEVICE);
	}
}
static inline void efx_free_rx_buffer(struct efx_nic *efx,
				       struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
	} else if (likely(rx_buf->skb)) {
		dev_kfree_skb_any(rx_buf->skb);
		rx_buf->skb = NULL;
	}
}
static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
				       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}
/**
 * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * @retry:		Recheck the fill level
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit.  If there is insufficient atomic
 * memory to do so, the caller should retry.
 */
static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
					  int retry)
{
	struct efx_rx_buffer *rx_buf;
	unsigned fill_level, index;
	int i, space, rc = 0;

	/* Calculate current fill level.  Do this outside the lock,
	 * because most of the time we'll end up not wanting to do the
	 * fill anyway.
	 */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level >
			    rx_queue->efx->type->rxd_ring_mask + 1);

	/* Don't fill if we don't need to */
	if (fill_level >= rx_queue->fast_fill_trigger)
		return 0;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	/* Acquire RX add lock.  If this lock is contended, then a fast
	 * fill must already be in progress (e.g. in the refill
	 * tasklet), so we don't need to do anything
	 */
	if (!spin_trylock_bh(&rx_queue->add_lock))
		return -1;

 retry:
	/* Recalculate current fill level now that we have the lock */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level >
			    rx_queue->efx->type->rxd_ring_mask + 1);
	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out_unlock;

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
		  " level %d to level %d using %s allocation\n",
		  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
		  rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		for (i = 0; i < EFX_RX_BATCH; ++i) {
			index = (rx_queue->added_count &
				 rx_queue->efx->type->rxd_ring_mask);
			rx_buf = efx_rx_buffer(rx_queue, index);
			rc = efx_init_rx_buffer(rx_queue, rx_buf);
			if (unlikely(rc))
				goto out;
			++rx_queue->added_count;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
		  "to level %d\n", rx_queue->queue,
		  rx_queue->added_count - rx_queue->removed_count);

 out:
	/* Send write pointer to card. */
	falcon_notify_rx_desc(rx_queue);

	/* If the fast fill is running from inside the refill tasklet, then
	 * for SMP systems it may be running on a different CPU to
	 * RX event processing, which means that the fill level may now be
	 * out of date.
	 */
	if (unlikely(retry && (rc == 0)))
		goto retry;

 out_unlock:
	spin_unlock_bh(&rx_queue->add_lock);

	return rc;
}
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit.  If there is insufficient memory to do so,
 * it will schedule a work item to immediately continue the fast fill.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	int rc;

	rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
	if (rc) {
		/* Schedule the work item to run immediately. The hope is
		 * that work is immediately pending to free some memory
		 * (e.g. an RX event or TX completion)
		 */
		efx_schedule_slow_fill(rx_queue, 0);
	}
}
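
/* Work item used when a fast fill could not complete for lack of memory:
 * retries the fill from process context after the kernel has had at least
 * a jiffy to free some memory.
 */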
void efx_rx_work(struct work_struct *data)
{
	struct efx_rx_queue *rx_queue;
	int rc;

	rx_queue = container_of(data, struct efx_rx_queue, work.work);

	if (unlikely(!rx_queue->channel->enabled))
		return;

	EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
		  "%d\n", rx_queue->queue, raw_smp_processor_id());

	++rx_queue->slow_fill_count;
	/* Push new RX descriptors, allowing at least 1 jiffy for
	 * the kernel to free some more memory. */
	rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
	if (rc)
		efx_schedule_slow_fill(rx_queue, 1);
}
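
/* Validate the length reported in the RX event against the buffer that was
 * actually posted.  Overlength events are flagged for discard, and in the
 * worst case (workaround 8071) the buffer must be leaked and the NIC reset.
 */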
static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
					    struct efx_rx_buffer *rx_buf,
					    int len, int *discard,
					    int *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	*discard = 1;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		EFX_ERR_RL(efx, " RX queue %d seriously overlength "
			   "RX event (0x%x > 0x%x+0x%x). Leaking\n",
			   rx_queue->queue, len, max_len,
			   efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = (rx_buf->skb != NULL);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		EFX_ERR_RL(efx, " RX queue %d overlength RX event "
			   "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
	}

	rx_queue->channel->n_rx_overlength++;
}
/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method
 */
static inline void efx_rx_packet_lro(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf)
{
	struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
	void *priv = channel;

	/* Pass the skb/page into the LRO engine */
	if (rx_buf->page) {
		struct skb_frag_struct frags;

		frags.page = rx_buf->page;
		frags.page_offset = RX_BUF_OFFSET(rx_buf);
		frags.size = rx_buf->len;

		lro_receive_frags(lro_mgr, &frags, rx_buf->len,
				  rx_buf->len, priv, 0);

		EFX_BUG_ON_PARANOID(rx_buf->skb);
		rx_buf->page = NULL;
	} else {
		EFX_BUG_ON_PARANOID(!rx_buf->skb);

		lro_receive_skb(lro_mgr, rx_buf->skb, priv);
		rx_buf->skb = NULL;
	}
}
/* Allocate and construct an SKB around a struct page. */
static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
					    struct efx_nic *efx,
					    int hdr_len)
{
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
	if (unlikely(skb == NULL)) {
		EFX_ERR_RL(efx, "RX out of memory for skb\n");
		return NULL;
	}

	EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);

	skb->len = rx_buf->len;
	skb->truesize = rx_buf->len + sizeof(struct sk_buff);
	memcpy(skb->data, rx_buf->data, hdr_len);
	skb->tail += hdr_len;

	/* Append the remaining page onto the frag list */
	if (unlikely(rx_buf->len > hdr_len)) {
		struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
		frag->page = rx_buf->page;
		frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
		frag->size = skb->len - hdr_len;
		skb_shinfo(skb)->nr_frags = 1;
		skb->data_len = frag->size;
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		skb->data_len = 0;
	}

	/* Ownership has transferred from the rx_buf to skb */
	rx_buf->page = NULL;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}
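
/* Handle a received packet.  First half: runs from the RX event, does not
 * touch the packet payload, and pipelines the buffer so that the previous
 * packet's headers have time to be prefetched before __efx_rx_packet().
 */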
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, int checksummed, int discard)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	int leak_packet = 0;

	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_BUG_ON_PARANOID(!rx_buf->data);
	EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
	EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len,
				 &discard, &leak_packet);

	EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
		  rx_queue->queue, index,
		  (unsigned long long)rx_buf->dma_addr, len,
		  (checksummed ? " [SUMMED]" : ""),
		  (discard ? " [DISCARD]" : ""));

	/* Discard packet, if instructed to do so */
	if (unlikely(discard)) {
		if (unlikely(leak_packet))
			rx_queue->channel->n_skbuff_leaks++;
		else
			/* We haven't called efx_unmap_rx_buffer yet,
			 * so fini the entire rx_buffer here */
			efx_fini_rx_buffer(rx_queue, rx_buf);
		return;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(rx_buf->data);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len;
	if (rx_queue->channel->rx_pkt)
		__efx_rx_packet(rx_queue->channel,
				rx_queue->channel->rx_pkt,
				rx_queue->channel->rx_pkt_csummed);
	rx_queue->channel->rx_pkt = rx_buf;
	rx_queue->channel->rx_pkt_csummed = checksummed;
}
/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
		     struct efx_rx_buffer *rx_buf, int checksummed)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;
	int lro = efx->net_dev->features & NETIF_F_LRO;

	if (rx_buf->skb) {
		prefetch(skb_shinfo(rx_buf->skb));

		skb_put(rx_buf->skb, rx_buf->len);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
						       efx->net_dev);
	}

	/* Both our generic-LRO and SFC-SSR support skb and page based
	 * allocation, but neither support switching from one to the
	 * other on the fly. If we spot that the allocation mode has
	 * changed, then flush the LRO state.
	 */
	if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
		efx_flush_lro(channel);
		channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
	}
	if (likely(checksummed && lro)) {
		efx_rx_packet_lro(channel, rx_buf);
		goto done;
	}

	/* Form an skb if required */
	if (rx_buf->page) {
		int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
		skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
		if (unlikely(skb == NULL)) {
			efx_free_rx_buffer(efx, rx_buf);
			goto done;
		}
	} else {
		/* We now own the SKB */
		skb = rx_buf->skb;
		rx_buf->skb = NULL;
	}

	EFX_BUG_ON_PARANOID(rx_buf->page);
	EFX_BUG_ON_PARANOID(rx_buf->skb);
	EFX_BUG_ON_PARANOID(!skb);

	/* Set the SKB flags */
	if (unlikely(!checksummed || !efx->rx_checksum_enabled))
		skb->ip_summed = CHECKSUM_NONE;

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;

done:
	efx->net_dev->last_rx = jiffies;
}
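
/* Re-evaluate the RX buffer allocation strategy for a channel, applying the
 * hysteresis described above rx_alloc_method once per NAPI poll interval.
 */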
void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	/* Only makes sense to use page based allocation if LRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int rxq_size;
	int rc;

	EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);

	/* Allocate RX buffers */
	rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
	rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
	if (!rx_queue->buffer) {
		rc = -ENOMEM;
		goto fail1;
	}

	rc = falcon_probe_rx(rx_queue);
	if (rc)
		goto fail2;

	return 0;

 fail2:
	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
 fail1:
	return rc;
}
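
/* Initialise an RX queue before use: reset the descriptor counters and
 * derive the fast-fill watermarks from the module parameters.  For example
 * (illustrative numbers only), a 4096-entry ring with EFX_RXD_HEAD_ROOM of 2
 * gives max_fill = 4094, so the default 90%/95% settings yield a refill
 * trigger of ~3684 descriptors and a fill limit of ~3889.
 */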
int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, limit;

	EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	rx_queue->min_overfill = -1U;

	/* Initialise limit fields */
	max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
	limit = max_fill * min(rx_refill_limit, 100U) / 100U;

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->fast_fill_limit = limit;

	/* Set up RX descriptor ring */
	return falcon_init_rx(rx_queue);
}
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);

	falcon_fini_rx(rx_queue);

	/* Release RX buffers NB start at index 0 not current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* For a page that is part-way through splitting into RX buffers */
	if (rx_queue->buf_page != NULL) {
		pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
			       RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
		__free_pages(rx_queue->buf_page,
			     rx_queue->efx->rx_buffer_order);
		rx_queue->buf_page = NULL;
	}
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);

	falcon_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}
void efx_flush_lro(struct efx_channel *channel)
{
	lro_flush_all(&channel->lro_mgr);
}
module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring fast/slow fill threshold (%)");