/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 ****************************************************************************/

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_BATCH)

/* Maximum length for an RX descriptor sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state) \
			  - EFX_PAGE_IP_ALIGN)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

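/* Worked example of the head-room arithmetic: a maximal-length frame can span
 * up to EFX_RX_MAX_FRAGS = ceil(EFX_MAX_FRAME_LEN(EFX_MAX_MTU) /
 * EFX_RX_USR_BUF_SIZE) descriptors.  efx_init_rx_queue() therefore limits the
 * fill level to rxq_entries - EFX_RXD_HEAD_ROOM, so the write pointer can
 * never catch the read pointer while such a frame is still completing.
 */
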
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
	/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

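/* The value read above is the flow hash the hardware places immediately
 * before the Ethernet header (rx_buffer_hash_size bytes in total, of which
 * the last 4 are read here).  efx_rx_packet() strips the prefix by advancing
 * page_offset/len, and efx_rx_packet_gro() copies the value into skb->rxhash
 * when NETIF_F_RXHASH is enabled on the interface.
 */
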
static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

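/* efx_init_rx_buffers() below tries efx_reuse_page() before falling back to
 * alloc_pages(): a page taken from the recycle ring is still DMA-mapped, with
 * the mapping recorded in the struct efx_rx_page_state at the start of the
 * page, so both the allocation and the dma_map_page() call are skipped.
 */
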
/**
 * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->page = page;
		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
		rx_buf->len = efx->rx_dma_len;
		++rx_queue->added_count;

		if ((~count & 1) && (efx->rx_dma_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			rx_buf->flags = 0;
			dma_addr += (PAGE_SIZE >> 1);
			page_offset += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	}

	return 0;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		put_page(rx_buf->page);
		rx_buf->page = NULL;
	}
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffer(rx_buf);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_buffers(struct efx_channel *channel,
				   struct efx_rx_buffer *rx_buf,
				   unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

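/* Page lifecycle in outline: a page leaves the descriptor ring here and goes
 * into page_ring[] while the network stack may still hold a reference to its
 * data; it is handed back to the descriptor ring by efx_reuse_page() only
 * once page_count() has dropped back to 1, i.e. the driver again holds the
 * sole reference.  Otherwise efx_reuse_page() unmaps and releases it.
 */
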
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		while (n_frags--) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb->rxhash = efx_rx_buf_hash(eh);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	gro_result = napi_gro_frags(napi);
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}

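/* __efx_rx_packet() below selects this GRO path when the channel type does
 * not provide a receive_skb handler; otherwise efx_rx_deliver() builds a
 * full skb around the page fragments via efx_rx_mk_skb().
 */
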
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
	if (unlikely(skb == NULL))
		return NULL;

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
	memcpy(__skb_put(skb, hdr_len), eh, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
		   unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		put_page(rx_buf->page);
		efx_recycle_rx_buffers(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1)
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->type->rx_buffer_hash_size;
	rx_buf->len -= efx->type->rx_buffer_hash_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
		}
		rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle buffers and pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_buffers(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		efx_free_rx_buffer(rx_buf);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(rx_buf);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if (!channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}

void efx_init_rx_recycle_ring(struct efx_nic *efx,
			      struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (efx->pci_dev->dev.iommu_group)
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}

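/* Worked example (assuming rx_bufs_per_page is 2, i.e. each page is split in
 * half as in efx_init_rx_buffers()): without an IOMMU the ring is sized for
 * EFX_RECYCLE_RING_SIZE_NOIOMMU = 2 * EFX_RX_BATCH = 16 buffers, giving
 * roundup_pow_of_two(16 / 2) = 8 pages and a page_ptr_mask of 7.
 */
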
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger = max_fill - EFX_RX_BATCH;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;

	/* Set up RX descriptor ring */
	rx_queue->enabled = true;
	efx_nic_init_rx(rx_queue);
}

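/* For example (illustrative numbers only): with a 512-entry ring,
 * max_fill = 512 - EFX_RXD_HEAD_ROOM; loading the driver with
 * rx_refill_threshold=90 sets fast_fill_trigger to 90% of max_fill, capped
 * at max_fill - EFX_RX_BATCH, and efx_fast_push_rx_descriptors() only
 * refills once the fill level drops below that trigger.
 */
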
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* A flush failure might have left rx_queue->enabled */
	rx_queue->enabled = false;

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");