/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 */
33 /* function prototypes */
34 static mblk_t
*ixgbe_rx_bind(ixgbe_rx_data_t
*, uint32_t, uint32_t);
35 static mblk_t
*ixgbe_rx_copy(ixgbe_rx_data_t
*, uint32_t, uint32_t);
36 static void ixgbe_rx_assoc_hcksum(mblk_t
*, uint32_t);
37 static mblk_t
*ixgbe_lro_bind(ixgbe_rx_data_t
*, uint32_t, uint32_t, uint32_t);
38 static mblk_t
*ixgbe_lro_copy(ixgbe_rx_data_t
*, uint32_t, uint32_t, uint32_t);
39 static int ixgbe_lro_get_start(ixgbe_rx_data_t
*, uint32_t);
40 static uint32_t ixgbe_lro_get_first(ixgbe_rx_data_t
*, uint32_t);
43 #pragma inline(ixgbe_rx_assoc_hcksum)
44 #pragma inline(ixgbe_lro_get_start)
45 #pragma inline(ixgbe_lro_get_first)
49 * ixgbe_rx_recycle - The call-back function to reclaim rx buffer.
51 * This function is called when an mp is freed by the user thru
52 * freeb call (Only for mp constructed through desballoc call).
53 * It returns back the freed buffer to the free list.
56 ixgbe_rx_recycle(caddr_t arg
)
59 ixgbe_rx_ring_t
*rx_ring
;
60 ixgbe_rx_data_t
*rx_data
;
61 rx_control_block_t
*recycle_rcb
;
65 recycle_rcb
= (rx_control_block_t
*)(uintptr_t)arg
;
66 rx_data
= recycle_rcb
->rx_data
;
67 rx_ring
= rx_data
->rx_ring
;
68 ixgbe
= rx_ring
->ixgbe
;
70 if (recycle_rcb
->ref_cnt
== 0) {
72 * This case only happens when rx buffers are being freed
73 * in ixgbe_stop() and freemsg() is called.
78 ASSERT(recycle_rcb
->mp
== NULL
);
81 * Using the recycled data buffer to generate a new mblk
83 recycle_rcb
->mp
= desballoc((unsigned char *)
84 recycle_rcb
->rx_buf
.address
,
85 recycle_rcb
->rx_buf
.size
,
86 0, &recycle_rcb
->free_rtn
);
89 * Put the recycled rx control block into free list
91 mutex_enter(&rx_data
->recycle_lock
);
93 free_index
= rx_data
->rcb_tail
;
94 ASSERT(rx_data
->free_list
[free_index
] == NULL
);
96 rx_data
->free_list
[free_index
] = recycle_rcb
;
97 rx_data
->rcb_tail
= NEXT_INDEX(free_index
, 1, rx_data
->free_list_size
);
99 mutex_exit(&rx_data
->recycle_lock
);
102 * The atomic operation on the number of the available rx control
103 * blocks in the free list is used to make the recycling mutual
104 * exclusive with the receiving.
106 atomic_inc_32(&rx_data
->rcb_free
);
107 ASSERT(rx_data
->rcb_free
<= rx_data
->free_list_size
);
110 * Considering the case that the interface is unplumbed
111 * and there are still some buffers held by the upper layer.
112 * When the buffer is returned back, we need to free it.
114 ref_cnt
= atomic_dec_32_nv(&recycle_rcb
->ref_cnt
);
116 if (recycle_rcb
->mp
!= NULL
) {
117 freemsg(recycle_rcb
->mp
);
118 recycle_rcb
->mp
= NULL
;
121 ixgbe_free_dma_buffer(&recycle_rcb
->rx_buf
);
123 mutex_enter(&ixgbe
->rx_pending_lock
);
124 atomic_dec_32(&rx_data
->rcb_pending
);
125 atomic_dec_32(&ixgbe
->rcb_pending
);
128 * When there is not any buffer belonging to this rx_data
129 * held by the upper layer, the rx_data can be freed.
131 if ((rx_data
->flag
& IXGBE_RX_STOPPED
) &&
132 (rx_data
->rcb_pending
== 0))
133 ixgbe_free_rx_ring_data(rx_data
);
135 mutex_exit(&ixgbe
->rx_pending_lock
);
140 * ixgbe_rx_copy - Use copy to process the received packet.
142 * This function will use bcopy to process the packet
143 * and send the copied packet upstream.
146 ixgbe_rx_copy(ixgbe_rx_data_t
*rx_data
, uint32_t index
, uint32_t pkt_len
)
149 rx_control_block_t
*current_rcb
;
152 ixgbe
= rx_data
->rx_ring
->ixgbe
;
153 current_rcb
= rx_data
->work_list
[index
];
155 DMA_SYNC(¤t_rcb
->rx_buf
, DDI_DMA_SYNC_FORKERNEL
);
157 if (ixgbe_check_dma_handle(current_rcb
->rx_buf
.dma_handle
) !=
159 ddi_fm_service_impact(ixgbe
->dip
, DDI_SERVICE_DEGRADED
);
160 atomic_or_32(&ixgbe
->ixgbe_state
, IXGBE_ERROR
);
165 * Allocate buffer to receive this packet
167 mp
= allocb(pkt_len
+ IPHDR_ALIGN_ROOM
, 0);
169 ixgbe_log(ixgbe
, "ixgbe_rx_copy: allocate buffer failed");
174 * Copy the data received into the new cluster
176 mp
->b_rptr
+= IPHDR_ALIGN_ROOM
;
177 bcopy(current_rcb
->rx_buf
.address
, mp
->b_rptr
, pkt_len
);
178 mp
->b_wptr
= mp
->b_rptr
+ pkt_len
;
184 * ixgbe_rx_bind - Use existing DMA buffer to build mblk for receiving.
186 * This function will use pre-bound DMA buffer to receive the packet
187 * and build mblk that will be sent upstream.
190 ixgbe_rx_bind(ixgbe_rx_data_t
*rx_data
, uint32_t index
, uint32_t pkt_len
)
192 rx_control_block_t
*current_rcb
;
193 rx_control_block_t
*free_rcb
;
196 ixgbe_t
*ixgbe
= rx_data
->rx_ring
->ixgbe
;
199 * If the free list is empty, we cannot proceed to send
200 * the current DMA buffer upstream. We'll have to return
201 * and use bcopy to process the packet.
203 if (ixgbe_atomic_reserve(&rx_data
->rcb_free
, 1) < 0)
206 current_rcb
= rx_data
->work_list
[index
];
208 * If the mp of the rx control block is NULL, try to do
211 if (current_rcb
->mp
== NULL
) {
212 current_rcb
->mp
= desballoc((unsigned char *)
213 current_rcb
->rx_buf
.address
,
214 current_rcb
->rx_buf
.size
,
215 0, ¤t_rcb
->free_rtn
);
217 * If it is failed to built a mblk using the current
218 * DMA buffer, we have to return and use bcopy to
219 * process the packet.
221 if (current_rcb
->mp
== NULL
) {
222 atomic_inc_32(&rx_data
->rcb_free
);
227 * Sync up the data received
229 DMA_SYNC(¤t_rcb
->rx_buf
, DDI_DMA_SYNC_FORKERNEL
);
231 if (ixgbe_check_dma_handle(current_rcb
->rx_buf
.dma_handle
) !=
233 ddi_fm_service_impact(ixgbe
->dip
, DDI_SERVICE_DEGRADED
);
234 atomic_inc_32(&rx_data
->rcb_free
);
235 atomic_or_32(&ixgbe
->ixgbe_state
, IXGBE_ERROR
);
239 mp
= current_rcb
->mp
;
240 current_rcb
->mp
= NULL
;
241 atomic_inc_32(¤t_rcb
->ref_cnt
);
243 mp
->b_wptr
= mp
->b_rptr
+ pkt_len
;
244 mp
->b_next
= mp
->b_cont
= NULL
;
247 * Strip off one free rx control block from the free list
249 free_index
= rx_data
->rcb_head
;
250 free_rcb
= rx_data
->free_list
[free_index
];
251 ASSERT(free_rcb
!= NULL
);
252 rx_data
->free_list
[free_index
] = NULL
;
253 rx_data
->rcb_head
= NEXT_INDEX(free_index
, 1, rx_data
->free_list_size
);
256 * Put the rx control block to the work list
258 rx_data
->work_list
[index
] = free_rcb
;
264 * ixgbe_lro_bind - Use existing DMA buffer to build LRO mblk for receiving.
266 * This function will use pre-bound DMA buffers to receive the packet
267 * and build LRO mblk that will be sent upstream.
270 ixgbe_lro_bind(ixgbe_rx_data_t
*rx_data
, uint32_t lro_start
,
271 uint32_t lro_num
, uint32_t pkt_len
)
273 rx_control_block_t
*current_rcb
;
274 union ixgbe_adv_rx_desc
*current_rbd
;
275 rx_control_block_t
*free_rcb
;
278 uint32_t last_pkt_len
;
283 ixgbe_t
*ixgbe
= rx_data
->rx_ring
->ixgbe
;
286 * If the free list is empty, we cannot proceed to send
287 * the current DMA buffer upstream. We'll have to return
288 * and use bcopy to process the packet.
290 if (ixgbe_atomic_reserve(&rx_data
->rcb_free
, lro_num
) < 0)
292 current_rcb
= rx_data
->work_list
[lro_start
];
295 * If any one of the rx data blocks can not support
296 * lro bind operation, We'll have to return and use
297 * bcopy to process the lro packet.
299 for (i
= lro_num
; i
> 0; i
--) {
301 * Sync up the data received
303 DMA_SYNC(¤t_rcb
->rx_buf
, DDI_DMA_SYNC_FORKERNEL
);
305 if (ixgbe_check_dma_handle(current_rcb
->rx_buf
.dma_handle
) !=
307 ddi_fm_service_impact(ixgbe
->dip
, DDI_SERVICE_DEGRADED
);
308 atomic_add_32(&rx_data
->rcb_free
, lro_num
);
309 atomic_or_32(&ixgbe
->ixgbe_state
, IXGBE_ERROR
);
314 * If the mp of the rx control block is NULL, try to do
317 if (current_rcb
->mp
== NULL
) {
318 current_rcb
->mp
= desballoc((unsigned char *)
319 current_rcb
->rx_buf
.address
,
320 current_rcb
->rx_buf
.size
,
321 0, ¤t_rcb
->free_rtn
);
323 * If it is failed to built a mblk using the current
324 * DMA buffer, we have to return and use bcopy to
325 * process the packet.
327 if (current_rcb
->mp
== NULL
) {
328 atomic_add_32(&rx_data
->rcb_free
, lro_num
);
332 if (current_rcb
->lro_next
!= -1)
333 lro_next
= current_rcb
->lro_next
;
334 current_rcb
= rx_data
->work_list
[lro_next
];
338 mblk_tail
= &mblk_head
;
339 lro_next
= lro_start
;
340 last_pkt_len
= pkt_len
- ixgbe
->rx_buf_size
* (lro_num
- 1);
341 current_rcb
= rx_data
->work_list
[lro_next
];
342 current_rbd
= &rx_data
->rbd_ring
[lro_next
];
344 mp
= current_rcb
->mp
;
345 current_rcb
->mp
= NULL
;
346 atomic_inc_32(¤t_rcb
->ref_cnt
);
348 mp
->b_wptr
= mp
->b_rptr
+ ixgbe
->rx_buf_size
;
350 mp
->b_wptr
= mp
->b_rptr
+ last_pkt_len
;
351 mp
->b_next
= mp
->b_cont
= NULL
;
353 mblk_tail
= &mp
->b_cont
;
356 * Strip off one free rx control block from the free list
358 free_index
= rx_data
->rcb_head
;
359 free_rcb
= rx_data
->free_list
[free_index
];
360 ASSERT(free_rcb
!= NULL
);
361 rx_data
->free_list
[free_index
] = NULL
;
362 rx_data
->rcb_head
= NEXT_INDEX(free_index
, 1,
363 rx_data
->free_list_size
);
366 * Put the rx control block to the work list
368 rx_data
->work_list
[lro_next
] = free_rcb
;
369 lro_next
= current_rcb
->lro_next
;
370 current_rcb
->lro_next
= -1;
371 current_rcb
->lro_prev
= -1;
372 current_rcb
->lro_pkt
= B_FALSE
;
373 current_rbd
->read
.pkt_addr
= free_rcb
->rx_buf
.dma_address
;
374 current_rbd
->read
.hdr_addr
= 0;
377 current_rcb
= rx_data
->work_list
[lro_next
];
378 current_rbd
= &rx_data
->rbd_ring
[lro_next
];
384 * ixgbe_lro_copy - Use copy to process the received LRO packet.
386 * This function will use bcopy to process the LRO packet
387 * and send the copied packet upstream.
390 ixgbe_lro_copy(ixgbe_rx_data_t
*rx_data
, uint32_t lro_start
,
391 uint32_t lro_num
, uint32_t pkt_len
)
394 rx_control_block_t
*current_rcb
;
395 union ixgbe_adv_rx_desc
*current_rbd
;
397 uint32_t last_pkt_len
;
401 ixgbe
= rx_data
->rx_ring
->ixgbe
;
404 * Allocate buffer to receive this LRO packet
406 mp
= allocb(pkt_len
+ IPHDR_ALIGN_ROOM
, 0);
408 ixgbe_log(ixgbe
, "LRO copy MP alloc failed");
412 current_rcb
= rx_data
->work_list
[lro_start
];
415 * Sync up the LRO packet data received
417 for (i
= lro_num
; i
> 0; i
--) {
418 DMA_SYNC(¤t_rcb
->rx_buf
, DDI_DMA_SYNC_FORKERNEL
);
420 if (ixgbe_check_dma_handle(current_rcb
->rx_buf
.dma_handle
) !=
422 ddi_fm_service_impact(ixgbe
->dip
, DDI_SERVICE_DEGRADED
);
423 atomic_or_32(&ixgbe
->ixgbe_state
, IXGBE_ERROR
);
426 if (current_rcb
->lro_next
!= -1)
427 lro_next
= current_rcb
->lro_next
;
428 current_rcb
= rx_data
->work_list
[lro_next
];
430 lro_next
= lro_start
;
431 current_rcb
= rx_data
->work_list
[lro_next
];
432 current_rbd
= &rx_data
->rbd_ring
[lro_next
];
433 last_pkt_len
= pkt_len
- ixgbe
->rx_buf_size
* (lro_num
- 1);
436 * Copy the data received into the new cluster
438 mp
->b_rptr
+= IPHDR_ALIGN_ROOM
;
439 mp
->b_wptr
+= IPHDR_ALIGN_ROOM
;
442 bcopy(current_rcb
->rx_buf
.address
, mp
->b_wptr
,
444 mp
->b_wptr
+= ixgbe
->rx_buf_size
;
446 bcopy(current_rcb
->rx_buf
.address
, mp
->b_wptr
,
448 mp
->b_wptr
+= last_pkt_len
;
450 lro_next
= current_rcb
->lro_next
;
451 current_rcb
->lro_next
= -1;
452 current_rcb
->lro_prev
= -1;
453 current_rcb
->lro_pkt
= B_FALSE
;
454 current_rbd
->read
.pkt_addr
= current_rcb
->rx_buf
.dma_address
;
455 current_rbd
->read
.hdr_addr
= 0;
458 current_rcb
= rx_data
->work_list
[lro_next
];
459 current_rbd
= &rx_data
->rbd_ring
[lro_next
];
466 * ixgbe_lro_get_start - get the start rcb index in one LRO packet
469 ixgbe_lro_get_start(ixgbe_rx_data_t
*rx_data
, uint32_t rx_next
)
473 uint32_t lro_num
= 1;
474 rx_control_block_t
*prev_rcb
;
475 rx_control_block_t
*current_rcb
= rx_data
->work_list
[rx_next
];
476 lro_prev
= current_rcb
->lro_prev
;
478 while (lro_prev
!= -1) {
480 prev_rcb
= rx_data
->work_list
[lro_prev
];
481 lro_start
= lro_prev
;
482 lro_prev
= prev_rcb
->lro_prev
;
484 rx_data
->lro_num
= lro_num
;
489 * ixgbe_lro_get_first - get the first LRO rcb index
492 ixgbe_lro_get_first(ixgbe_rx_data_t
*rx_data
, uint32_t rx_next
)
494 rx_control_block_t
*current_rcb
;
496 lro_first
= rx_data
->lro_first
;
497 current_rcb
= rx_data
->work_list
[lro_first
];
498 while ((!current_rcb
->lro_pkt
) && (lro_first
!= rx_next
)) {
499 lro_first
= NEXT_INDEX(lro_first
, 1, rx_data
->ring_size
);
500 current_rcb
= rx_data
->work_list
[lro_first
];
502 rx_data
->lro_first
= lro_first
;
507 * ixgbe_rx_assoc_hcksum - Check the rx hardware checksum status and associate
511 ixgbe_rx_assoc_hcksum(mblk_t
*mp
, uint32_t status_error
)
513 uint32_t hcksum_flags
= 0;
516 * Check TCP/UDP checksum
518 if ((status_error
& IXGBE_RXD_STAT_L4CS
) &&
519 !(status_error
& IXGBE_RXDADV_ERR_TCPE
))
520 hcksum_flags
|= HCK_FULLCKSUM_OK
;
525 if ((status_error
& IXGBE_RXD_STAT_IPCS
) &&
526 !(status_error
& IXGBE_RXDADV_ERR_IPE
))
527 hcksum_flags
|= HCK_IPV4_HDRCKSUM_OK
;
529 if (hcksum_flags
!= 0) {
530 mac_hcksum_set(mp
, 0, 0, 0, 0, hcksum_flags
);
535 * ixgbe_ring_rx - Receive the data of one ring.
537 * This function goes throught h/w descriptor in one specified rx ring,
538 * receives the data if the descriptor status shows the data is ready.
539 * It returns a chain of mblks containing the received data, to be
540 * passed up to mac_rx().
543 ixgbe_ring_rx(ixgbe_rx_ring_t
*rx_ring
, int poll_bytes
)
545 union ixgbe_adv_rx_desc
*current_rbd
;
546 rx_control_block_t
*current_rcb
;
553 uint32_t status_error
;
560 uint32_t received_bytes
;
561 ixgbe_t
*ixgbe
= rx_ring
->ixgbe
;
562 ixgbe_rx_data_t
*rx_data
;
564 if ((ixgbe
->ixgbe_state
& IXGBE_SUSPENDED
) ||
565 (ixgbe
->ixgbe_state
& IXGBE_ERROR
) ||
566 (ixgbe
->ixgbe_state
& IXGBE_OVERTEMP
) ||
567 !(ixgbe
->ixgbe_state
& IXGBE_STARTED
))
570 rx_data
= rx_ring
->rx_data
;
573 mblk_tail
= &mblk_head
;
576 * Sync the receive descriptors before accepting the packets
578 DMA_SYNC(&rx_data
->rbd_area
, DDI_DMA_SYNC_FORKERNEL
);
580 if (ixgbe_check_dma_handle(rx_data
->rbd_area
.dma_handle
) != DDI_FM_OK
) {
581 ddi_fm_service_impact(ixgbe
->dip
, DDI_SERVICE_DEGRADED
);
582 atomic_or_32(&ixgbe
->ixgbe_state
, IXGBE_ERROR
);
587 * Get the start point of rx bd ring which should be examined
590 rx_next
= rx_data
->rbd_next
;
591 current_rbd
= &rx_data
->rbd_ring
[rx_next
];
594 status_error
= current_rbd
->wb
.upper
.status_error
;
595 while (status_error
& IXGBE_RXD_STAT_DD
) {
597 * If adapter has found errors, but the error
598 * is hardware checksum error, this does not discard the
599 * packet: let upper layer compute the checksum;
600 * Otherwise discard the packet.
602 if ((status_error
& IXGBE_RXDADV_ERR_FRAME_ERR_MASK
) ||
603 ((!ixgbe
->lro_enable
) &&
604 (!(status_error
& IXGBE_RXD_STAT_EOP
)))) {
605 rx_ring
->stat_frame_error
++;
609 if ((status_error
& IXGBE_RXDADV_ERR_TCPE
) ||
610 (status_error
& IXGBE_RXDADV_ERR_IPE
))
611 rx_ring
->stat_cksum_error
++;
613 if (ixgbe
->lro_enable
) {
614 rsc_cnt
= (current_rbd
->wb
.lower
.lo_dword
.data
&
615 IXGBE_RXDADV_RSCCNT_MASK
) >>
616 IXGBE_RXDADV_RSCCNT_SHIFT
;
618 if (status_error
& IXGBE_RXD_STAT_EOP
) {
619 pkt_len
= current_rbd
->wb
.upper
.length
;
620 if (rx_data
->work_list
[rx_next
]->
623 ixgbe_lro_get_start(rx_data
,
625 ixgbe
->lro_pkt_count
++;
627 (rx_data
->lro_num
- 1) *
632 lro_next
= (status_error
&
633 IXGBE_RXDADV_NEXTP_MASK
) >>
634 IXGBE_RXDADV_NEXTP_SHIFT
;
635 rx_data
->work_list
[lro_next
]->lro_prev
637 rx_data
->work_list
[rx_next
]->lro_next
=
639 rx_data
->work_list
[rx_next
]->lro_pkt
=
645 pkt_len
= current_rbd
->wb
.upper
.length
;
648 pkt_len
= current_rbd
->wb
.upper
.length
;
652 if ((poll_bytes
!= IXGBE_POLL_NULL
) &&
653 ((received_bytes
+ pkt_len
) > poll_bytes
))
656 received_bytes
+= pkt_len
;
660 * For packets with length more than the copy threshold,
661 * we'll first try to use the existing DMA buffer to build
662 * an mblk and send the mblk upstream.
664 * If the first method fails, or the packet length is less
665 * than the copy threshold, we'll allocate a new mblk and
666 * copy the packet data to the new mblk.
669 mp
= ixgbe_lro_bind(rx_data
, lro_start
,
670 rx_data
->lro_num
, pkt_len
);
672 mp
= ixgbe_lro_copy(rx_data
, lro_start
,
673 rx_data
->lro_num
, pkt_len
);
675 rx_data
->lro_num
= 0;
678 if (pkt_len
> ixgbe
->rx_copy_thresh
)
679 mp
= ixgbe_rx_bind(rx_data
, rx_next
, pkt_len
);
682 mp
= ixgbe_rx_copy(rx_data
, rx_next
, pkt_len
);
686 * Check h/w checksum offload status
688 if (ixgbe
->rx_hcksum_enable
)
689 ixgbe_rx_assoc_hcksum(mp
, status_error
);
692 mblk_tail
= &mp
->b_next
;
697 * Reset rx descriptor read bits
699 current_rcb
= rx_data
->work_list
[rx_next
];
700 if (ixgbe
->lro_enable
) {
701 if (!current_rcb
->lro_pkt
) {
702 current_rbd
->read
.pkt_addr
=
703 current_rcb
->rx_buf
.dma_address
;
704 current_rbd
->read
.hdr_addr
= 0;
707 current_rbd
->read
.pkt_addr
=
708 current_rcb
->rx_buf
.dma_address
;
709 current_rbd
->read
.hdr_addr
= 0;
712 rx_next
= NEXT_INDEX(rx_next
, 1, rx_data
->ring_size
);
715 * The receive function is in interrupt context, so here
716 * rx_limit_per_intr is used to avoid doing receiving too long
719 if (++pkt_num
> ixgbe
->rx_limit_per_intr
) {
720 rx_ring
->stat_exceed_pkt
++;
724 current_rbd
= &rx_data
->rbd_ring
[rx_next
];
725 status_error
= current_rbd
->wb
.upper
.status_error
;
728 rx_ring
->stat_rbytes
+= received_bytes
;
729 rx_ring
->stat_ipackets
+= pkt_num
;
731 DMA_SYNC(&rx_data
->rbd_area
, DDI_DMA_SYNC_FORDEV
);
733 rx_data
->rbd_next
= rx_next
;
736 * Update the h/w tail accordingly
738 if (ixgbe
->lro_enable
) {
739 lro_first
= ixgbe_lro_get_first(rx_data
, rx_next
);
740 rx_tail
= PREV_INDEX(lro_first
, 1, rx_data
->ring_size
);
742 rx_tail
= PREV_INDEX(rx_next
, 1, rx_data
->ring_size
);
744 IXGBE_WRITE_REG(&ixgbe
->hw
, IXGBE_RDT(rx_ring
->hw_index
), rx_tail
);
746 if (ixgbe_check_acc_handle(ixgbe
->osdep
.reg_handle
) != DDI_FM_OK
) {
747 ddi_fm_service_impact(ixgbe
->dip
, DDI_SERVICE_DEGRADED
);
748 atomic_or_32(&ixgbe
->ixgbe_state
, IXGBE_ERROR
);
755 ixgbe_ring_rx_poll(void *arg
, int n_bytes
)
757 ixgbe_rx_ring_t
*rx_ring
= (ixgbe_rx_ring_t
*)arg
;
760 ASSERT(n_bytes
>= 0);
765 mutex_enter(&rx_ring
->rx_lock
);
766 mp
= ixgbe_ring_rx(rx_ring
, n_bytes
);
767 mutex_exit(&rx_ring
->rx_lock
);