2 * This file is provided under a CDDLv1 license. When using or
3 * redistributing this file, you may do so under this license.
4 * In redistributing this file this license must be included
5 * and no other modification of this header file is permitted.
9 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
11 * The contents of this file are subject to the terms of Version
12 * 1.0 of the Common Development and Distribution License (the "License").
14 * You should have received a copy of the License with this software.
15 * You can obtain a copy of the License at
16 * http://www.opensolaris.org/os/licensing.
17 * See the License for the specific language governing permissions
18 * and limitations under the License.
22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
26 * **********************************************************************
31 * This file contains some routines that take care of *
32 * memory allocation for descriptors and buffers. *
34 * **********************************************************************
37 #include "e1000g_sw.h"
38 #include "e1000g_debug.h"
/*
 * Size in bytes of the tx software-packet area: one tx_sw_packet_t per
 * entry on the tx free list.  Expands "Adapter", so it may only be used
 * where a local named Adapter is in scope.
 */
#define	TX_SW_PKT_AREA_SZ \
	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)
43 static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t
*);
44 static int e1000g_alloc_rx_descriptors(e1000g_rx_data_t
*);
45 static void e1000g_free_tx_descriptors(e1000g_tx_ring_t
*);
46 static void e1000g_free_rx_descriptors(e1000g_rx_data_t
*);
47 static int e1000g_alloc_tx_packets(e1000g_tx_ring_t
*);
48 static int e1000g_alloc_rx_packets(e1000g_rx_data_t
*);
49 static void e1000g_free_tx_packets(e1000g_tx_ring_t
*);
50 static void e1000g_free_rx_packets(e1000g_rx_data_t
*, boolean_t
);
51 static int e1000g_alloc_dma_buffer(struct e1000g
*,
52 dma_buffer_t
*, size_t, ddi_dma_attr_t
*p_dma_attr
);
55 * In order to avoid address error crossing 64KB boundary
56 * during PCI-X packets receving, e1000g_alloc_dma_buffer_82546
57 * is used by some necessary adapter types.
59 static int e1000g_alloc_dma_buffer_82546(struct e1000g
*,
60 dma_buffer_t
*, size_t, ddi_dma_attr_t
*p_dma_attr
);
61 static int e1000g_dma_mem_alloc_82546(dma_buffer_t
*buf
,
62 size_t size
, size_t *len
);
63 static boolean_t
e1000g_cross_64k_bound(void *, uintptr_t);
65 static void e1000g_free_dma_buffer(dma_buffer_t
*);
67 static int e1000g_alloc_dvma_buffer(struct e1000g
*, dma_buffer_t
*, size_t);
68 static void e1000g_free_dvma_buffer(dma_buffer_t
*);
70 static int e1000g_alloc_descriptors(struct e1000g
*Adapter
);
71 static void e1000g_free_descriptors(struct e1000g
*Adapter
);
72 static int e1000g_alloc_packets(struct e1000g
*Adapter
);
73 static void e1000g_free_packets(struct e1000g
*Adapter
);
74 static p_rx_sw_packet_t
e1000g_alloc_rx_sw_packet(e1000g_rx_data_t
*,
75 ddi_dma_attr_t
*p_dma_attr
);
77 /* DMA access attributes for descriptors <Little Endian> */
78 static ddi_device_acc_attr_t e1000g_desc_acc_attr
= {
84 /* DMA access attributes for DMA buffers */
86 static ddi_device_acc_attr_t e1000g_buf_acc_attr
= {
92 static ddi_device_acc_attr_t e1000g_buf_acc_attr
= {
99 /* DMA attributes for tx mblk buffers */
100 static ddi_dma_attr_t e1000g_tx_dma_attr
= {
101 DMA_ATTR_V0
, /* version of this structure */
102 0, /* lowest usable address */
103 0xffffffffffffffffULL
, /* highest usable address */
104 0x7fffffff, /* maximum DMAable byte count */
105 1, /* alignment in bytes */
106 0x7ff, /* burst sizes (any?) */
107 1, /* minimum transfer */
108 0xffffffffU
, /* maximum transfer */
109 0xffffffffffffffffULL
, /* maximum segment length */
110 MAX_COOKIES
, /* maximum number of segments */
112 DDI_DMA_FLAGERR
, /* dma_attr_flags */
115 /* DMA attributes for pre-allocated rx/tx buffers */
116 static ddi_dma_attr_t e1000g_buf_dma_attr
= {
117 DMA_ATTR_V0
, /* version of this structure */
118 0, /* lowest usable address */
119 0xffffffffffffffffULL
, /* highest usable address */
120 0x7fffffff, /* maximum DMAable byte count */
121 1, /* alignment in bytes */
122 0x7ff, /* burst sizes (any?) */
123 1, /* minimum transfer */
124 0xffffffffU
, /* maximum transfer */
125 0xffffffffffffffffULL
, /* maximum segment length */
126 1, /* maximum number of segments */
128 DDI_DMA_FLAGERR
, /* dma_attr_flags */
131 /* DMA attributes for rx/tx descriptors */
132 static ddi_dma_attr_t e1000g_desc_dma_attr
= {
133 DMA_ATTR_V0
, /* version of this structure */
134 0, /* lowest usable address */
135 0xffffffffffffffffULL
, /* highest usable address */
136 0x7fffffff, /* maximum DMAable byte count */
137 E1000_MDALIGN
, /* default alignment is 4k but can be changed */
138 0x7ff, /* burst sizes (any?) */
139 1, /* minimum transfer */
140 0xffffffffU
, /* maximum transfer */
141 0xffffffffffffffffULL
, /* maximum segment length */
142 1, /* maximum number of segments */
144 DDI_DMA_FLAGERR
, /* dma_attr_flags */
148 static ddi_dma_lim_t e1000g_dma_limits
= {
149 (uint_t
)0, /* dlim_addr_lo */
150 (uint_t
)0xffffffff, /* dlim_addr_hi */
151 (uint_t
)0xffffffff, /* dlim_cntr_max */
152 (uint_t
)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
153 0x1, /* dlim_minxfer */
154 1024 /* dlim_speed */
159 static dma_type_t e1000g_dma_type
= USE_DVMA
;
161 static dma_type_t e1000g_dma_type
= USE_DMA
;
164 extern krwlock_t e1000g_dma_type_lock
;
168 e1000g_alloc_dma_resources(struct e1000g
*Adapter
)
172 result
= DDI_FAILURE
;
174 while ((result
!= DDI_SUCCESS
) &&
175 (Adapter
->tx_desc_num
>= MIN_NUM_TX_DESCRIPTOR
) &&
176 (Adapter
->rx_desc_num
>= MIN_NUM_RX_DESCRIPTOR
) &&
177 (Adapter
->tx_freelist_num
>= MIN_NUM_TX_FREELIST
)) {
179 result
= e1000g_alloc_descriptors(Adapter
);
181 if (result
== DDI_SUCCESS
) {
182 result
= e1000g_alloc_packets(Adapter
);
184 if (result
!= DDI_SUCCESS
)
185 e1000g_free_descriptors(Adapter
);
189 * If the allocation fails due to resource shortage,
190 * we'll reduce the numbers of descriptors/buffers by
191 * half, and try the allocation again.
193 if (result
!= DDI_SUCCESS
) {
195 * We must ensure the number of descriptors
196 * is always a multiple of 8.
198 Adapter
->tx_desc_num
=
199 (Adapter
->tx_desc_num
>> 4) << 3;
200 Adapter
->rx_desc_num
=
201 (Adapter
->rx_desc_num
>> 4) << 3;
203 Adapter
->tx_freelist_num
>>= 1;
211 * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
213 * This routine allocates neccesary DMA buffers for
214 * Transmit Descriptor Area
215 * Receive Descrpitor Area
218 e1000g_alloc_descriptors(struct e1000g
*Adapter
)
221 e1000g_tx_ring_t
*tx_ring
;
222 e1000g_rx_data_t
*rx_data
;
224 if (Adapter
->mem_workaround_82546
&&
225 ((Adapter
->shared
.mac
.type
== e1000_82545
) ||
226 (Adapter
->shared
.mac
.type
== e1000_82546
) ||
227 (Adapter
->shared
.mac
.type
== e1000_82546_rev_3
))) {
228 /* Align on a 64k boundary for these adapter types */
229 Adapter
->desc_align
= E1000_MDALIGN_82546
;
231 /* Align on a 4k boundary for all other adapter types */
232 Adapter
->desc_align
= E1000_MDALIGN
;
235 tx_ring
= Adapter
->tx_ring
;
237 result
= e1000g_alloc_tx_descriptors(tx_ring
);
238 if (result
!= DDI_SUCCESS
)
239 return (DDI_FAILURE
);
241 rx_data
= Adapter
->rx_ring
->rx_data
;
243 result
= e1000g_alloc_rx_descriptors(rx_data
);
244 if (result
!= DDI_SUCCESS
) {
245 e1000g_free_tx_descriptors(tx_ring
);
246 return (DDI_FAILURE
);
249 return (DDI_SUCCESS
);
253 e1000g_free_descriptors(struct e1000g
*Adapter
)
255 e1000g_tx_ring_t
*tx_ring
;
256 e1000g_rx_data_t
*rx_data
;
258 tx_ring
= Adapter
->tx_ring
;
259 rx_data
= Adapter
->rx_ring
->rx_data
;
261 e1000g_free_tx_descriptors(tx_ring
);
262 e1000g_free_rx_descriptors(rx_data
);
266 e1000g_alloc_tx_descriptors(e1000g_tx_ring_t
*tx_ring
)
269 boolean_t alloc_flag
;
275 ddi_dma_cookie_t cookie
;
276 struct e1000g
*Adapter
;
277 ddi_dma_attr_t dma_attr
;
279 Adapter
= tx_ring
->adapter
;
280 devinfo
= Adapter
->dip
;
282 alloc_flag
= B_FALSE
;
283 dma_attr
= e1000g_desc_dma_attr
;
286 * Solaris 7 has a problem with allocating physically contiguous memory
287 * that is aligned on a 4K boundary. The transmit and rx descriptors
288 * need to aligned on a 4kbyte boundary. We first try to allocate the
289 * memory with DMA attributes set to 4K alignment and also no scatter/
290 * gather mechanism specified. In most cases, this does not allocate
291 * memory aligned at a 4Kbyte boundary. We then try asking for memory
292 * aligned on 4K boundary with scatter/gather set to 2. This works when
293 * the amount of memory is less than 4k i.e a page size. If neither of
294 * these options work or if the number of descriptors is greater than
295 * 4K, ie more than 256 descriptors, we allocate 4k extra memory and
296 * and then align the memory at a 4k boundary.
298 size
= sizeof (struct e1000_tx_desc
) * Adapter
->tx_desc_num
;
301 * Memory allocation for the transmit buffer descriptors.
303 dma_attr
.dma_attr_sgllen
= 1;
304 dma_attr
.dma_attr_align
= Adapter
->desc_align
;
307 * Allocate a new DMA handle for the transmit descriptor
310 mystat
= ddi_dma_alloc_handle(devinfo
, &dma_attr
,
312 &tx_ring
->tbd_dma_handle
);
314 if (mystat
!= DDI_SUCCESS
) {
315 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
316 "Could not allocate tbd dma handle: %d", mystat
);
317 tx_ring
->tbd_dma_handle
= NULL
;
318 return (DDI_FAILURE
);
322 * Allocate memory to DMA data to and from the transmit
325 mystat
= ddi_dma_mem_alloc(tx_ring
->tbd_dma_handle
,
327 &e1000g_desc_acc_attr
, DDI_DMA_CONSISTENT
,
329 (caddr_t
*)&tx_ring
->tbd_area
,
330 &len
, &tx_ring
->tbd_acc_handle
);
332 if ((mystat
!= DDI_SUCCESS
) ||
333 ((uintptr_t)tx_ring
->tbd_area
& (Adapter
->desc_align
- 1))) {
334 if (mystat
== DDI_SUCCESS
) {
335 ddi_dma_mem_free(&tx_ring
->tbd_acc_handle
);
336 tx_ring
->tbd_acc_handle
= NULL
;
337 tx_ring
->tbd_area
= NULL
;
339 if (tx_ring
->tbd_dma_handle
!= NULL
) {
340 ddi_dma_free_handle(&tx_ring
->tbd_dma_handle
);
341 tx_ring
->tbd_dma_handle
= NULL
;
343 alloc_flag
= B_FALSE
;
348 * Initialize the entire transmit buffer descriptor area to zero
351 bzero(tx_ring
->tbd_area
, len
);
354 * If the previous DMA attributes setting could not give us contiguous
355 * memory or the number of descriptors is greater than the page size,
356 * we allocate extra memory and then align it at appropriate boundary.
359 size
= size
+ Adapter
->desc_align
;
362 * DMA attributes set to no scatter/gather and 16 bit alignment
364 dma_attr
.dma_attr_align
= 1;
365 dma_attr
.dma_attr_sgllen
= 1;
368 * Allocate a new DMA handle for the transmit descriptor memory
371 mystat
= ddi_dma_alloc_handle(devinfo
, &dma_attr
,
373 &tx_ring
->tbd_dma_handle
);
375 if (mystat
!= DDI_SUCCESS
) {
376 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
377 "Could not re-allocate tbd dma handle: %d", mystat
);
378 tx_ring
->tbd_dma_handle
= NULL
;
379 return (DDI_FAILURE
);
383 * Allocate memory to DMA data to and from the transmit
386 mystat
= ddi_dma_mem_alloc(tx_ring
->tbd_dma_handle
,
388 &e1000g_desc_acc_attr
, DDI_DMA_CONSISTENT
,
390 (caddr_t
*)&tx_ring
->tbd_area
,
391 &len
, &tx_ring
->tbd_acc_handle
);
393 if (mystat
!= DDI_SUCCESS
) {
394 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
395 "Could not allocate tbd dma memory: %d", mystat
);
396 tx_ring
->tbd_acc_handle
= NULL
;
397 tx_ring
->tbd_area
= NULL
;
398 if (tx_ring
->tbd_dma_handle
!= NULL
) {
399 ddi_dma_free_handle(&tx_ring
->tbd_dma_handle
);
400 tx_ring
->tbd_dma_handle
= NULL
;
402 return (DDI_FAILURE
);
407 * Initialize the entire transmit buffer descriptor area to zero
409 bzero(tx_ring
->tbd_area
, len
);
411 * Memory has been allocated with the ddi_dma_mem_alloc call,
412 * but has not been aligned.
413 * We now align it on the appropriate boundary.
415 templong
= P2NPHASE((uintptr_t)tx_ring
->tbd_area
,
416 Adapter
->desc_align
);
417 len
= size
- templong
;
418 templong
+= (uintptr_t)tx_ring
->tbd_area
;
419 tx_ring
->tbd_area
= (struct e1000_tx_desc
*)templong
;
420 } /* alignment workaround */
423 * Transmit buffer descriptor memory allocation succeeded
428 * Allocates DMA resources for the memory that was allocated by
429 * the ddi_dma_mem_alloc call. The DMA resources then get bound to the
432 mystat
= ddi_dma_addr_bind_handle(tx_ring
->tbd_dma_handle
,
433 (struct as
*)NULL
, (caddr_t
)tx_ring
->tbd_area
,
434 len
, DDI_DMA_RDWR
| DDI_DMA_CONSISTENT
,
435 DDI_DMA_DONTWAIT
, 0, &cookie
, &cookie_count
);
437 if (mystat
!= DDI_SUCCESS
) {
438 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
439 "Could not bind tbd dma resource: %d", mystat
);
440 if (tx_ring
->tbd_acc_handle
!= NULL
) {
441 ddi_dma_mem_free(&tx_ring
->tbd_acc_handle
);
442 tx_ring
->tbd_acc_handle
= NULL
;
443 tx_ring
->tbd_area
= NULL
;
445 if (tx_ring
->tbd_dma_handle
!= NULL
) {
446 ddi_dma_free_handle(&tx_ring
->tbd_dma_handle
);
447 tx_ring
->tbd_dma_handle
= NULL
;
449 return (DDI_FAILURE
);
452 ASSERT(cookie_count
== 1); /* 1 cookie */
454 if (cookie_count
!= 1) {
455 E1000G_DEBUGLOG_2(Adapter
, E1000G_WARN_LEVEL
,
456 "Could not bind tbd dma resource in a single frag. "
457 "Count - %d Len - %d", cookie_count
, len
);
458 e1000g_free_tx_descriptors(tx_ring
);
459 return (DDI_FAILURE
);
462 tx_ring
->tbd_dma_addr
= cookie
.dmac_laddress
;
463 tx_ring
->tbd_first
= tx_ring
->tbd_area
;
464 tx_ring
->tbd_last
= tx_ring
->tbd_first
+
465 (Adapter
->tx_desc_num
- 1);
467 return (DDI_SUCCESS
);
471 e1000g_alloc_rx_descriptors(e1000g_rx_data_t
*rx_data
)
474 boolean_t alloc_flag
;
480 ddi_dma_cookie_t cookie
;
481 struct e1000g
*Adapter
;
482 ddi_dma_attr_t dma_attr
;
484 Adapter
= rx_data
->rx_ring
->adapter
;
485 devinfo
= Adapter
->dip
;
487 alloc_flag
= B_FALSE
;
488 dma_attr
= e1000g_desc_dma_attr
;
491 * Memory allocation for the receive buffer descriptors.
493 size
= (sizeof (struct e1000_rx_desc
)) * Adapter
->rx_desc_num
;
496 * Asking for aligned memory with DMA attributes set for suitable value
498 dma_attr
.dma_attr_sgllen
= 1;
499 dma_attr
.dma_attr_align
= Adapter
->desc_align
;
502 * Allocate a new DMA handle for the receive descriptors
504 mystat
= ddi_dma_alloc_handle(devinfo
, &dma_attr
,
506 &rx_data
->rbd_dma_handle
);
508 if (mystat
!= DDI_SUCCESS
) {
509 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
510 "Could not allocate rbd dma handle: %d", mystat
);
511 rx_data
->rbd_dma_handle
= NULL
;
512 return (DDI_FAILURE
);
515 * Allocate memory to DMA data to and from the receive
518 mystat
= ddi_dma_mem_alloc(rx_data
->rbd_dma_handle
,
520 &e1000g_desc_acc_attr
, DDI_DMA_CONSISTENT
,
522 (caddr_t
*)&rx_data
->rbd_area
,
523 &len
, &rx_data
->rbd_acc_handle
);
526 * Check if memory allocation succeeded and also if the
527 * allocated memory is aligned correctly.
529 if ((mystat
!= DDI_SUCCESS
) ||
530 ((uintptr_t)rx_data
->rbd_area
& (Adapter
->desc_align
- 1))) {
531 if (mystat
== DDI_SUCCESS
) {
532 ddi_dma_mem_free(&rx_data
->rbd_acc_handle
);
533 rx_data
->rbd_acc_handle
= NULL
;
534 rx_data
->rbd_area
= NULL
;
536 if (rx_data
->rbd_dma_handle
!= NULL
) {
537 ddi_dma_free_handle(&rx_data
->rbd_dma_handle
);
538 rx_data
->rbd_dma_handle
= NULL
;
540 alloc_flag
= B_FALSE
;
545 * Initialize the allocated receive descriptor memory to zero.
548 bzero((caddr_t
)rx_data
->rbd_area
, len
);
551 * If memory allocation did not succeed, do the alignment ourselves
554 dma_attr
.dma_attr_align
= 1;
555 dma_attr
.dma_attr_sgllen
= 1;
556 size
= size
+ Adapter
->desc_align
;
558 * Allocate a new DMA handle for the receive descriptor.
560 mystat
= ddi_dma_alloc_handle(devinfo
, &dma_attr
,
562 &rx_data
->rbd_dma_handle
);
564 if (mystat
!= DDI_SUCCESS
) {
565 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
566 "Could not re-allocate rbd dma handle: %d", mystat
);
567 rx_data
->rbd_dma_handle
= NULL
;
568 return (DDI_FAILURE
);
571 * Allocate memory to DMA data to and from the receive
574 mystat
= ddi_dma_mem_alloc(rx_data
->rbd_dma_handle
,
576 &e1000g_desc_acc_attr
, DDI_DMA_CONSISTENT
,
578 (caddr_t
*)&rx_data
->rbd_area
,
579 &len
, &rx_data
->rbd_acc_handle
);
581 if (mystat
!= DDI_SUCCESS
) {
582 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
583 "Could not allocate rbd dma memory: %d", mystat
);
584 rx_data
->rbd_acc_handle
= NULL
;
585 rx_data
->rbd_area
= NULL
;
586 if (rx_data
->rbd_dma_handle
!= NULL
) {
587 ddi_dma_free_handle(&rx_data
->rbd_dma_handle
);
588 rx_data
->rbd_dma_handle
= NULL
;
590 return (DDI_FAILURE
);
595 * Initialize the allocated receive descriptor memory to zero.
597 bzero((caddr_t
)rx_data
->rbd_area
, len
);
598 templong
= P2NPHASE((uintptr_t)rx_data
->rbd_area
,
599 Adapter
->desc_align
);
600 len
= size
- templong
;
601 templong
+= (uintptr_t)rx_data
->rbd_area
;
602 rx_data
->rbd_area
= (struct e1000_rx_desc
*)templong
;
603 } /* alignment workaround */
606 * The memory allocation of the receive descriptors succeeded
611 * Allocates DMA resources for the memory that was allocated by
612 * the ddi_dma_mem_alloc call.
614 mystat
= ddi_dma_addr_bind_handle(rx_data
->rbd_dma_handle
,
615 (struct as
*)NULL
, (caddr_t
)rx_data
->rbd_area
,
616 len
, DDI_DMA_RDWR
| DDI_DMA_CONSISTENT
,
617 DDI_DMA_DONTWAIT
, 0, &cookie
, &cookie_count
);
619 if (mystat
!= DDI_SUCCESS
) {
620 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
621 "Could not bind rbd dma resource: %d", mystat
);
622 if (rx_data
->rbd_acc_handle
!= NULL
) {
623 ddi_dma_mem_free(&rx_data
->rbd_acc_handle
);
624 rx_data
->rbd_acc_handle
= NULL
;
625 rx_data
->rbd_area
= NULL
;
627 if (rx_data
->rbd_dma_handle
!= NULL
) {
628 ddi_dma_free_handle(&rx_data
->rbd_dma_handle
);
629 rx_data
->rbd_dma_handle
= NULL
;
631 return (DDI_FAILURE
);
634 ASSERT(cookie_count
== 1);
635 if (cookie_count
!= 1) {
636 E1000G_DEBUGLOG_2(Adapter
, E1000G_WARN_LEVEL
,
637 "Could not bind rbd dma resource in a single frag. "
638 "Count - %d Len - %d", cookie_count
, len
);
639 e1000g_free_rx_descriptors(rx_data
);
640 return (DDI_FAILURE
);
643 rx_data
->rbd_dma_addr
= cookie
.dmac_laddress
;
644 rx_data
->rbd_first
= rx_data
->rbd_area
;
645 rx_data
->rbd_last
= rx_data
->rbd_first
+
646 (Adapter
->rx_desc_num
- 1);
648 return (DDI_SUCCESS
);
652 e1000g_free_rx_descriptors(e1000g_rx_data_t
*rx_data
)
654 if (rx_data
->rbd_dma_handle
!= NULL
) {
655 (void) ddi_dma_unbind_handle(rx_data
->rbd_dma_handle
);
657 if (rx_data
->rbd_acc_handle
!= NULL
) {
658 ddi_dma_mem_free(&rx_data
->rbd_acc_handle
);
659 rx_data
->rbd_acc_handle
= NULL
;
660 rx_data
->rbd_area
= NULL
;
662 if (rx_data
->rbd_dma_handle
!= NULL
) {
663 ddi_dma_free_handle(&rx_data
->rbd_dma_handle
);
664 rx_data
->rbd_dma_handle
= NULL
;
666 rx_data
->rbd_dma_addr
= NULL
;
667 rx_data
->rbd_first
= NULL
;
668 rx_data
->rbd_last
= NULL
;
672 e1000g_free_tx_descriptors(e1000g_tx_ring_t
*tx_ring
)
674 if (tx_ring
->tbd_dma_handle
!= NULL
) {
675 (void) ddi_dma_unbind_handle(tx_ring
->tbd_dma_handle
);
677 if (tx_ring
->tbd_acc_handle
!= NULL
) {
678 ddi_dma_mem_free(&tx_ring
->tbd_acc_handle
);
679 tx_ring
->tbd_acc_handle
= NULL
;
680 tx_ring
->tbd_area
= NULL
;
682 if (tx_ring
->tbd_dma_handle
!= NULL
) {
683 ddi_dma_free_handle(&tx_ring
->tbd_dma_handle
);
684 tx_ring
->tbd_dma_handle
= NULL
;
686 tx_ring
->tbd_dma_addr
= NULL
;
687 tx_ring
->tbd_first
= NULL
;
688 tx_ring
->tbd_last
= NULL
;
693 * e1000g_alloc_packets - allocate DMA buffers for rx/tx
695 * This routine allocates neccesary buffers for
696 * Transmit sw packet structure
697 * DMA handle for Transmit
698 * DMA buffer for Transmit
699 * Receive sw packet structure
700 * DMA buffer for Receive
703 e1000g_alloc_packets(struct e1000g
*Adapter
)
706 e1000g_tx_ring_t
*tx_ring
;
707 e1000g_rx_data_t
*rx_data
;
709 tx_ring
= Adapter
->tx_ring
;
710 rx_data
= Adapter
->rx_ring
->rx_data
;
713 rw_enter(&e1000g_dma_type_lock
, RW_READER
);
715 result
= e1000g_alloc_tx_packets(tx_ring
);
716 if (result
!= DDI_SUCCESS
) {
717 if (e1000g_dma_type
== USE_DVMA
) {
718 rw_exit(&e1000g_dma_type_lock
);
720 rw_enter(&e1000g_dma_type_lock
, RW_WRITER
);
721 e1000g_dma_type
= USE_DMA
;
722 rw_exit(&e1000g_dma_type_lock
);
724 E1000G_DEBUGLOG_0(Adapter
, E1000G_INFO_LEVEL
,
725 "No enough dvma resource for Tx packets, "
726 "trying to allocate dma buffers...\n");
729 rw_exit(&e1000g_dma_type_lock
);
731 E1000G_DEBUGLOG_0(Adapter
, E1000G_WARN_LEVEL
,
732 "Failed to allocate dma buffers for Tx packets\n");
733 return (DDI_FAILURE
);
736 result
= e1000g_alloc_rx_packets(rx_data
);
737 if (result
!= DDI_SUCCESS
) {
738 e1000g_free_tx_packets(tx_ring
);
739 if (e1000g_dma_type
== USE_DVMA
) {
740 rw_exit(&e1000g_dma_type_lock
);
742 rw_enter(&e1000g_dma_type_lock
, RW_WRITER
);
743 e1000g_dma_type
= USE_DMA
;
744 rw_exit(&e1000g_dma_type_lock
);
746 E1000G_DEBUGLOG_0(Adapter
, E1000G_INFO_LEVEL
,
747 "No enough dvma resource for Rx packets, "
748 "trying to allocate dma buffers...\n");
751 rw_exit(&e1000g_dma_type_lock
);
753 E1000G_DEBUGLOG_0(Adapter
, E1000G_WARN_LEVEL
,
754 "Failed to allocate dma buffers for Rx packets\n");
755 return (DDI_FAILURE
);
758 rw_exit(&e1000g_dma_type_lock
);
760 return (DDI_SUCCESS
);
764 e1000g_free_packets(struct e1000g
*Adapter
)
766 e1000g_tx_ring_t
*tx_ring
;
767 e1000g_rx_data_t
*rx_data
;
769 tx_ring
= Adapter
->tx_ring
;
770 rx_data
= Adapter
->rx_ring
->rx_data
;
772 e1000g_free_tx_packets(tx_ring
);
773 e1000g_free_rx_packets(rx_data
, B_FALSE
);
778 e1000g_alloc_dvma_buffer(struct e1000g
*Adapter
,
779 dma_buffer_t
*buf
, size_t size
)
783 ddi_dma_cookie_t cookie
;
785 if (e1000g_force_detach
)
786 devinfo
= Adapter
->priv_dip
;
788 devinfo
= Adapter
->dip
;
790 mystat
= dvma_reserve(devinfo
,
792 Adapter
->dvma_page_num
,
795 if (mystat
!= DDI_SUCCESS
) {
796 buf
->dma_handle
= NULL
;
797 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
798 "Could not allocate dvma buffer handle: %d\n", mystat
);
799 return (DDI_FAILURE
);
802 buf
->address
= kmem_alloc(size
, KM_NOSLEEP
);
804 if (buf
->address
== NULL
) {
805 if (buf
->dma_handle
!= NULL
) {
806 dvma_release(buf
->dma_handle
);
807 buf
->dma_handle
= NULL
;
809 E1000G_DEBUGLOG_0(Adapter
, E1000G_WARN_LEVEL
,
810 "Could not allocate dvma buffer memory\n");
811 return (DDI_FAILURE
);
814 dvma_kaddr_load(buf
->dma_handle
,
815 buf
->address
, size
, 0, &cookie
);
817 buf
->dma_address
= cookie
.dmac_laddress
;
821 return (DDI_SUCCESS
);
825 e1000g_free_dvma_buffer(dma_buffer_t
*buf
)
827 if (buf
->dma_handle
!= NULL
) {
828 dvma_unload(buf
->dma_handle
, 0, -1);
833 buf
->dma_address
= NULL
;
835 if (buf
->address
!= NULL
) {
836 kmem_free(buf
->address
, buf
->size
);
840 if (buf
->dma_handle
!= NULL
) {
841 dvma_release(buf
->dma_handle
);
842 buf
->dma_handle
= NULL
;
851 e1000g_alloc_dma_buffer(struct e1000g
*Adapter
,
852 dma_buffer_t
*buf
, size_t size
, ddi_dma_attr_t
*p_dma_attr
)
856 ddi_dma_cookie_t cookie
;
860 if (e1000g_force_detach
)
861 devinfo
= Adapter
->priv_dip
;
863 devinfo
= Adapter
->dip
;
865 mystat
= ddi_dma_alloc_handle(devinfo
,
870 if (mystat
!= DDI_SUCCESS
) {
871 buf
->dma_handle
= NULL
;
872 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
873 "Could not allocate dma buffer handle: %d\n", mystat
);
874 return (DDI_FAILURE
);
877 mystat
= ddi_dma_mem_alloc(buf
->dma_handle
,
878 size
, &e1000g_buf_acc_attr
, DDI_DMA_STREAMING
,
881 &len
, &buf
->acc_handle
);
883 if (mystat
!= DDI_SUCCESS
) {
884 buf
->acc_handle
= NULL
;
886 if (buf
->dma_handle
!= NULL
) {
887 ddi_dma_free_handle(&buf
->dma_handle
);
888 buf
->dma_handle
= NULL
;
890 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
891 "Could not allocate dma buffer memory: %d\n", mystat
);
892 return (DDI_FAILURE
);
895 mystat
= ddi_dma_addr_bind_handle(buf
->dma_handle
,
898 len
, DDI_DMA_RDWR
| DDI_DMA_STREAMING
,
899 DDI_DMA_DONTWAIT
, 0, &cookie
, &count
);
901 if (mystat
!= DDI_SUCCESS
) {
902 if (buf
->acc_handle
!= NULL
) {
903 ddi_dma_mem_free(&buf
->acc_handle
);
904 buf
->acc_handle
= NULL
;
907 if (buf
->dma_handle
!= NULL
) {
908 ddi_dma_free_handle(&buf
->dma_handle
);
909 buf
->dma_handle
= NULL
;
911 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
912 "Could not bind buffer dma handle: %d\n", mystat
);
913 return (DDI_FAILURE
);
918 if (buf
->dma_handle
!= NULL
) {
919 (void) ddi_dma_unbind_handle(buf
->dma_handle
);
921 if (buf
->acc_handle
!= NULL
) {
922 ddi_dma_mem_free(&buf
->acc_handle
);
923 buf
->acc_handle
= NULL
;
926 if (buf
->dma_handle
!= NULL
) {
927 ddi_dma_free_handle(&buf
->dma_handle
);
928 buf
->dma_handle
= NULL
;
930 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
931 "Could not bind buffer as a single frag. "
932 "Count = %d\n", count
);
933 return (DDI_FAILURE
);
936 buf
->dma_address
= cookie
.dmac_laddress
;
940 return (DDI_SUCCESS
);
944 * e1000g_alloc_dma_buffer_82546 - allocate a dma buffer along with all
945 * necessary handles. Same as e1000g_alloc_dma_buffer() except ensure
946 * that buffer that doesn't cross a 64k boundary.
949 e1000g_alloc_dma_buffer_82546(struct e1000g
*Adapter
,
950 dma_buffer_t
*buf
, size_t size
, ddi_dma_attr_t
*p_dma_attr
)
954 ddi_dma_cookie_t cookie
;
958 if (e1000g_force_detach
)
959 devinfo
= Adapter
->priv_dip
;
961 devinfo
= Adapter
->dip
;
963 mystat
= ddi_dma_alloc_handle(devinfo
,
968 if (mystat
!= DDI_SUCCESS
) {
969 buf
->dma_handle
= NULL
;
970 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
971 "Could not allocate dma buffer handle: %d\n", mystat
);
972 return (DDI_FAILURE
);
975 mystat
= e1000g_dma_mem_alloc_82546(buf
, size
, &len
);
976 if (mystat
!= DDI_SUCCESS
) {
977 buf
->acc_handle
= NULL
;
979 if (buf
->dma_handle
!= NULL
) {
980 ddi_dma_free_handle(&buf
->dma_handle
);
981 buf
->dma_handle
= NULL
;
983 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
984 "Could not allocate dma buffer memory: %d\n", mystat
);
985 return (DDI_FAILURE
);
988 mystat
= ddi_dma_addr_bind_handle(buf
->dma_handle
,
991 len
, DDI_DMA_READ
| DDI_DMA_STREAMING
,
992 DDI_DMA_DONTWAIT
, 0, &cookie
, &count
);
994 if (mystat
!= DDI_SUCCESS
) {
995 if (buf
->acc_handle
!= NULL
) {
996 ddi_dma_mem_free(&buf
->acc_handle
);
997 buf
->acc_handle
= NULL
;
1000 if (buf
->dma_handle
!= NULL
) {
1001 ddi_dma_free_handle(&buf
->dma_handle
);
1002 buf
->dma_handle
= NULL
;
1004 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
1005 "Could not bind buffer dma handle: %d\n", mystat
);
1006 return (DDI_FAILURE
);
1011 if (buf
->dma_handle
!= NULL
) {
1012 (void) ddi_dma_unbind_handle(buf
->dma_handle
);
1014 if (buf
->acc_handle
!= NULL
) {
1015 ddi_dma_mem_free(&buf
->acc_handle
);
1016 buf
->acc_handle
= NULL
;
1017 buf
->address
= NULL
;
1019 if (buf
->dma_handle
!= NULL
) {
1020 ddi_dma_free_handle(&buf
->dma_handle
);
1021 buf
->dma_handle
= NULL
;
1023 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
1024 "Could not bind buffer as a single frag. "
1025 "Count = %d\n", count
);
1026 return (DDI_FAILURE
);
1029 buf
->dma_address
= cookie
.dmac_laddress
;
1033 return (DDI_SUCCESS
);
1037 * e1000g_dma_mem_alloc_82546 - allocate a dma buffer, making up to
1038 * ALLOC_RETRY attempts to get a buffer that doesn't cross a 64k boundary.
1041 e1000g_dma_mem_alloc_82546(dma_buffer_t
*buf
, size_t size
, size_t *len
)
1043 #define ALLOC_RETRY 10
1046 ddi_acc_handle_t hold
[ALLOC_RETRY
];
1048 while (cnt
< ALLOC_RETRY
) {
1051 /* allocate memory */
1052 stat
= ddi_dma_mem_alloc(buf
->dma_handle
, size
,
1053 &e1000g_buf_acc_attr
, DDI_DMA_STREAMING
, DDI_DMA_DONTWAIT
,
1054 0, &buf
->address
, len
, &buf
->acc_handle
);
1056 if (stat
!= DDI_SUCCESS
) {
1061 * Check 64k bounday:
1062 * if it is bad, hold it and retry
1063 * if it is good, exit loop
1065 if (e1000g_cross_64k_bound(buf
->address
, *len
)) {
1066 hold
[cnt
] = buf
->acc_handle
;
1075 /* Release any held buffers crossing 64k bounday */
1076 for (--cnt
; cnt
>= 0; cnt
--) {
1078 ddi_dma_mem_free(&hold
[cnt
]);
1085 * e1000g_cross_64k_bound - If starting and ending address cross a 64k boundary
1086 * return true; otherwise return false
1089 e1000g_cross_64k_bound(void *addr
, uintptr_t len
)
1091 uintptr_t start
= (uintptr_t)addr
;
1092 uintptr_t end
= start
+ len
- 1;
1094 return (((start
^ end
) >> 16) == 0 ? B_FALSE
: B_TRUE
);
1098 e1000g_free_dma_buffer(dma_buffer_t
*buf
)
1100 if (buf
->dma_handle
!= NULL
) {
1101 (void) ddi_dma_unbind_handle(buf
->dma_handle
);
1106 buf
->dma_address
= NULL
;
1108 if (buf
->acc_handle
!= NULL
) {
1109 ddi_dma_mem_free(&buf
->acc_handle
);
1110 buf
->acc_handle
= NULL
;
1111 buf
->address
= NULL
;
1114 if (buf
->dma_handle
!= NULL
) {
1115 ddi_dma_free_handle(&buf
->dma_handle
);
1116 buf
->dma_handle
= NULL
;
1124 e1000g_alloc_tx_packets(e1000g_tx_ring_t
*tx_ring
)
1127 p_tx_sw_packet_t packet
;
1129 dma_buffer_t
*tx_buf
;
1130 struct e1000g
*Adapter
;
1131 dev_info_t
*devinfo
;
1132 ddi_dma_attr_t dma_attr
;
1134 Adapter
= tx_ring
->adapter
;
1135 devinfo
= Adapter
->dip
;
1136 dma_attr
= e1000g_buf_dma_attr
;
1139 * Memory allocation for the Transmit software structure, the transmit
1140 * software packet. This structure stores all the relevant information
1141 * for transmitting a single packet.
1143 tx_ring
->packet_area
=
1144 kmem_zalloc(TX_SW_PKT_AREA_SZ
, KM_NOSLEEP
);
1146 if (tx_ring
->packet_area
== NULL
)
1147 return (DDI_FAILURE
);
1149 for (j
= 0, packet
= tx_ring
->packet_area
;
1150 j
< Adapter
->tx_freelist_num
; j
++, packet
++) {
1152 ASSERT(packet
!= NULL
);
1155 * Pre-allocate dma handles for transmit. These dma handles
1156 * will be dynamically bound to the data buffers passed down
1157 * from the upper layers at the time of transmitting. The
1158 * dynamic binding only applies for the packets that are larger
1159 * than the tx_bcopy_thresh.
1161 switch (e1000g_dma_type
) {
1164 mystat
= dvma_reserve(devinfo
,
1166 Adapter
->dvma_page_num
,
1167 &packet
->tx_dma_handle
);
1171 mystat
= ddi_dma_alloc_handle(devinfo
,
1172 &e1000g_tx_dma_attr
,
1173 DDI_DMA_DONTWAIT
, 0,
1174 &packet
->tx_dma_handle
);
1180 if (mystat
!= DDI_SUCCESS
) {
1181 packet
->tx_dma_handle
= NULL
;
1182 E1000G_DEBUGLOG_1(Adapter
, E1000G_WARN_LEVEL
,
1183 "Could not allocate tx dma handle: %d\n", mystat
);
1188 * Pre-allocate transmit buffers for small packets that the
1189 * size is less than tx_bcopy_thresh. The data of those small
1190 * packets will be bcopy() to the transmit buffers instead of
1191 * using dynamical DMA binding. For small packets, bcopy will
1192 * bring better performance than DMA binding.
1194 tx_buf
= packet
->tx_buf
;
1196 switch (e1000g_dma_type
) {
1199 mystat
= e1000g_alloc_dvma_buffer(Adapter
,
1200 tx_buf
, Adapter
->tx_buffer_size
);
1204 mystat
= e1000g_alloc_dma_buffer(Adapter
,
1205 tx_buf
, Adapter
->tx_buffer_size
, &dma_attr
);
1211 if (mystat
!= DDI_SUCCESS
) {
1212 ASSERT(packet
->tx_dma_handle
!= NULL
);
1213 switch (e1000g_dma_type
) {
1216 dvma_release(packet
->tx_dma_handle
);
1220 ddi_dma_free_handle(&packet
->tx_dma_handle
);
1226 packet
->tx_dma_handle
= NULL
;
1227 E1000G_DEBUGLOG_0(Adapter
, E1000G_WARN_LEVEL
,
1228 "Allocate Tx buffer fail\n");
1232 packet
->dma_type
= e1000g_dma_type
;
1235 return (DDI_SUCCESS
);
1238 e1000g_free_tx_packets(tx_ring
);
1240 return (DDI_FAILURE
);
1245 e1000g_increase_rx_packets(e1000g_rx_data_t
*rx_data
)
1248 p_rx_sw_packet_t packet
;
1249 p_rx_sw_packet_t cur
, next
;
1250 struct e1000g
*Adapter
;
1251 ddi_dma_attr_t dma_attr
;
1253 Adapter
= rx_data
->rx_ring
->adapter
;
1254 dma_attr
= e1000g_buf_dma_attr
;
1255 dma_attr
.dma_attr_align
= Adapter
->rx_buf_align
;
1258 for (i
= 0; i
< RX_FREELIST_INCREASE_SIZE
; i
++) {
1259 packet
= e1000g_alloc_rx_sw_packet(rx_data
, &dma_attr
);
1265 Adapter
->rx_freelist_num
+= i
;
1266 rx_data
->avail_freepkt
+= i
;
1268 while (cur
!= NULL
) {
1269 QUEUE_PUSH_TAIL(&rx_data
->free_list
, &cur
->Link
);
1271 cur
->next
= rx_data
->packet_area
;
1272 rx_data
->packet_area
= cur
;
1277 return (DDI_SUCCESS
);
/*
 * e1000g_alloc_rx_packets - allocate the initial pool of rx_sw_packet
 * structures for a receive ring.
 *
 * On success every allocated packet is prepended to the
 * rx_data->packet_area singly linked list and DDI_SUCCESS is returned;
 * on allocation failure all partially allocated packets are torn down
 * via e1000g_free_rx_packets() and DDI_FAILURE is returned.
 *
 * NOTE(review): this extraction is lossy -- the return type, the
 * declaration of 'i', the NULL check after e1000g_alloc_rx_sw_packet(),
 * the QUEUE_PUSH_TAIL of each packet onto the free list, the failure
 * label, and closing braces are not visible here.
 */
1282 e1000g_alloc_rx_packets(e1000g_rx_data_t
*rx_data
)
1285 p_rx_sw_packet_t packet
;
1286 struct e1000g
*Adapter
;
1287 uint32_t packet_num
;
1288 ddi_dma_attr_t dma_attr
;
/*
 * Copy the global buffer DMA attributes and apply this adapter's
 * receive-buffer alignment requirement.
 */
1290 Adapter
= rx_data
->rx_ring
->adapter
;
1291 dma_attr
= e1000g_buf_dma_attr
;
1292 dma_attr
.dma_attr_align
= Adapter
->rx_buf_align
;
1295 * Allocate memory for the rx_sw_packet structures. Each one of these
1296 * structures will contain a virtual and physical address to an actual
1297 * receive buffer in host memory. Since we use one rx_sw_packet per
1298 * received packet, the maximum number of rx_sw_packet that we'll
1299 * need is equal to the number of receive descriptors plus the freelist
1302 packet_num
= Adapter
->rx_desc_num
+ RX_FREELIST_INCREASE_SIZE
;
1303 rx_data
->packet_area
= NULL
;
/* Allocate packet_num software packets, chaining each onto packet_area. */
1305 for (i
= 0; i
< packet_num
; i
++) {
1306 packet
= e1000g_alloc_rx_sw_packet(rx_data
, &dma_attr
);
1310 packet
->next
= rx_data
->packet_area
;
1311 rx_data
->packet_area
= packet
;
/* Record the initial free-list size for this adapter. */
1314 Adapter
->rx_freelist_num
= RX_FREELIST_INCREASE_SIZE
;
1315 return (DDI_SUCCESS
);
/*
 * Failure path (presumably reached via a goto from the loop above --
 * the label itself is missing from this extraction): release whatever
 * was allocated, including the DMA buffers (B_TRUE = full release).
 */
1318 e1000g_free_rx_packets(rx_data
, B_TRUE
);
1319 return (DDI_FAILURE
);
/*
 * e1000g_alloc_rx_sw_packet - allocate one software receive packet.
 *
 * Zero-allocates an rx_sw_packet_t, allocates its DMA receive buffer
 * (DVMA or DMA path depending on e1000g_dma_type, with a dedicated
 * 82545/82546 allocator when the 64KB-boundary memory workaround is
 * enabled), reserves E1000G_IPALIGNROOM bytes at the head of the buffer
 * so the IP header lands aligned, and wires up the desballoc() mblk
 * with e1000g_rxfree_func as its free routine.
 *
 * Fix in this revision: corrected the misspelled warning message
 * "Cound not allocate" -> "Could not allocate".
 *
 * NOTE(review): this extraction is lossy -- the 'mystat' declaration,
 * switch case labels, the 'return (NULL)' on the failure paths, the
 * final 'return (packet)', and closing braces are not visible here.
 */
1323 static p_rx_sw_packet_t
1324 e1000g_alloc_rx_sw_packet(e1000g_rx_data_t
*rx_data
, ddi_dma_attr_t
*p_dma_attr
)
1327 p_rx_sw_packet_t packet
;
1328 dma_buffer_t
*rx_buf
;
1329 struct e1000g
*Adapter
;
1331 Adapter
= rx_data
->rx_ring
->adapter
;
/* KM_NOSLEEP: may be called where blocking on memory is not allowed. */
1333 packet
= kmem_zalloc(sizeof (rx_sw_packet_t
), KM_NOSLEEP
);
1334 if (packet
== NULL
) {
1335 E1000G_DEBUGLOG_0(Adapter
, E1000G_WARN_LEVEL
,
1336 "Could not allocate memory for Rx SwPacket\n");
1340 rx_buf
= packet
->rx_buf
;
/* Allocate the actual receive data buffer per the global DMA type. */
1342 switch (e1000g_dma_type
) {
1345 mystat
= e1000g_alloc_dvma_buffer(Adapter
,
1346 rx_buf
, Adapter
->rx_buffer_size
);
/*
 * 82545/82546 parts with the memory workaround enabled must use the
 * allocator that avoids buffers crossing a 64KB boundary.
 */
1350 if (Adapter
->mem_workaround_82546
&&
1351 ((Adapter
->shared
.mac
.type
== e1000_82545
) ||
1352 (Adapter
->shared
.mac
.type
== e1000_82546
) ||
1353 (Adapter
->shared
.mac
.type
== e1000_82546_rev_3
))) {
1354 mystat
= e1000g_alloc_dma_buffer_82546(Adapter
,
1355 rx_buf
, Adapter
->rx_buffer_size
, p_dma_attr
);
1357 mystat
= e1000g_alloc_dma_buffer(Adapter
,
1358 rx_buf
, Adapter
->rx_buffer_size
, p_dma_attr
);
/* Buffer allocation failed: free the software packet and warn. */
1366 if (mystat
!= DDI_SUCCESS
) {
1368 kmem_free(packet
, sizeof (rx_sw_packet_t
));
1370 E1000G_DEBUGLOG_0(Adapter
, E1000G_WARN_LEVEL
,
1371 "Failed to allocate Rx buffer\n");
/*
 * Shift the usable buffer window forward by E1000G_IPALIGNROOM bytes
 * (undone again in e1000g_free_rx_sw_packet for the DVMA path).
 */
1375 rx_buf
->size
-= E1000G_IPALIGNROOM
;
1376 rx_buf
->address
+= E1000G_IPALIGNROOM
;
1377 rx_buf
->dma_address
+= E1000G_IPALIGNROOM
;
/* Back-pointer and STREAMS free routine for loaned-up mblks. */
1379 packet
->rx_data
= (caddr_t
)rx_data
;
1380 packet
->free_rtn
.free_func
= e1000g_rxfree_func
;
1381 packet
->free_rtn
.free_arg
= (char *)packet
;
1383 * esballoc is changed to desballoc which
1384 * is undocumented call but as per sun,
1385 * we can use it. It gives better efficiency.
1387 packet
->mp
= desballoc((unsigned char *)
1390 BPRI_MED
, &packet
->free_rtn
);
/* Record how the buffer was allocated and take the initial reference. */
1392 packet
->dma_type
= e1000g_dma_type
;
1393 packet
->ref_cnt
= 1;
/*
 * e1000g_free_rx_sw_packet - free one software receive packet.
 *
 * Frees the packet's mblk (if any), releases its DMA receive buffer by
 * the method recorded in packet->dma_type, and finally frees the
 * rx_sw_packet_t itself.  For the DVMA path the E1000G_IPALIGNROOM
 * head-room adjustment made at allocation time is undone before the
 * buffer is released.
 *
 * NOTE(review): this extraction is lossy -- switch case labels, braces,
 * and any use of 'full_release' (its role cannot be determined from the
 * visible lines) are missing here.
 */
1399 e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet
, boolean_t full_release
)
1401 dma_buffer_t
*rx_buf
;
/* Release the STREAMS message block first, if one was attached. */
1403 if (packet
->mp
!= NULL
) {
1404 freemsg(packet
->mp
);
1408 rx_buf
= packet
->rx_buf
;
/* Release the data buffer via the allocator that produced it. */
1410 switch (packet
->dma_type
) {
/* DVMA path: undo the IP-alignment offset applied at allocation. */
1413 if (rx_buf
->address
!= NULL
) {
1414 rx_buf
->size
+= E1000G_IPALIGNROOM
;
1415 rx_buf
->address
-= E1000G_IPALIGNROOM
;
1417 e1000g_free_dvma_buffer(rx_buf
);
1421 e1000g_free_dma_buffer(rx_buf
);
/* Mark the buffer as gone so a double free can be detected. */
1427 packet
->dma_type
= USE_NONE
;
1432 kmem_free(packet
, sizeof (rx_sw_packet_t
));
/*
 * e1000g_free_rx_packets - tear down a receive ring's packet pool.
 *
 * Walks the rx_data->packet_area chain under e1000g_rx_detach_lock,
 * dropping one reference on each packet; packets whose mblks are still
 * loaned upstream are accounted as pending rather than freed here,
 * while the rest are released via e1000g_free_rx_sw_packet().
 *
 * NOTE(review): this extraction is lossy -- the declaration of
 * 'ref_cnt', the branch that decides between the pending-accounting
 * path and the immediate-free path, and closing braces are missing, so
 * the exact branching cannot be confirmed from the visible lines.
 */
1436 e1000g_free_rx_packets(e1000g_rx_data_t
*rx_data
, boolean_t full_release
)
1438 p_rx_sw_packet_t packet
, next_packet
;
/* Serialize against the rx detach / loaned-buffer free path. */
1441 mutex_enter(&e1000g_rx_detach_lock
);
1443 packet
= rx_data
->packet_area
;
1444 while (packet
!= NULL
) {
1445 next_packet
= packet
->next
;
/* Drop our reference; atomic because upstream holders also decrement. */
1447 ref_cnt
= atomic_dec_32_nv(&packet
->ref_cnt
);
/* Packet still referenced elsewhere: count it as pending instead. */
1449 atomic_inc_32(&rx_data
->pending_count
);
1450 atomic_inc_32(&e1000g_mblks_pending
);
1452 e1000g_free_rx_sw_packet(packet
, full_release
);
1455 packet
= next_packet
;
/* The chain has been consumed; clear the list head. */
1459 rx_data
->packet_area
= NULL
;
1461 mutex_exit(&e1000g_rx_detach_lock
);
/*
 * e1000g_free_tx_packets - tear down a transmit ring's packet pool.
 *
 * Iterates over the tx_ring->packet_area array (tx_freelist_num
 * entries; see TX_SW_PKT_AREA_SZ), releasing each packet's DMA handle
 * and transmit buffer by the method recorded in packet->dma_type, then
 * frees the packet array itself.
 *
 * NOTE(review): this extraction is lossy -- the declaration of 'j',
 * switch case labels, the early 'continue'/'break' implied by the
 * comment at original lines 1499-1501, and closing braces are missing
 * from the visible text.
 */
1466 e1000g_free_tx_packets(e1000g_tx_ring_t
*tx_ring
)
1469 struct e1000g
*Adapter
;
1470 p_tx_sw_packet_t packet
;
1471 dma_buffer_t
*tx_buf
;
1473 Adapter
= tx_ring
->adapter
;
/* packet_area is a contiguous array, hence the packet++ walk. */
1475 for (j
= 0, packet
= tx_ring
->packet_area
;
1476 j
< Adapter
->tx_freelist_num
; j
++, packet
++) {
1481 /* Free the Tx DMA handle for dynamical binding */
1482 if (packet
->tx_dma_handle
!= NULL
) {
1483 switch (packet
->dma_type
) {
1486 dvma_release(packet
->tx_dma_handle
);
1490 ddi_dma_free_handle(&packet
->tx_dma_handle
);
1496 packet
->tx_dma_handle
= NULL
;
1499 * If the dma handle is NULL, then we don't
1500 * need to check the packets left. For they
1501 * have not been initialized or have been freed.
/* Release the transmit buffer via the allocator that produced it. */
1506 tx_buf
= packet
->tx_buf
;
1508 switch (packet
->dma_type
) {
1511 e1000g_free_dvma_buffer(tx_buf
);
1515 e1000g_free_dma_buffer(tx_buf
);
/* Mark the buffer as gone so a double free can be detected. */
1522 packet
->dma_type
= USE_NONE
;
/* Finally free the packet array allocated at ring setup. */
1524 if (tx_ring
->packet_area
!= NULL
) {
1525 kmem_free(tx_ring
->packet_area
, TX_SW_PKT_AREA_SZ
);
1526 tx_ring
->packet_area
= NULL
;
1531 * e1000g_release_dma_resources - release allocated DMA resources
1533 * This function releases any pending buffers that have been
1534 * previously allocated
/* Frees descriptor rings first, then the software packet pools. */
1537 e1000g_release_dma_resources(struct e1000g
*Adapter
)
1539 e1000g_free_descriptors(Adapter
);
1540 e1000g_free_packets(Adapter
);
1545 e1000g_set_fma_flags(int dma_flag
)
1548 e1000g_tx_dma_attr
.dma_attr_flags
= DDI_DMA_FLAGERR
;
1549 e1000g_buf_dma_attr
.dma_attr_flags
= DDI_DMA_FLAGERR
;
1550 e1000g_desc_dma_attr
.dma_attr_flags
= DDI_DMA_FLAGERR
;
1552 e1000g_tx_dma_attr
.dma_attr_flags
= 0;
1553 e1000g_buf_dma_attr
.dma_attr_flags
= 0;
1554 e1000g_desc_dma_attr
.dma_attr_flags
= 0;