2 * sfe_util.c: general ethernet mac driver framework version 2.6
4 * Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * 1. Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
16 * 3. Neither the name of the author nor the names of its contributors may be
17 * used to endorse or promote products derived from this software without
18 * specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
35 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
36 * Use is subject to license terms.
40 * System Header files.
42 #include <sys/types.h>
44 #include <sys/debug.h>
46 #include <sys/vtrace.h>
47 #include <sys/ethernet.h>
48 #include <sys/modctl.h>
49 #include <sys/errno.h>
51 #include <sys/sunddi.h>
52 #include <sys/stream.h> /* required for MBLK* */
53 #include <sys/strsun.h> /* required for mionack() */
54 #include <sys/byteorder.h>
55 #include <sys/sysmacros.h>
57 #include <inet/common.h>
61 #include <sys/crc32.h>
72 /* Debugging support */
73 #ifdef GEM_DEBUG_LEVEL
74 static int gem_debug
= GEM_DEBUG_LEVEL
;
75 #define DPRINTF(n, args) if (gem_debug > (n)) cmn_err args
77 #define DPRINTF(n, args)
82 #define IOC_LINESIZE 0x40 /* Is it right for amd64? */
85 * Useful macros and typedefs
87 #define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1))
89 #define GET_NET16(p) ((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
90 #define GET_ETHERTYPE(p) GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
92 #define GET_IPTYPEv4(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 9])
93 #define GET_IPTYPEv6(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 6])
97 #define INT32_MAX 0x7fffffff
100 #define VTAG_OFF (ETHERADDRL*2)
105 #define VTAG_TPID 0x8100U
108 #define GET_TXBUF(dp, sn) \
109 &(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
111 #define TXFLAG_VTAG(flag) \
112 (((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
114 #define MAXPKTBUF(dp) \
115 ((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
117 #define WATCH_INTERVAL_FAST drv_usectohz(100*1000) /* 100mS */
118 #define BOOLEAN(x) ((x) != 0)
121 * Macros to distinct chip generation.
127 static void gem_mii_start(struct gem_dev
*);
128 static void gem_mii_stop(struct gem_dev
*);
130 /* local buffer management */
131 static void gem_nd_setup(struct gem_dev
*dp
);
132 static void gem_nd_cleanup(struct gem_dev
*dp
);
133 static int gem_alloc_memory(struct gem_dev
*);
134 static void gem_free_memory(struct gem_dev
*);
135 static void gem_init_rx_ring(struct gem_dev
*);
136 static void gem_init_tx_ring(struct gem_dev
*);
137 __INLINE__
static void gem_append_rxbuf(struct gem_dev
*, struct rxbuf
*);
139 static void gem_tx_timeout(struct gem_dev
*);
140 static void gem_mii_link_watcher(struct gem_dev
*dp
);
141 static int gem_mac_init(struct gem_dev
*dp
);
142 static int gem_mac_start(struct gem_dev
*dp
);
143 static int gem_mac_stop(struct gem_dev
*dp
, uint_t flags
);
144 static void gem_mac_ioctl(struct gem_dev
*dp
, queue_t
*wq
, mblk_t
*mp
);
146 static struct ether_addr gem_etherbroadcastaddr
= {
147 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
150 int gem_speed_value
[] = {10, 100, 1000};
152 /* ============================================================== */
154 * Misc runtime routines
156 /* ============================================================== */
158 * Ether CRC calculation according to 21143 data sheet
161 gem_ether_crc_le(const uint8_t *addr
, int len
)
165 CRC32(crc
, addr
, ETHERADDRL
, 0xffffffffU
, crc32_table
);
170 gem_ether_crc_be(const uint8_t *addr
, int len
)
176 #define CRC32_POLY_BE 0x04c11db7
179 for (idx
= 0; idx
< len
; idx
++) {
180 for (data
= *addr
++, bit
= 0; bit
< 8; bit
++, data
>>= 1) {
182 ^ ((((crc
>> 31) ^ data
) & 1) ? CRC32_POLY_BE
: 0);
190 gem_prop_get_int(struct gem_dev
*dp
, char *prop_template
, int def_val
)
194 (void) sprintf(propname
, prop_template
, dp
->name
);
196 return (ddi_prop_get_int(DDI_DEV_T_ANY
, dp
->dip
,
197 DDI_PROP_DONTPASS
, propname
, def_val
));
201 gem_population(uint32_t x
)
207 for (i
= 0; i
< 32; i
++) {
215 #ifdef GEM_DEBUG_LEVEL
216 #ifdef GEM_DEBUG_VLAN
218 gem_dump_packet(struct gem_dev
*dp
, char *title
, mblk_t
*mp
,
219 boolean_t check_cksum
)
222 uint8_t buf
[18+20+20];
237 extern uint_t
ip_cksum(mblk_t
*, int, uint32_t);
244 for (tp
= mp
; tp
; tp
= tp
->b_cont
) {
245 len
= tp
->b_wptr
- tp
->b_rptr
;
246 len
= min(rest
, len
);
247 bcopy(tp
->b_rptr
, &buf
[offset
], len
);
258 /* ethernet address */
260 "ether: %02x:%02x:%02x:%02x:%02x:%02x"
261 " -> %02x:%02x:%02x:%02x:%02x:%02x",
262 p
[6], p
[7], p
[8], p
[9], p
[10], p
[11],
263 p
[0], p
[1], p
[2], p
[3], p
[4], p
[5]);
264 bp
= &msg
[strlen(msg
)];
266 /* vlag tag and etherrtype */
267 ethertype
= GET_ETHERTYPE(p
);
268 if (ethertype
== VTAG_TPID
) {
269 sprintf(bp
, " vtag:0x%04x", GET_NET16(&p
[14]));
270 bp
= &msg
[strlen(msg
)];
274 ethertype
= GET_ETHERTYPE(p
);
276 sprintf(bp
, " type:%04x", ethertype
);
277 bp
= &msg
[strlen(msg
)];
279 /* ethernet packet length */
280 sprintf(bp
, " mblklen:%d", msgdsize(mp
));
281 bp
= &msg
[strlen(msg
)];
284 bp
= &msg
[strlen(msg
)];
285 for (tp
= mp
; tp
; tp
= tp
->b_cont
) {
287 sprintf(bp
, "%d", tp
->b_wptr
- tp
->b_rptr
);
289 sprintf(bp
, "+%d", tp
->b_wptr
- tp
->b_rptr
);
291 bp
= &msg
[strlen(msg
)];
294 bp
= &msg
[strlen(msg
)];
297 if (ethertype
!= ETHERTYPE_IP
) {
302 offset
+= sizeof (struct ether_header
);
305 iplen
= GET_NET16(&p
[2]);
306 sprintf(bp
, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
307 p
[12], p
[13], p
[14], p
[15],
308 p
[16], p
[17], p
[18], p
[19],
310 bp
= (void *)&msg
[strlen(msg
)];
312 iphlen
= (p
[0] & 0xf) * 4;
314 /* cksum for psuedo header */
315 cksum
= *(uint16_t *)&p
[12];
316 cksum
+= *(uint16_t *)&p
[14];
317 cksum
+= *(uint16_t *)&p
[16];
318 cksum
+= *(uint16_t *)&p
[18];
319 cksum
+= BE_16(ipproto
);
321 /* tcp or udp protocol header */
324 if (ipproto
== IPPROTO_TCP
) {
325 tcplen
= iplen
- iphlen
;
326 sprintf(bp
, ", tcp: len:%d cksum:%x",
327 tcplen
, GET_NET16(&p
[16]));
328 bp
= (void *)&msg
[strlen(msg
)];
331 cksum
+= BE_16(tcplen
);
332 cksum
= (uint16_t)ip_cksum(mp
, offset
, cksum
);
334 (cksum
== 0 || cksum
== 0xffff) ? "ok" : "ng");
335 bp
= (void *)&msg
[strlen(msg
)];
337 } else if (ipproto
== IPPROTO_UDP
) {
338 udplen
= GET_NET16(&p
[4]);
339 sprintf(bp
, ", udp: len:%d cksum:%x",
340 udplen
, GET_NET16(&p
[6]));
341 bp
= (void *)&msg
[strlen(msg
)];
343 if (GET_NET16(&p
[6]) && check_cksum
) {
344 cksum
+= *(uint16_t *)&p
[4];
345 cksum
= (uint16_t)ip_cksum(mp
, offset
, cksum
);
347 (cksum
== 0 || cksum
== 0xffff) ? "ok" : "ng");
348 bp
= (void *)&msg
[strlen(msg
)];
352 cmn_err(CE_CONT
, "!%s: %s: %s", dp
->name
, title
, msg
);
354 #endif /* GEM_DEBUG_VLAN */
355 #endif /* GEM_DEBUG_LEVEL */
357 /* ============================================================== */
361 /* ============================================================== */
363 gem_rx_desc_dma_sync(struct gem_dev
*dp
, int head
, int nslot
, int how
)
367 int rx_desc_unit_shift
= dp
->gc
.gc_rx_desc_unit_shift
;
369 /* sync active descriptors */
370 if (rx_desc_unit_shift
< 0 || nslot
== 0) {
371 /* no rx descriptor ring */
375 n
= dp
->gc
.gc_rx_ring_size
- head
;
376 if ((m
= nslot
- n
) > 0) {
377 (void) ddi_dma_sync(dp
->desc_dma_handle
,
379 (size_t)(m
<< rx_desc_unit_shift
),
384 (void) ddi_dma_sync(dp
->desc_dma_handle
,
385 (off_t
)(head
<< rx_desc_unit_shift
),
386 (size_t)(nslot
<< rx_desc_unit_shift
),
391 gem_tx_desc_dma_sync(struct gem_dev
*dp
, int head
, int nslot
, int how
)
395 int tx_desc_unit_shift
= dp
->gc
.gc_tx_desc_unit_shift
;
397 /* sync active descriptors */
398 if (tx_desc_unit_shift
< 0 || nslot
== 0) {
399 /* no tx descriptor ring */
403 n
= dp
->gc
.gc_tx_ring_size
- head
;
404 if ((m
= nslot
- n
) > 0) {
405 (void) ddi_dma_sync(dp
->desc_dma_handle
,
406 (off_t
)(dp
->tx_ring_dma
- dp
->rx_ring_dma
),
407 (size_t)(m
<< tx_desc_unit_shift
),
412 (void) ddi_dma_sync(dp
->desc_dma_handle
,
413 (off_t
)((head
<< tx_desc_unit_shift
)
414 + (dp
->tx_ring_dma
- dp
->rx_ring_dma
)),
415 (size_t)(nslot
<< tx_desc_unit_shift
),
420 gem_rx_start_default(struct gem_dev
*dp
, int head
, int nslot
)
422 gem_rx_desc_dma_sync(dp
,
423 SLOT(head
, dp
->gc
.gc_rx_ring_size
), nslot
,
424 DDI_DMA_SYNC_FORDEV
);
427 /* ============================================================== */
431 /* ============================================================== */
433 gem_dump_txbuf(struct gem_dev
*dp
, int level
, const char *title
)
436 "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
437 "tx_softq: %d[%d] %d[%d] (+%d), "
438 "tx_free: %d[%d] %d[%d] (+%d), "
439 "tx_desc: %d[%d] %d[%d] (+%d), "
440 "intr: %d[%d] (+%d), ",
443 SLOT(dp
->tx_active_head
, dp
->gc
.gc_tx_buf_size
),
445 SLOT(dp
->tx_active_tail
, dp
->gc
.gc_tx_buf_size
),
446 dp
->tx_active_tail
- dp
->tx_active_head
,
448 SLOT(dp
->tx_softq_head
, dp
->gc
.gc_tx_buf_size
),
450 SLOT(dp
->tx_softq_tail
, dp
->gc
.gc_tx_buf_size
),
451 dp
->tx_softq_tail
- dp
->tx_softq_head
,
453 SLOT(dp
->tx_free_head
, dp
->gc
.gc_tx_buf_size
),
455 SLOT(dp
->tx_free_tail
, dp
->gc
.gc_tx_buf_size
),
456 dp
->tx_free_tail
- dp
->tx_free_head
,
458 SLOT(dp
->tx_desc_head
, dp
->gc
.gc_tx_ring_size
),
460 SLOT(dp
->tx_desc_tail
, dp
->gc
.gc_tx_ring_size
),
461 dp
->tx_desc_tail
- dp
->tx_desc_head
,
463 SLOT(dp
->tx_desc_intr
, dp
->gc
.gc_tx_ring_size
),
464 dp
->tx_desc_intr
- dp
->tx_desc_head
);
468 gem_free_rxbuf(struct rxbuf
*rbp
)
473 ASSERT(mutex_owned(&dp
->intrlock
));
474 rbp
->rxb_next
= dp
->rx_buf_freelist
;
475 dp
->rx_buf_freelist
= rbp
;
476 dp
->rx_buf_freecnt
++;
480 * gem_get_rxbuf: supply a receive buffer which have been mapped into
484 gem_get_rxbuf(struct gem_dev
*dp
, int cansleep
)
491 ASSERT(mutex_owned(&dp
->intrlock
));
493 DPRINTF(3, (CE_CONT
, "!gem_get_rxbuf: called freecnt:%d",
494 dp
->rx_buf_freecnt
));
496 * Get rx buffer management structure
498 rbp
= dp
->rx_buf_freelist
;
500 /* get one from the recycle list */
501 ASSERT(dp
->rx_buf_freecnt
> 0);
503 dp
->rx_buf_freelist
= rbp
->rxb_next
;
504 dp
->rx_buf_freecnt
--;
505 rbp
->rxb_next
= NULL
;
510 * Allocate a rx buffer management structure
512 rbp
= kmem_zalloc(sizeof (*rbp
), cansleep
? KM_SLEEP
: KM_NOSLEEP
);
519 * Prepare a back pointer to the device structure which will be
520 * refered on freeing the buffer later.
524 /* allocate a dma handle for rx data buffer */
525 if ((err
= ddi_dma_alloc_handle(dp
->dip
,
526 &dp
->gc
.gc_dma_attr_rxbuf
,
527 (cansleep
? DDI_DMA_SLEEP
: DDI_DMA_DONTWAIT
),
528 NULL
, &rbp
->rxb_dh
)) != DDI_SUCCESS
) {
531 "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
532 dp
->name
, __func__
, err
);
534 kmem_free(rbp
, sizeof (struct rxbuf
));
538 /* allocate a bounce buffer for rx */
539 if ((err
= ddi_dma_mem_alloc(rbp
->rxb_dh
,
540 ROUNDUP(dp
->rx_buf_len
, IOC_LINESIZE
),
543 * if the nic requires a header at the top of receive buffers,
544 * it may access the rx buffer randomly.
546 (dp
->gc
.gc_rx_header_len
> 0)
547 ? DDI_DMA_CONSISTENT
: DDI_DMA_STREAMING
,
548 cansleep
? DDI_DMA_SLEEP
: DDI_DMA_DONTWAIT
,
550 &rbp
->rxb_buf
, &rbp
->rxb_buf_len
,
551 &rbp
->rxb_bah
)) != DDI_SUCCESS
) {
554 "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
555 dp
->name
, __func__
, err
);
557 ddi_dma_free_handle(&rbp
->rxb_dh
);
558 kmem_free(rbp
, sizeof (struct rxbuf
));
562 /* Mapin the bounce buffer into the DMA space */
563 if ((err
= ddi_dma_addr_bind_handle(rbp
->rxb_dh
,
564 NULL
, rbp
->rxb_buf
, dp
->rx_buf_len
,
565 ((dp
->gc
.gc_rx_header_len
> 0)
566 ?(DDI_DMA_RDWR
| DDI_DMA_CONSISTENT
)
567 :(DDI_DMA_READ
| DDI_DMA_STREAMING
)),
568 cansleep
? DDI_DMA_SLEEP
: DDI_DMA_DONTWAIT
,
571 &count
)) != DDI_DMA_MAPPED
) {
573 ASSERT(err
!= DDI_DMA_INUSE
);
575 "!%s: ddi_dma_addr_bind_handle: failed, err=%d",
576 dp
->name
, __func__
, err
));
579 * we failed to allocate a dma resource
580 * for the rx bounce buffer.
582 ddi_dma_mem_free(&rbp
->rxb_bah
);
583 ddi_dma_free_handle(&rbp
->rxb_dh
);
584 kmem_free(rbp
, sizeof (struct rxbuf
));
588 /* correct the rest of the DMA mapping */
589 for (i
= 1; i
< count
; i
++) {
590 ddi_dma_nextcookie(rbp
->rxb_dh
, &rbp
->rxb_dmacookie
[i
]);
592 rbp
->rxb_nfrags
= count
;
594 /* Now we successfully prepared an rx buffer */
595 dp
->rx_buf_allocated
++;
600 /* ============================================================== */
602 * memory resource management
604 /* ============================================================== */
606 gem_alloc_memory(struct gem_dev
*dp
)
613 ddi_dma_cookie_t ring_cookie
;
614 ddi_dma_cookie_t buf_cookie
;
620 ddi_dma_attr_t dma_attr_txbounce
;
622 DPRINTF(1, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
624 dp
->desc_dma_handle
= NULL
;
625 req_size
= dp
->rx_desc_size
+ dp
->tx_desc_size
+ dp
->gc
.gc_io_area_size
;
629 * Alloc RX/TX descriptors and a io area.
631 if ((err
= ddi_dma_alloc_handle(dp
->dip
,
632 &dp
->gc
.gc_dma_attr_desc
,
634 &dp
->desc_dma_handle
)) != DDI_SUCCESS
) {
636 "!%s: %s: ddi_dma_alloc_handle failed: %d",
637 dp
->name
, __func__
, err
);
641 if ((err
= ddi_dma_mem_alloc(dp
->desc_dma_handle
,
642 req_size
, &dp
->gc
.gc_desc_attr
,
643 DDI_DMA_CONSISTENT
, DDI_DMA_SLEEP
, NULL
,
645 &dp
->desc_acc_handle
)) != DDI_SUCCESS
) {
647 "!%s: %s: ddi_dma_mem_alloc failed: "
648 "ret %d, request size: %d",
649 dp
->name
, __func__
, err
, (int)req_size
);
650 ddi_dma_free_handle(&dp
->desc_dma_handle
);
654 if ((err
= ddi_dma_addr_bind_handle(dp
->desc_dma_handle
,
655 NULL
, ring
, ring_len
,
656 DDI_DMA_RDWR
| DDI_DMA_CONSISTENT
,
658 &ring_cookie
, &count
)) != DDI_SUCCESS
) {
659 ASSERT(err
!= DDI_DMA_INUSE
);
661 "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
662 dp
->name
, __func__
, err
);
663 ddi_dma_mem_free(&dp
->desc_acc_handle
);
664 ddi_dma_free_handle(&dp
->desc_dma_handle
);
669 /* set base of rx descriptor ring */
671 dp
->rx_ring_dma
= ring_cookie
.dmac_laddress
;
673 /* set base of tx descriptor ring */
674 dp
->tx_ring
= dp
->rx_ring
+ dp
->rx_desc_size
;
675 dp
->tx_ring_dma
= dp
->rx_ring_dma
+ dp
->rx_desc_size
;
677 /* set base of io area */
678 dp
->io_area
= dp
->tx_ring
+ dp
->tx_desc_size
;
679 dp
->io_area_dma
= dp
->tx_ring_dma
+ dp
->tx_desc_size
;
683 * Prepare DMA resources for tx packets
685 ASSERT(dp
->gc
.gc_tx_buf_size
> 0);
687 /* Special dma attribute for tx bounce buffers */
688 dma_attr_txbounce
= dp
->gc
.gc_dma_attr_txbuf
;
689 dma_attr_txbounce
.dma_attr_sgllen
= 1;
690 dma_attr_txbounce
.dma_attr_align
=
691 max(dma_attr_txbounce
.dma_attr_align
, IOC_LINESIZE
);
693 /* Size for tx bounce buffers must be max tx packet size. */
694 tx_buf_len
= MAXPKTBUF(dp
);
695 tx_buf_len
= ROUNDUP(tx_buf_len
, IOC_LINESIZE
);
697 ASSERT(tx_buf_len
>= ETHERMAX
+ETHERFCSL
);
699 for (i
= 0, tbp
= dp
->tx_buf
;
700 i
< dp
->gc
.gc_tx_buf_size
; i
++, tbp
++) {
702 /* setup bounce buffers for tx packets */
703 if ((err
= ddi_dma_alloc_handle(dp
->dip
,
706 &tbp
->txb_bdh
)) != DDI_SUCCESS
) {
709 "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
711 dp
->name
, __func__
, err
, i
);
715 if ((err
= ddi_dma_mem_alloc(tbp
->txb_bdh
,
718 DDI_DMA_STREAMING
, DDI_DMA_SLEEP
, NULL
,
720 &tbp
->txb_bah
)) != DDI_SUCCESS
) {
722 "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed"
723 "ret %d, request size %d",
724 dp
->name
, __func__
, err
, tx_buf_len
);
725 ddi_dma_free_handle(&tbp
->txb_bdh
);
729 if ((err
= ddi_dma_addr_bind_handle(tbp
->txb_bdh
,
731 DDI_DMA_WRITE
| DDI_DMA_STREAMING
,
733 &buf_cookie
, &count
)) != DDI_SUCCESS
) {
734 ASSERT(err
!= DDI_DMA_INUSE
);
736 "!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
737 dp
->name
, __func__
, err
);
738 ddi_dma_mem_free(&tbp
->txb_bah
);
739 ddi_dma_free_handle(&tbp
->txb_bdh
);
744 tbp
->txb_buf_dma
= buf_cookie
.dmac_laddress
;
750 if (dp
->gc
.gc_tx_buf_size
> 0) {
752 (void) ddi_dma_unbind_handle(dp
->tx_buf
[i
].txb_bdh
);
753 ddi_dma_mem_free(&dp
->tx_buf
[i
].txb_bah
);
754 ddi_dma_free_handle(&dp
->tx_buf
[i
].txb_bdh
);
758 if (dp
->desc_dma_handle
) {
759 (void) ddi_dma_unbind_handle(dp
->desc_dma_handle
);
760 ddi_dma_mem_free(&dp
->desc_acc_handle
);
761 ddi_dma_free_handle(&dp
->desc_dma_handle
);
762 dp
->desc_dma_handle
= NULL
;
769 gem_free_memory(struct gem_dev
*dp
)
775 DPRINTF(1, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
777 /* Free TX/RX descriptors and tx padding buffer */
778 if (dp
->desc_dma_handle
) {
779 (void) ddi_dma_unbind_handle(dp
->desc_dma_handle
);
780 ddi_dma_mem_free(&dp
->desc_acc_handle
);
781 ddi_dma_free_handle(&dp
->desc_dma_handle
);
782 dp
->desc_dma_handle
= NULL
;
785 /* Free dma handles for Tx */
786 for (i
= dp
->gc
.gc_tx_buf_size
, tbp
= dp
->tx_buf
; i
--; tbp
++) {
787 /* Free bounce buffer associated to each txbuf */
788 (void) ddi_dma_unbind_handle(tbp
->txb_bdh
);
789 ddi_dma_mem_free(&tbp
->txb_bah
);
790 ddi_dma_free_handle(&tbp
->txb_bdh
);
794 while ((rbp
= dp
->rx_buf_freelist
) != NULL
) {
796 ASSERT(dp
->rx_buf_freecnt
> 0);
798 dp
->rx_buf_freelist
= rbp
->rxb_next
;
799 dp
->rx_buf_freecnt
--;
801 /* release DMA mapping */
802 ASSERT(rbp
->rxb_dh
!= NULL
);
804 /* free dma handles for rx bbuf */
805 /* it has dma mapping always */
806 ASSERT(rbp
->rxb_nfrags
> 0);
807 (void) ddi_dma_unbind_handle(rbp
->rxb_dh
);
809 /* free the associated bounce buffer and dma handle */
810 ASSERT(rbp
->rxb_bah
!= NULL
);
811 ddi_dma_mem_free(&rbp
->rxb_bah
);
812 /* free the associated dma handle */
813 ddi_dma_free_handle(&rbp
->rxb_dh
);
815 /* free the base memory of rx buffer management */
816 kmem_free(rbp
, sizeof (struct rxbuf
));
820 /* ============================================================== */
822 * Rx/Tx descriptor slot management
824 /* ============================================================== */
826 * Initialize an empty rx ring.
829 gem_init_rx_ring(struct gem_dev
*dp
)
832 int rx_ring_size
= dp
->gc
.gc_rx_ring_size
;
834 DPRINTF(1, (CE_CONT
, "!%s: %s ring_size:%d, buf_max:%d",
836 rx_ring_size
, dp
->gc
.gc_rx_buf_max
));
838 /* make a physical chain of rx descriptors */
839 for (i
= 0; i
< rx_ring_size
; i
++) {
840 (*dp
->gc
.gc_rx_desc_init
)(dp
, i
);
842 gem_rx_desc_dma_sync(dp
, 0, rx_ring_size
, DDI_DMA_SYNC_FORDEV
);
844 dp
->rx_active_head
= (seqnum_t
)0;
845 dp
->rx_active_tail
= (seqnum_t
)0;
847 ASSERT(dp
->rx_buf_head
== NULL
);
848 ASSERT(dp
->rx_buf_tail
== NULL
);
852 * Prepare rx buffers and put them into the rx buffer/descriptor ring.
855 gem_prepare_rx_buf(struct gem_dev
*dp
)
861 ASSERT(mutex_owned(&dp
->intrlock
));
863 /* Now we have no active buffers in rx ring */
865 nrbuf
= min(dp
->gc
.gc_rx_ring_size
, dp
->gc
.gc_rx_buf_max
);
866 for (i
= 0; i
< nrbuf
; i
++) {
867 if ((rbp
= gem_get_rxbuf(dp
, B_TRUE
)) == NULL
) {
870 gem_append_rxbuf(dp
, rbp
);
873 gem_rx_desc_dma_sync(dp
,
874 0, dp
->gc
.gc_rx_ring_size
, DDI_DMA_SYNC_FORDEV
);
878 * Reclaim active rx buffers in rx buffer ring.
881 gem_clean_rx_buf(struct gem_dev
*dp
)
885 int rx_ring_size
= dp
->gc
.gc_rx_ring_size
;
886 #ifdef GEM_DEBUG_LEVEL
889 ASSERT(mutex_owned(&dp
->intrlock
));
891 DPRINTF(2, (CE_CONT
, "!%s: %s: %d buffers are free",
892 dp
->name
, __func__
, dp
->rx_buf_freecnt
));
894 * clean up HW descriptors
896 for (i
= 0; i
< rx_ring_size
; i
++) {
897 (*dp
->gc
.gc_rx_desc_clean
)(dp
, i
);
899 gem_rx_desc_dma_sync(dp
, 0, rx_ring_size
, DDI_DMA_SYNC_FORDEV
);
901 #ifdef GEM_DEBUG_LEVEL
905 * Reclaim allocated rx buffers
907 while ((rbp
= dp
->rx_buf_head
) != NULL
) {
908 #ifdef GEM_DEBUG_LEVEL
911 /* remove the first one from rx buffer list */
912 dp
->rx_buf_head
= rbp
->rxb_next
;
914 /* recycle the rxbuf */
917 dp
->rx_buf_tail
= NULL
;
920 "!%s: %s: %d buffers freeed, total: %d free",
921 dp
->name
, __func__
, total
, dp
->rx_buf_freecnt
));
925 * Initialize an empty transmit buffer/descriptor ring
928 gem_init_tx_ring(struct gem_dev
*dp
)
931 int tx_buf_size
= dp
->gc
.gc_tx_buf_size
;
932 int tx_ring_size
= dp
->gc
.gc_tx_ring_size
;
934 DPRINTF(2, (CE_CONT
, "!%s: %s: ring_size:%d, buf_size:%d",
936 dp
->gc
.gc_tx_ring_size
, dp
->gc
.gc_tx_buf_size
));
938 ASSERT(!dp
->mac_active
);
940 /* initialize active list and free list */
942 SLOT(dp
->tx_slots_base
+ dp
->tx_softq_head
, tx_buf_size
);
943 dp
->tx_softq_tail
-= dp
->tx_softq_head
;
944 dp
->tx_softq_head
= (seqnum_t
)0;
946 dp
->tx_active_head
= dp
->tx_softq_head
;
947 dp
->tx_active_tail
= dp
->tx_softq_head
;
949 dp
->tx_free_head
= dp
->tx_softq_tail
;
950 dp
->tx_free_tail
= dp
->gc
.gc_tx_buf_limit
;
952 dp
->tx_desc_head
= (seqnum_t
)0;
953 dp
->tx_desc_tail
= (seqnum_t
)0;
954 dp
->tx_desc_intr
= (seqnum_t
)0;
956 for (i
= 0; i
< tx_ring_size
; i
++) {
957 (*dp
->gc
.gc_tx_desc_init
)(dp
, i
);
959 gem_tx_desc_dma_sync(dp
, 0, tx_ring_size
, DDI_DMA_SYNC_FORDEV
);
964 gem_txbuf_free_dma_resources(struct txbuf
*tbp
)
967 freemsg(tbp
->txb_mp
);
973 #pragma inline(gem_txbuf_free_dma_resources)
976 * reclaim active tx buffers and reset positions in tx rings.
979 gem_clean_tx_buf(struct gem_dev
*dp
)
986 int tx_ring_size
= dp
->gc
.gc_tx_ring_size
;
987 #ifdef GEM_DEBUG_LEVEL
991 ASSERT(!dp
->mac_active
);
992 ASSERT(dp
->tx_busy
== 0);
993 ASSERT(dp
->tx_softq_tail
== dp
->tx_free_head
);
996 * clean up all HW descriptors
998 for (i
= 0; i
< tx_ring_size
; i
++) {
999 (*dp
->gc
.gc_tx_desc_clean
)(dp
, i
);
1001 gem_tx_desc_dma_sync(dp
, 0, tx_ring_size
, DDI_DMA_SYNC_FORDEV
);
1003 /* dequeue all active and loaded buffers */
1004 head
= dp
->tx_active_head
;
1005 tail
= dp
->tx_softq_tail
;
1007 ASSERT(dp
->tx_free_head
- head
>= 0);
1008 tbp
= GET_TXBUF(dp
, head
);
1009 for (sn
= head
; sn
!= tail
; sn
++) {
1010 gem_txbuf_free_dma_resources(tbp
);
1011 ASSERT(tbp
->txb_mp
== NULL
);
1013 tbp
= tbp
->txb_next
;
1016 #ifdef GEM_DEBUG_LEVEL
1017 /* ensure no dma resources for tx are not in use now */
1019 while (sn
!= head
+ dp
->gc
.gc_tx_buf_size
) {
1020 if (tbp
->txb_mp
|| tbp
->txb_nfrags
) {
1021 DPRINTF(0, (CE_CONT
,
1022 "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
1024 sn
, SLOT(sn
, dp
->gc
.gc_tx_buf_size
),
1025 tbp
->txb_mp
, tbp
->txb_nfrags
));
1029 tbp
= tbp
->txb_next
;
1033 gem_dump_txbuf(dp
, CE_WARN
,
1034 "gem_clean_tx_buf: tbp->txb_mp != NULL");
1037 /* recycle buffers, now no active tx buffers in the ring */
1038 dp
->tx_free_tail
+= tail
- head
;
1039 ASSERT(dp
->tx_free_tail
== dp
->tx_free_head
+ dp
->gc
.gc_tx_buf_limit
);
1041 /* fix positions in tx buffer rings */
1042 dp
->tx_active_head
= dp
->tx_free_head
;
1043 dp
->tx_active_tail
= dp
->tx_free_head
;
1044 dp
->tx_softq_head
= dp
->tx_free_head
;
1045 dp
->tx_softq_tail
= dp
->tx_free_head
;
1049 * Reclaim transmitted buffers from tx buffer/descriptor ring.
1052 gem_reclaim_txbuf(struct gem_dev
*dp
)
1056 int err
= GEM_SUCCESS
;
1061 int tx_ring_size
= dp
->gc
.gc_tx_ring_size
;
1062 uint_t (*tx_desc_stat
)(struct gem_dev
*dp
,
1063 int slot
, int ndesc
) = dp
->gc
.gc_tx_desc_stat
;
1066 now
= ddi_get_lbolt();
1067 if (now
== (clock_t)0) {
1068 /* make non-zero timestamp */
1072 mutex_enter(&dp
->xmitlock
);
1074 head
= dp
->tx_active_head
;
1075 tail
= dp
->tx_active_tail
;
1077 #if GEM_DEBUG_LEVEL > 2
1079 cmn_err(CE_CONT
, "!%s: %s: "
1080 "testing active_head:%d[%d], active_tail:%d[%d]",
1082 head
, SLOT(head
, dp
->gc
.gc_tx_buf_size
),
1083 tail
, SLOT(tail
, dp
->gc
.gc_tx_buf_size
));
1087 if (dp
->tx_reclaim_busy
== 0) {
1088 /* check tx buffer management consistency */
1089 ASSERT(dp
->tx_free_tail
- dp
->tx_active_head
1090 == dp
->gc
.gc_tx_buf_limit
);
1094 dp
->tx_reclaim_busy
++;
1096 /* sync all active HW descriptors */
1097 gem_tx_desc_dma_sync(dp
,
1098 SLOT(dp
->tx_desc_head
, tx_ring_size
),
1099 dp
->tx_desc_tail
- dp
->tx_desc_head
,
1100 DDI_DMA_SYNC_FORKERNEL
);
1102 tbp
= GET_TXBUF(dp
, head
);
1103 desc_head
= dp
->tx_desc_head
;
1104 for (sn
= head
; sn
!= tail
;
1105 dp
->tx_active_head
= (++sn
), tbp
= tbp
->txb_next
) {
1108 ASSERT(tbp
->txb_desc
== desc_head
);
1110 ndescs
= tbp
->txb_ndescs
;
1112 /* skip errored descriptors */
1115 txstat
= (*tx_desc_stat
)(dp
,
1116 SLOT(tbp
->txb_desc
, tx_ring_size
), ndescs
);
1119 /* not transmitted yet */
1123 if (!dp
->tx_blocked
&& (tbp
->txb_flag
& GEM_TXFLAG_INTR
)) {
1124 dp
->tx_blocked
= now
;
1127 ASSERT(txstat
& (GEM_TX_DONE
| GEM_TX_ERR
));
1129 if (txstat
& GEM_TX_ERR
) {
1131 cmn_err(CE_WARN
, "!%s: tx error at desc %d[%d]",
1132 dp
->name
, sn
, SLOT(sn
, tx_ring_size
));
1134 #if GEM_DEBUG_LEVEL > 4
1135 if (now
- tbp
->txb_stime
>= 50) {
1136 cmn_err(CE_WARN
, "!%s: tx delay while %d mS",
1137 dp
->name
, (now
- tbp
->txb_stime
)*10);
1140 /* free transmitted descriptors */
1141 desc_head
+= ndescs
;
1144 if (dp
->tx_desc_head
!= desc_head
) {
1145 /* we have reclaimed one or more tx buffers */
1146 dp
->tx_desc_head
= desc_head
;
1148 /* If we passed the next interrupt position, update it */
1149 if (desc_head
- dp
->tx_desc_intr
> 0) {
1150 dp
->tx_desc_intr
= desc_head
;
1153 mutex_exit(&dp
->xmitlock
);
1155 /* free dma mapping resources associated with transmitted tx buffers */
1156 tbp
= GET_TXBUF(dp
, head
);
1158 #if GEM_DEBUG_LEVEL > 2
1160 cmn_err(CE_CONT
, "%s: freeing head:%d[%d], tail:%d[%d]",
1162 head
, SLOT(head
, dp
->gc
.gc_tx_buf_size
),
1163 tail
, SLOT(tail
, dp
->gc
.gc_tx_buf_size
));
1166 for (sn
= head
; sn
!= tail
; sn
++, tbp
= tbp
->txb_next
) {
1167 gem_txbuf_free_dma_resources(tbp
);
1170 /* recycle the tx buffers */
1171 mutex_enter(&dp
->xmitlock
);
1172 if (--dp
->tx_reclaim_busy
== 0) {
1173 /* we are the last thread who can update free tail */
1174 #if GEM_DEBUG_LEVEL > 4
1175 /* check all resouces have been deallocated */
1176 sn
= dp
->tx_free_tail
;
1177 tbp
= GET_TXBUF(dp
, new_tail
);
1178 while (sn
!= dp
->tx_active_head
+ dp
->gc
.gc_tx_buf_limit
) {
1179 if (tbp
->txb_nfrags
) {
1183 ASSERT(tbp
->txb_mp
== NULL
);
1184 tbp
= tbp
->txb_next
;
1187 ASSERT(dp
->tx_active_head
+ dp
->gc
.gc_tx_buf_limit
== sn
);
1190 dp
->tx_active_head
+ dp
->gc
.gc_tx_buf_limit
;
1192 if (!dp
->mac_active
) {
1193 /* someone may be waiting for me. */
1194 cv_broadcast(&dp
->tx_drain_cv
);
1196 #if GEM_DEBUG_LEVEL > 2
1197 cmn_err(CE_CONT
, "!%s: %s: called, "
1198 "free_head:%d free_tail:%d(+%d) added:%d",
1200 dp
->tx_free_head
, dp
->tx_free_tail
,
1201 dp
->tx_free_tail
- dp
->tx_free_head
, tail
- head
);
1203 mutex_exit(&dp
->xmitlock
);
1207 #pragma inline(gem_reclaim_txbuf)
1211 * Make tx descriptors in out-of-order manner
1214 gem_tx_load_descs_oo(struct gem_dev
*dp
,
1215 seqnum_t start_slot
, seqnum_t end_slot
, uint64_t flags
)
1219 int tx_ring_size
= dp
->gc
.gc_tx_ring_size
;
1220 int (*tx_desc_write
)
1221 (struct gem_dev
*dp
, int slot
,
1222 ddi_dma_cookie_t
*dmacookie
,
1223 int frags
, uint64_t flag
) = dp
->gc
.gc_tx_desc_write
;
1224 clock_t now
= ddi_get_lbolt();
1227 tbp
= GET_TXBUF(dp
, sn
);
1229 #if GEM_DEBUG_LEVEL > 1
1230 if (dp
->tx_cnt
< 100) {
1232 flags
|= GEM_TXFLAG_INTR
;
1235 /* write a tx descriptor */
1237 tbp
->txb_ndescs
= (*tx_desc_write
)(dp
,
1238 SLOT(sn
, tx_ring_size
),
1240 tbp
->txb_nfrags
, flags
| tbp
->txb_flag
);
1241 tbp
->txb_stime
= now
;
1242 ASSERT(tbp
->txb_ndescs
== 1);
1246 tbp
= tbp
->txb_next
;
1247 } while (sn
!= end_slot
);
1252 gem_setup_txbuf_copy(struct gem_dev
*dp
, mblk_t
*mp
, struct txbuf
*tbp
)
1261 ASSERT(tbp
->txb_mp
== NULL
);
1263 /* we use bounce buffer for the packet */
1269 flag
= tbp
->txb_flag
;
1270 if (flag
& GEM_TXFLAG_SWVTAG
) {
1271 /* need to increase min packet size */
1272 min_pkt
+= VTAG_SIZE
;
1273 ASSERT((flag
& GEM_TXFLAG_VTAG
) == 0);
1277 for (; tp
; tp
= tp
->b_cont
) {
1278 if ((len
= (long)tp
->b_wptr
- (long)tp
->b_rptr
) > 0) {
1279 bcopy(tp
->b_rptr
, &bp
[off
], len
);
1284 if (off
< min_pkt
&&
1285 (min_pkt
> ETHERMIN
|| !dp
->gc
.gc_tx_auto_pad
)) {
1287 * Extend the packet to minimum packet size explicitly.
1288 * For software vlan packets, we shouldn't use tx autopad
1289 * function because nics may not be aware of vlan.
1290 * we must keep 46 octet of payload even if we use vlan.
1292 bzero(&bp
[off
], min_pkt
- off
);
1296 (void) ddi_dma_sync(tbp
->txb_bdh
, (off_t
)0, off
, DDI_DMA_SYNC_FORDEV
);
1298 tbp
->txb_dmacookie
[0].dmac_laddress
= tbp
->txb_buf_dma
;
1299 tbp
->txb_dmacookie
[0].dmac_size
= off
;
1301 DPRINTF(2, (CE_CONT
,
1302 "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
1304 tbp
->txb_dmacookie
[0].dmac_laddress
,
1305 tbp
->txb_dmacookie
[0].dmac_size
,
1306 (flag
& GEM_TXFLAG_VTAG
) >> GEM_TXFLAG_VTAG_SHIFT
,
1309 /* save misc info */
1311 tbp
->txb_nfrags
= 1;
1312 #ifdef DEBUG_MULTIFRAGS
1313 if (dp
->gc
.gc_tx_max_frags
>= 3 &&
1314 tbp
->txb_dmacookie
[0].dmac_size
> 16*3) {
1315 tbp
->txb_dmacookie
[1].dmac_laddress
=
1316 tbp
->txb_dmacookie
[0].dmac_laddress
+ 16;
1317 tbp
->txb_dmacookie
[2].dmac_laddress
=
1318 tbp
->txb_dmacookie
[1].dmac_laddress
+ 16;
1320 tbp
->txb_dmacookie
[2].dmac_size
=
1321 tbp
->txb_dmacookie
[0].dmac_size
- 16*2;
1322 tbp
->txb_dmacookie
[1].dmac_size
= 16;
1323 tbp
->txb_dmacookie
[0].dmac_size
= 16;
1324 tbp
->txb_nfrags
= 3;
1329 #pragma inline(gem_setup_txbuf_copy)
1333 gem_tx_start_unit(struct gem_dev
*dp
)
1337 struct txbuf
*tbp_head
;
1338 struct txbuf
*tbp_tail
;
1340 /* update HW descriptors from soft queue */
1341 ASSERT(mutex_owned(&dp
->xmitlock
));
1342 ASSERT(dp
->tx_softq_head
== dp
->tx_active_tail
);
1344 head
= dp
->tx_softq_head
;
1345 tail
= dp
->tx_softq_tail
;
1347 DPRINTF(1, (CE_CONT
,
1348 "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
1349 dp
->name
, __func__
, head
, tail
, tail
- head
,
1350 dp
->tx_desc_head
, dp
->tx_desc_tail
,
1351 dp
->tx_desc_tail
- dp
->tx_desc_head
));
1353 ASSERT(tail
- head
> 0);
1355 dp
->tx_desc_tail
= tail
;
1357 tbp_head
= GET_TXBUF(dp
, head
);
1358 tbp_tail
= GET_TXBUF(dp
, tail
- 1);
1360 ASSERT(tbp_tail
->txb_desc
+ tbp_tail
->txb_ndescs
== dp
->tx_desc_tail
);
1362 dp
->gc
.gc_tx_start(dp
,
1363 SLOT(tbp_head
->txb_desc
, dp
->gc
.gc_tx_ring_size
),
1364 tbp_tail
->txb_desc
+ tbp_tail
->txb_ndescs
- tbp_head
->txb_desc
);
1366 /* advance softq head and active tail */
1367 dp
->tx_softq_head
= dp
->tx_active_tail
= tail
;
1369 #pragma inline(gem_tx_start_unit)
1371 #ifdef GEM_DEBUG_LEVEL
1372 static int gem_send_cnt
[10];
1374 #define PKT_MIN_SIZE (sizeof (struct ether_header) + 10 + VTAG_SIZE)
1375 #define EHLEN (sizeof (struct ether_header))
1377 * check ether packet type and ip protocol
1380 gem_txbuf_options(struct gem_dev
*dp
, mblk_t
*mp
, uint8_t *bp
)
1391 * prepare continuous header of the packet for protocol analysis
1393 if ((long)mp
->b_wptr
- (long)mp
->b_rptr
< PKT_MIN_SIZE
) {
1394 /* we use work buffer to copy mblk */
1395 for (tp
= mp
, off
= 0;
1396 tp
&& (off
< PKT_MIN_SIZE
);
1397 tp
= tp
->b_cont
, off
+= len
) {
1398 len
= (long)tp
->b_wptr
- (long)tp
->b_rptr
;
1399 len
= min(len
, PKT_MIN_SIZE
- off
);
1400 bcopy(tp
->b_rptr
, &bp
[off
], len
);
1403 /* we can use mblk without copy */
1407 /* process vlan tag for GLD v3 */
1408 if (GET_NET16(&bp
[VTAG_OFF
]) == VTAG_TPID
) {
1409 if (dp
->misc_flag
& GEM_VLAN_HARD
) {
1410 vtag
= GET_NET16(&bp
[VTAG_OFF
+ 2]);
1412 flag
|= vtag
<< GEM_TXFLAG_VTAG_SHIFT
;
1414 flag
|= GEM_TXFLAG_SWVTAG
;
1422 * gem_send_common is an exported function because hw depend routines may
1423 * use it for sending control frames like setup frames for 2114x chipset.
1426 gem_send_common(struct gem_dev
*dp
, mblk_t
*mp_head
, uint32_t flags
)
1435 uint64_t load_flags
;
1436 uint64_t len_total
= 0;
1440 ASSERT(mp_head
!= NULL
);
1444 while ((mp
= mp
->b_next
) != NULL
) {
1447 #ifdef GEM_DEBUG_LEVEL
1449 gem_send_cnt
[min(nmblk
, 9)]++;
1454 mutex_enter(&dp
->xmitlock
);
1455 if (dp
->mac_suspended
) {
1456 mutex_exit(&dp
->xmitlock
);
1466 if (!dp
->mac_active
&& (flags
& GEM_SEND_CTRL
) == 0) {
1467 /* don't send data packets while mac isn't active */
1468 /* XXX - should we discard packets? */
1469 mutex_exit(&dp
->xmitlock
);
1473 /* allocate free slots */
1474 head
= dp
->tx_free_head
;
1475 avail
= dp
->tx_free_tail
- head
;
1477 DPRINTF(2, (CE_CONT
,
1478 "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
1480 dp
->tx_free_head
, dp
->tx_free_tail
, avail
, nmblk
));
1482 avail
= min(avail
, dp
->tx_max_packets
);
1484 if (nmblk
> avail
) {
1486 /* no resources; short cut */
1487 DPRINTF(2, (CE_CONT
, "!%s: no resources", __func__
));
1488 dp
->tx_max_packets
= max(dp
->tx_max_packets
- 1, 1);
1494 dp
->tx_free_head
= head
+ nmblk
;
1495 load_flags
= ((dp
->tx_busy
++) == 0) ? GEM_TXFLAG_HEAD
: 0;
1497 /* update last interrupt position if tx buffers exhaust. */
1498 if (nmblk
== avail
) {
1499 tbp
= GET_TXBUF(dp
, head
+ avail
- 1);
1500 tbp
->txb_flag
= GEM_TXFLAG_INTR
;
1501 dp
->tx_desc_intr
= head
+ avail
;
1503 mutex_exit(&dp
->xmitlock
);
1505 tbp
= GET_TXBUF(dp
, head
);
1507 for (i
= nmblk
; i
> 0; i
--, tbp
= tbp
->txb_next
) {
1511 /* remove one from the mblk list */
1512 ASSERT(mp_head
!= NULL
);
1514 mp_head
= mp_head
->b_next
;
1517 /* statistics for non-unicast packets */
1519 if ((bp
[0] & 1) && (flags
& GEM_SEND_CTRL
) == 0) {
1520 if (bcmp(bp
, gem_etherbroadcastaddr
.ether_addr_octet
,
1528 /* save misc info */
1529 txflag
= tbp
->txb_flag
;
1530 txflag
|= (flags
& GEM_SEND_CTRL
) << GEM_TXFLAG_PRIVATE_SHIFT
;
1531 txflag
|= gem_txbuf_options(dp
, mp
, (uint8_t *)tbp
->txb_buf
);
1532 tbp
->txb_flag
= txflag
;
1534 len_total
+= gem_setup_txbuf_copy(dp
, mp
, tbp
);
1537 (void) gem_tx_load_descs_oo(dp
, head
, head
+ nmblk
, load_flags
);
1539 /* Append the tbp at the tail of the active tx buffer list */
1540 mutex_enter(&dp
->xmitlock
);
1542 if ((--dp
->tx_busy
) == 0) {
1543 /* extend the tail of softq, as new packets have been ready. */
1544 dp
->tx_softq_tail
= dp
->tx_free_head
;
1546 if (!dp
->mac_active
&& (flags
& GEM_SEND_CTRL
) == 0) {
1548 * The device status has changed while we are
1550 * As we are the last one that make tx non-busy.
1551 * wake up someone who may wait for us.
1553 cv_broadcast(&dp
->tx_drain_cv
);
1555 ASSERT(dp
->tx_softq_tail
- dp
->tx_softq_head
> 0);
1556 gem_tx_start_unit(dp
);
1559 dp
->stats
.obytes
+= len_total
;
1560 dp
->stats
.opackets
+= nmblk
;
1561 dp
->stats
.obcast
+= bcast
;
1562 dp
->stats
.omcast
+= mcast
;
1564 mutex_exit(&dp
->xmitlock
);
1569 /* ========================================================== */
1571 * error detection and restart routines
1573 /* ========================================================== */
1575 gem_restart_nic(struct gem_dev
*dp
, uint_t flags
)
1577 ASSERT(mutex_owned(&dp
->intrlock
));
1579 DPRINTF(1, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
1580 #ifdef GEM_DEBUG_LEVEL
1581 #if GEM_DEBUG_LEVEL > 1
1582 gem_dump_txbuf(dp
, CE_CONT
, "gem_restart_nic");
1586 if (dp
->mac_suspended
) {
1587 /* should we return GEM_FAILURE ? */
1588 return (GEM_FAILURE
);
1592 * We should avoid calling any routines except xxx_chip_reset
1593 * when we are resuming the system.
1595 if (dp
->mac_active
) {
1596 if (flags
& GEM_RESTART_KEEP_BUF
) {
1597 /* stop rx gracefully */
1598 dp
->rxmode
&= ~RXMODE_ENABLE
;
1599 (void) (*dp
->gc
.gc_set_rx_filter
)(dp
);
1601 (void) gem_mac_stop(dp
, flags
);
1604 /* reset the chip. */
1605 if ((*dp
->gc
.gc_reset_chip
)(dp
) != GEM_SUCCESS
) {
1606 cmn_err(CE_WARN
, "%s: %s: failed to reset chip",
1607 dp
->name
, __func__
);
1611 if (gem_mac_init(dp
) != GEM_SUCCESS
) {
1615 /* setup media mode if the link have been up */
1616 if (dp
->mii_state
== MII_STATE_LINKUP
) {
1617 if ((dp
->gc
.gc_set_media
)(dp
) != GEM_SUCCESS
) {
1622 /* setup mac address and enable rx filter */
1623 dp
->rxmode
|= RXMODE_ENABLE
;
1624 if ((*dp
->gc
.gc_set_rx_filter
)(dp
) != GEM_SUCCESS
) {
1629 * XXX - a panic happened because of linkdown.
1630 * We must check mii_state here, because the link can be down just
1631 * before the restart event happen. If the link is down now,
1632 * gem_mac_start() will be called from gem_mii_link_check() when
1633 * the link become up later.
1635 if (dp
->mii_state
== MII_STATE_LINKUP
) {
1636 /* restart the nic */
1637 ASSERT(!dp
->mac_active
);
1638 (void) gem_mac_start(dp
);
1640 return (GEM_SUCCESS
);
1642 return (GEM_FAILURE
);
1647 gem_tx_timeout(struct gem_dev
*dp
)
1653 mutex_enter(&dp
->intrlock
);
1656 now
= ddi_get_lbolt();
1658 mutex_enter(&dp
->xmitlock
);
1659 if (!dp
->mac_active
|| dp
->mii_state
!= MII_STATE_LINKUP
) {
1660 mutex_exit(&dp
->xmitlock
);
1663 mutex_exit(&dp
->xmitlock
);
1665 /* reclaim transmitted buffers to check the trasmitter hangs or not. */
1666 if (gem_reclaim_txbuf(dp
) != GEM_SUCCESS
) {
1667 /* tx error happened, reset transmitter in the chip */
1668 (void) gem_restart_nic(dp
, 0);
1670 dp
->tx_blocked
= (clock_t)0;
1675 mutex_enter(&dp
->xmitlock
);
1676 /* check if the transmitter thread is stuck */
1677 if (dp
->tx_active_head
== dp
->tx_active_tail
) {
1678 /* no tx buffer is loaded to the nic */
1679 if (dp
->tx_blocked
&&
1680 now
- dp
->tx_blocked
> dp
->gc
.gc_tx_timeout_interval
) {
1681 gem_dump_txbuf(dp
, CE_WARN
,
1682 "gem_tx_timeout: tx blocked");
1684 dp
->tx_blocked
= (clock_t)0;
1686 mutex_exit(&dp
->xmitlock
);
1690 tbp
= GET_TXBUF(dp
, dp
->tx_active_head
);
1691 if (now
- tbp
->txb_stime
< dp
->gc
.gc_tx_timeout
) {
1692 mutex_exit(&dp
->xmitlock
);
1695 mutex_exit(&dp
->xmitlock
);
1697 gem_dump_txbuf(dp
, CE_WARN
, "gem_tx_timeout: tx timeout");
1699 /* discard untransmitted packet and restart tx. */
1700 (void) gem_restart_nic(dp
, GEM_RESTART_NOWAIT
);
1702 dp
->tx_blocked
= (clock_t)0;
1705 mutex_exit(&dp
->intrlock
);
1707 /* restart the downstream if needed */
1709 mac_tx_update(dp
->mh
);
1712 DPRINTF(4, (CE_CONT
,
1713 "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
1714 dp
->name
, BOOLEAN(dp
->tx_blocked
),
1715 dp
->tx_active_head
, dp
->tx_active_tail
, dp
->tx_desc_intr
));
1717 timeout((void (*)(void *))gem_tx_timeout
,
1718 (void *)dp
, dp
->gc
.gc_tx_timeout_interval
);
1721 /* ================================================================== */
1725 /* ================================================================== */
1728 gem_append_rxbuf(struct gem_dev
*dp
, struct rxbuf
*rbp_head
)
1732 int rx_ring_size
= dp
->gc
.gc_rx_ring_size
;
1734 ASSERT(rbp_head
!= NULL
);
1735 ASSERT(mutex_owned(&dp
->intrlock
));
1737 DPRINTF(3, (CE_CONT
, "!%s: %s: slot_head:%d, slot_tail:%d",
1738 dp
->name
, __func__
, dp
->rx_active_head
, dp
->rx_active_tail
));
1741 * Add new buffers into active rx buffer list
1743 if (dp
->rx_buf_head
== NULL
) {
1744 dp
->rx_buf_head
= rbp_head
;
1745 ASSERT(dp
->rx_buf_tail
== NULL
);
1747 dp
->rx_buf_tail
->rxb_next
= rbp_head
;
1750 tail
= dp
->rx_active_tail
;
1751 for (rbp
= rbp_head
; rbp
; rbp
= rbp
->rxb_next
) {
1752 /* need to notify the tail for the lower layer */
1753 dp
->rx_buf_tail
= rbp
;
1755 dp
->gc
.gc_rx_desc_write(dp
,
1756 SLOT(tail
, rx_ring_size
),
1760 dp
->rx_active_tail
= tail
= tail
+ 1;
1763 #pragma inline(gem_append_rxbuf)
1766 gem_get_packet_default(struct gem_dev
*dp
, struct rxbuf
*rbp
, size_t len
)
1768 int rx_header_len
= dp
->gc
.gc_rx_header_len
;
1772 /* allocate a new mblk */
1773 if (mp
= allocb(len
+ VTAG_SIZE
, BPRI_MED
)) {
1774 ASSERT(mp
->b_next
== NULL
);
1775 ASSERT(mp
->b_cont
== NULL
);
1777 mp
->b_rptr
+= VTAG_SIZE
;
1779 mp
->b_wptr
= bp
+ len
;
1782 * flush the range of the entire buffer to invalidate
1783 * all of corresponding dirty entries in iocache.
1785 (void) ddi_dma_sync(rbp
->rxb_dh
, rx_header_len
,
1786 0, DDI_DMA_SYNC_FORKERNEL
);
1788 bcopy(rbp
->rxb_buf
+ rx_header_len
, bp
, len
);
1793 #ifdef GEM_DEBUG_LEVEL
1794 uint_t gem_rx_pkts
[17];
1799 gem_receive(struct gem_dev
*dp
)
1801 uint64_t len_total
= 0;
1806 struct rxbuf
*newbufs
;
1807 struct rxbuf
**newbufs_tailp
;
1810 int rx_ring_size
= dp
->gc
.gc_rx_ring_size
;
1811 seqnum_t active_head
;
1812 uint64_t (*rx_desc_stat
)(struct gem_dev
*dp
,
1813 int slot
, int ndesc
);
1814 int ethermin
= ETHERMIN
;
1815 int ethermax
= dp
->mtu
+ sizeof (struct ether_header
);
1816 int rx_header_len
= dp
->gc
.gc_rx_header_len
;
1818 ASSERT(mutex_owned(&dp
->intrlock
));
1820 DPRINTF(3, (CE_CONT
, "!%s: gem_receive: rx_buf_head:%p",
1821 dp
->name
, dp
->rx_buf_head
));
1823 rx_desc_stat
= dp
->gc
.gc_rx_desc_stat
;
1824 newbufs_tailp
= &newbufs
;
1825 rx_tailp
= &rx_head
;
1826 for (active_head
= dp
->rx_active_head
;
1827 (rbp
= dp
->rx_buf_head
) != NULL
; active_head
++) {
1830 cnt
= max(dp
->poll_pkt_delay
*2, 10);
1832 dp
->rx_active_tail
- active_head
);
1833 gem_rx_desc_dma_sync(dp
,
1834 SLOT(active_head
, rx_ring_size
),
1836 DDI_DMA_SYNC_FORKERNEL
);
1839 if (rx_header_len
> 0) {
1840 (void) ddi_dma_sync(rbp
->rxb_dh
, 0,
1841 rx_header_len
, DDI_DMA_SYNC_FORKERNEL
);
1844 if (((rxstat
= (*rx_desc_stat
)(dp
,
1845 SLOT(active_head
, rx_ring_size
),
1847 & (GEM_RX_DONE
| GEM_RX_ERR
)) == 0) {
1848 /* not received yet */
1852 /* Remove the head of the rx buffer list */
1853 dp
->rx_buf_head
= rbp
->rxb_next
;
1857 if (rxstat
& GEM_RX_ERR
) {
1861 len
= rxstat
& GEM_RX_LEN
;
1862 DPRINTF(3, (CE_CONT
, "!%s: %s: rxstat:0x%llx, len:0x%x",
1863 dp
->name
, __func__
, rxstat
, len
));
1868 if ((mp
= dp
->gc
.gc_get_packet(dp
, rbp
, len
)) == NULL
) {
1869 /* no memory, discard the packet */
1870 dp
->stats
.norcvbuf
++;
1877 ethermin
= ETHERMIN
;
1878 ethermax
= dp
->mtu
+ sizeof (struct ether_header
);
1879 if (GET_NET16(mp
->b_rptr
+ VTAG_OFF
) == VTAG_TPID
) {
1880 ethermax
+= VTAG_SIZE
;
1883 /* check packet size */
1884 if (len
< ethermin
) {
1891 if (len
> ethermax
) {
1893 dp
->stats
.frame_too_long
++;
1900 #ifdef GEM_DEBUG_VLAN
1901 if (GET_ETHERTYPE(mp
->b_rptr
) == VTAG_TPID
) {
1902 gem_dump_packet(dp
, (char *)__func__
, mp
, B_TRUE
);
1905 /* append received packet to temporaly rx buffer list */
1907 rx_tailp
= &mp
->b_next
;
1909 if (mp
->b_rptr
[0] & 1) {
1910 if (bcmp(mp
->b_rptr
,
1911 gem_etherbroadcastaddr
.ether_addr_octet
,
1919 ASSERT(rbp
!= NULL
);
1921 /* append new one to temporal new buffer list */
1922 *newbufs_tailp
= rbp
;
1923 newbufs_tailp
= &rbp
->rxb_next
;
1926 /* advance rx_active_head */
1927 if ((cnt
= active_head
- dp
->rx_active_head
) > 0) {
1928 dp
->stats
.rbytes
+= len_total
;
1929 dp
->stats
.rpackets
+= cnt
;
1931 dp
->rx_active_head
= active_head
;
1933 /* terminate the working list */
1934 *newbufs_tailp
= NULL
;
1937 if (dp
->rx_buf_head
== NULL
) {
1938 dp
->rx_buf_tail
= NULL
;
1941 DPRINTF(4, (CE_CONT
, "%s: %s: cnt:%d, rx_head:%p",
1942 dp
->name
, __func__
, cnt
, rx_head
));
1946 * fillfull rx list with new buffers
1950 /* save current tail */
1951 head
= dp
->rx_active_tail
;
1952 gem_append_rxbuf(dp
, newbufs
);
1954 /* call hw depend start routine if we have. */
1955 dp
->gc
.gc_rx_start(dp
,
1956 SLOT(head
, rx_ring_size
), dp
->rx_active_tail
- head
);
1961 * send up received packets
1963 mutex_exit(&dp
->intrlock
);
1964 mac_rx(dp
->mh
, NULL
, rx_head
);
1965 mutex_enter(&dp
->intrlock
);
1968 #ifdef GEM_DEBUG_LEVEL
1969 gem_rx_pkts
[min(cnt
, sizeof (gem_rx_pkts
)/sizeof (uint_t
)-1)]++;
1975 gem_tx_done(struct gem_dev
*dp
)
1977 boolean_t tx_sched
= B_FALSE
;
1979 if (gem_reclaim_txbuf(dp
) != GEM_SUCCESS
) {
1980 (void) gem_restart_nic(dp
, GEM_RESTART_KEEP_BUF
);
1981 DPRINTF(2, (CE_CONT
, "!%s: gem_tx_done: tx_desc: %d %d",
1982 dp
->name
, dp
->tx_active_head
, dp
->tx_active_tail
));
1987 mutex_enter(&dp
->xmitlock
);
1989 /* XXX - we must not have any packets in soft queue */
1990 ASSERT(dp
->tx_softq_head
== dp
->tx_softq_tail
);
1992 * If we won't have chance to get more free tx buffers, and blocked,
1993 * it is worth to reschedule the downstream i.e. tx side.
1995 ASSERT(dp
->tx_desc_intr
- dp
->tx_desc_head
>= 0);
1996 if (dp
->tx_blocked
&& dp
->tx_desc_intr
== dp
->tx_desc_head
) {
1998 * As no further tx-done interrupts are scheduled, this
1999 * is the last chance to kick tx side, which may be
2000 * blocked now, otherwise the tx side never works again.
2003 dp
->tx_blocked
= (clock_t)0;
2004 dp
->tx_max_packets
=
2005 min(dp
->tx_max_packets
+ 2, dp
->gc
.gc_tx_buf_limit
);
2008 mutex_exit(&dp
->xmitlock
);
2010 DPRINTF(3, (CE_CONT
, "!%s: %s: ret: blocked:%d",
2011 dp
->name
, __func__
, BOOLEAN(dp
->tx_blocked
)));
2017 gem_intr(struct gem_dev
*dp
)
2021 mutex_enter(&dp
->intrlock
);
2022 if (dp
->mac_suspended
) {
2023 mutex_exit(&dp
->intrlock
);
2024 return (DDI_INTR_UNCLAIMED
);
2026 dp
->intr_busy
= B_TRUE
;
2028 ret
= (*dp
->gc
.gc_interrupt
)(dp
);
2030 if (ret
== DDI_INTR_UNCLAIMED
) {
2031 dp
->intr_busy
= B_FALSE
;
2032 mutex_exit(&dp
->intrlock
);
2036 if (!dp
->mac_active
) {
2037 cv_broadcast(&dp
->tx_drain_cv
);
2042 dp
->intr_busy
= B_FALSE
;
2044 mutex_exit(&dp
->intrlock
);
2046 if (ret
& INTR_RESTART_TX
) {
2047 DPRINTF(4, (CE_CONT
, "!%s: calling mac_tx_update", dp
->name
));
2048 mac_tx_update(dp
->mh
);
2049 ret
&= ~INTR_RESTART_TX
;
2055 gem_intr_watcher(struct gem_dev
*dp
)
2057 (void) gem_intr(dp
);
2059 /* schedule next call of tu_intr_watcher */
2060 dp
->intr_watcher_id
=
2061 timeout((void (*)(void *))gem_intr_watcher
, (void *)dp
, 1);
2064 /* ======================================================================== */
2066 * MII support routines
2068 /* ======================================================================== */
2070 gem_choose_forcedmode(struct gem_dev
*dp
)
2072 /* choose media mode */
2073 if (dp
->anadv_1000fdx
|| dp
->anadv_1000hdx
) {
2074 dp
->speed
= GEM_SPD_1000
;
2075 dp
->full_duplex
= dp
->anadv_1000fdx
;
2076 } else if (dp
->anadv_100fdx
|| dp
->anadv_100t4
) {
2077 dp
->speed
= GEM_SPD_100
;
2078 dp
->full_duplex
= B_TRUE
;
2079 } else if (dp
->anadv_100hdx
) {
2080 dp
->speed
= GEM_SPD_100
;
2081 dp
->full_duplex
= B_FALSE
;
2083 dp
->speed
= GEM_SPD_10
;
2084 dp
->full_duplex
= dp
->anadv_10fdx
;
2089 gem_mii_read(struct gem_dev
*dp
, uint_t reg
)
2091 if ((dp
->mii_status
& MII_STATUS_MFPRMBLSUPR
) == 0) {
2092 (*dp
->gc
.gc_mii_sync
)(dp
);
2094 return ((*dp
->gc
.gc_mii_read
)(dp
, reg
));
2098 gem_mii_write(struct gem_dev
*dp
, uint_t reg
, uint16_t val
)
2100 if ((dp
->mii_status
& MII_STATUS_MFPRMBLSUPR
) == 0) {
2101 (*dp
->gc
.gc_mii_sync
)(dp
);
2103 (*dp
->gc
.gc_mii_write
)(dp
, reg
, val
);
2106 #define fc_cap_decode(x) \
2107 ((((x) & MII_ABILITY_PAUSE) ? 1 : 0) | \
2108 (((x) & MII_ABILITY_ASMPAUSE) ? 2 : 0))
2111 gem_mii_config_default(struct gem_dev
*dp
)
2115 static uint16_t fc_cap_encode
[4] = {
2117 MII_ABILITY_PAUSE
, /* symmetric */
2118 MII_ABILITY_ASMPAUSE
, /* tx */
2119 MII_ABILITY_PAUSE
| MII_ABILITY_ASMPAUSE
, /* rx-symmetric */
2122 DPRINTF(1, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
2125 * Configure bits in advertisement register
2127 mii_stat
= dp
->mii_status
;
2129 DPRINTF(1, (CE_CONT
, "!%s: %s: MII_STATUS reg:%b",
2130 dp
->name
, __func__
, mii_stat
, MII_STATUS_BITS
));
2132 if ((mii_stat
& MII_STATUS_ABILITY_TECH
) == 0) {
2134 cmn_err(CE_WARN
, "!%s: wrong ability bits: mii_status:%b",
2135 dp
->name
, mii_stat
, MII_STATUS_BITS
);
2136 return (GEM_FAILURE
);
2139 /* Do not change the rest of the ability bits in the advert reg */
2140 val
= gem_mii_read(dp
, MII_AN_ADVERT
) & ~MII_ABILITY_ALL
;
2142 DPRINTF(0, (CE_CONT
,
2143 "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
2145 dp
->anadv_100t4
, dp
->anadv_100fdx
, dp
->anadv_100hdx
,
2146 dp
->anadv_10fdx
, dp
->anadv_10hdx
));
2148 if (dp
->anadv_100t4
) {
2149 val
|= MII_ABILITY_100BASE_T4
;
2151 if (dp
->anadv_100fdx
) {
2152 val
|= MII_ABILITY_100BASE_TX_FD
;
2154 if (dp
->anadv_100hdx
) {
2155 val
|= MII_ABILITY_100BASE_TX
;
2157 if (dp
->anadv_10fdx
) {
2158 val
|= MII_ABILITY_10BASE_T_FD
;
2160 if (dp
->anadv_10hdx
) {
2161 val
|= MII_ABILITY_10BASE_T
;
2164 /* set flow control capability */
2165 val
|= fc_cap_encode
[dp
->anadv_flow_control
];
2167 DPRINTF(0, (CE_CONT
,
2168 "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
2169 dp
->name
, __func__
, val
, MII_ABILITY_BITS
, dp
->gc
.gc_mii_mode
,
2170 dp
->anadv_flow_control
));
2172 gem_mii_write(dp
, MII_AN_ADVERT
, val
);
2174 if (mii_stat
& MII_STATUS_XSTATUS
) {
2176 * 1000Base-T GMII support
2178 if (!dp
->anadv_autoneg
) {
2179 /* enable manual configuration */
2180 val
= MII_1000TC_CFG_EN
;
2183 if (dp
->anadv_1000fdx
) {
2184 val
|= MII_1000TC_ADV_FULL
;
2186 if (dp
->anadv_1000hdx
) {
2187 val
|= MII_1000TC_ADV_HALF
;
2190 DPRINTF(0, (CE_CONT
,
2191 "!%s: %s: setting MII_1000TC reg:%b",
2192 dp
->name
, __func__
, val
, MII_1000TC_BITS
));
2194 gem_mii_write(dp
, MII_1000TC
, val
);
2197 return (GEM_SUCCESS
);
2200 #define GEM_LINKUP(dp) mac_link_update((dp)->mh, LINK_STATE_UP)
2201 #define GEM_LINKDOWN(dp) mac_link_update((dp)->mh, LINK_STATE_DOWN)
2203 static uint8_t gem_fc_result
[4 /* my cap */ ][4 /* lp cap */] = {
2204 /* none symm tx rx/symm */
2212 FLOW_CONTROL_SYMMETRIC
,
2214 FLOW_CONTROL_SYMMETRIC
},
2219 FLOW_CONTROL_TX_PAUSE
},
2222 FLOW_CONTROL_SYMMETRIC
,
2223 FLOW_CONTROL_RX_PAUSE
,
2224 FLOW_CONTROL_SYMMETRIC
},
2227 static char *gem_fc_type
[] = {
2235 gem_mii_link_check(struct gem_dev
*dp
)
2237 uint16_t old_mii_state
;
2238 boolean_t tx_sched
= B_FALSE
;
2248 int linkdown_action
;
2249 boolean_t fix_phy
= B_FALSE
;
2251 now
= ddi_get_lbolt();
2252 old_mii_state
= dp
->mii_state
;
2254 DPRINTF(3, (CE_CONT
, "!%s: %s: time:%d state:%d",
2255 dp
->name
, __func__
, now
, dp
->mii_state
));
2257 diff
= now
- dp
->mii_last_check
;
2258 dp
->mii_last_check
= now
;
2261 * For NWAM, don't show linkdown state right
2262 * after the system boots
2264 if (dp
->linkup_delay
> 0) {
2265 if (dp
->linkup_delay
> diff
) {
2266 dp
->linkup_delay
-= diff
;
2268 /* link up timeout */
2269 dp
->linkup_delay
= -1;
2274 switch (dp
->mii_state
) {
2275 case MII_STATE_UNKNOWN
:
2276 /* power-up, DP83840 requires 32 sync bits */
2277 (*dp
->gc
.gc_mii_sync
)(dp
);
2280 case MII_STATE_RESETTING
:
2281 dp
->mii_timer
-= diff
;
2282 if (dp
->mii_timer
> 0) {
2283 /* don't read phy registers in resetting */
2284 dp
->mii_interval
= WATCH_INTERVAL_FAST
;
2288 /* Timer expired, ensure reset bit is not set */
2290 if (dp
->mii_status
& MII_STATUS_MFPRMBLSUPR
) {
2291 /* some phys need sync bits after reset */
2292 (*dp
->gc
.gc_mii_sync
)(dp
);
2294 val
= gem_mii_read(dp
, MII_CONTROL
);
2295 if (val
& MII_CONTROL_RESET
) {
2297 "!%s: time:%ld resetting phy not complete."
2298 " mii_control:0x%b",
2299 dp
->name
, ddi_get_lbolt(),
2300 val
, MII_CONTROL_BITS
);
2303 /* ensure neither isolated nor pwrdown nor auto-nego mode */
2304 /* XXX -- this operation is required for NS DP83840A. */
2305 gem_mii_write(dp
, MII_CONTROL
, 0);
2307 /* As resetting PHY has completed, configure PHY registers */
2308 if ((*dp
->gc
.gc_mii_config
)(dp
) != GEM_SUCCESS
) {
2309 /* we failed to configure PHY. */
2313 /* mii_config may disable autonegatiation */
2314 gem_choose_forcedmode(dp
);
2319 dp
->mii_ctl1000
= 0;
2320 dp
->mii_stat1000
= 0;
2321 dp
->flow_control
= FLOW_CONTROL_NONE
;
2323 if (!dp
->anadv_autoneg
) {
2324 /* skip auto-negotiation phase */
2325 dp
->mii_state
= MII_STATE_MEDIA_SETUP
;
2327 dp
->mii_interval
= 0;
2331 /* Issue auto-negotiation command */
2334 case MII_STATE_AUTONEGOTIATING
:
2336 * Autonegotiation is in progress
2338 dp
->mii_timer
-= diff
;
2340 (dp
->gc
.gc_mii_an_timeout
2341 - dp
->gc
.gc_mii_an_wait
) > 0) {
2343 * wait for a while, typically autonegotiation
2344 * completes in 2.3 - 2.5 sec.
2346 dp
->mii_interval
= WATCH_INTERVAL_FAST
;
2350 /* read PHY status */
2351 status
= gem_mii_read(dp
, MII_STATUS
);
2352 DPRINTF(4, (CE_CONT
,
2353 "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
2354 dp
->name
, __func__
, dp
->mii_state
,
2355 status
, MII_STATUS_BITS
));
2357 if (status
& MII_STATUS_REMFAULT
) {
2359 * The link parnert told me something wrong happend.
2363 "!%s: auto-negotiation failed: remote fault",
2368 if ((status
& MII_STATUS_ANDONE
) == 0) {
2369 if (dp
->mii_timer
<= 0) {
2371 * Auto-negotiation was timed out,
2372 * try again w/o resetting phy.
2374 if (!dp
->mii_supress_msg
) {
2376 "!%s: auto-negotiation failed: timeout",
2378 dp
->mii_supress_msg
= B_TRUE
;
2383 * Auto-negotiation is in progress. Wait.
2385 dp
->mii_interval
= dp
->gc
.gc_mii_an_watch_interval
;
2390 * Auto-negotiation have completed.
2391 * Assume linkdown and fall through.
2393 dp
->mii_supress_msg
= B_FALSE
;
2394 dp
->mii_state
= MII_STATE_AN_DONE
;
2395 DPRINTF(0, (CE_CONT
,
2396 "!%s: auto-negotiation completed, MII_STATUS:%b",
2397 dp
->name
, status
, MII_STATUS_BITS
));
2399 if (dp
->gc
.gc_mii_an_delay
> 0) {
2400 dp
->mii_timer
= dp
->gc
.gc_mii_an_delay
;
2401 dp
->mii_interval
= drv_usectohz(20*1000);
2409 case MII_STATE_AN_DONE
:
2411 * Auto-negotiation have done. Now we can set up media.
2413 dp
->mii_timer
-= diff
;
2414 if (dp
->mii_timer
> 0) {
2415 /* wait for a while */
2416 dp
->mii_interval
= WATCH_INTERVAL_FAST
;
2421 * set up the result of auto negotiation
2425 * Read registers required to determin current
2426 * duplex mode and media speed.
2428 if (dp
->gc
.gc_mii_an_delay
> 0) {
2430 * As the link watcher context has been suspended,
2431 * 'status' is invalid. We must status register here
2433 status
= gem_mii_read(dp
, MII_STATUS
);
2435 advert
= gem_mii_read(dp
, MII_AN_ADVERT
);
2436 lpable
= gem_mii_read(dp
, MII_AN_LPABLE
);
2437 exp
= gem_mii_read(dp
, MII_AN_EXPANSION
);
2438 if (exp
== 0xffff) {
2439 /* some phys don't have exp register */
2444 if (dp
->mii_status
& MII_STATUS_XSTATUS
) {
2445 ctl1000
= gem_mii_read(dp
, MII_1000TC
);
2446 stat1000
= gem_mii_read(dp
, MII_1000TS
);
2448 dp
->mii_lpable
= lpable
;
2449 dp
->mii_advert
= advert
;
2451 dp
->mii_ctl1000
= ctl1000
;
2452 dp
->mii_stat1000
= stat1000
;
2455 "!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
2457 advert
, MII_ABILITY_BITS
,
2458 lpable
, MII_ABILITY_BITS
,
2459 exp
, MII_AN_EXP_BITS
);
2461 if (dp
->mii_status
& MII_STATUS_XSTATUS
) {
2463 "! MII_1000TC:%b, MII_1000TS:%b",
2464 ctl1000
, MII_1000TC_BITS
,
2465 stat1000
, MII_1000TS_BITS
);
2468 if (gem_population(lpable
) <= 1 &&
2469 (exp
& MII_AN_EXP_LPCANAN
) == 0) {
2470 if ((advert
& MII_ABILITY_TECH
) != lpable
) {
2472 "!%s: but the link partnar doesn't seem"
2473 " to have auto-negotiation capability."
2474 " please check the link configuration.",
2478 * it should be result of parallel detection, which
2479 * cannot detect duplex mode.
2481 if (lpable
& MII_ABILITY_100BASE_TX
) {
2483 * we prefer full duplex mode for 100Mbps
2484 * connection, if we can.
2486 lpable
|= advert
& MII_ABILITY_100BASE_TX_FD
;
2489 if ((advert
& lpable
) == 0 &&
2490 lpable
& MII_ABILITY_10BASE_T
) {
2491 lpable
|= advert
& MII_ABILITY_10BASE_T_FD
;
2494 * as the link partnar isn't auto-negotiatable, use
2495 * fixed mode temporally.
2498 } else if (lpable
== 0) {
2499 cmn_err(CE_WARN
, "!%s: wrong lpable.", dp
->name
);
2503 * configure current link mode according to AN priority.
2505 val
= advert
& lpable
;
2506 if ((ctl1000
& MII_1000TC_ADV_FULL
) &&
2507 (stat1000
& MII_1000TS_LP_FULL
)) {
2508 /* 1000BaseT & full duplex */
2509 dp
->speed
= GEM_SPD_1000
;
2510 dp
->full_duplex
= B_TRUE
;
2511 } else if ((ctl1000
& MII_1000TC_ADV_HALF
) &&
2512 (stat1000
& MII_1000TS_LP_HALF
)) {
2513 /* 1000BaseT & half duplex */
2514 dp
->speed
= GEM_SPD_1000
;
2515 dp
->full_duplex
= B_FALSE
;
2516 } else if (val
& MII_ABILITY_100BASE_TX_FD
) {
2517 /* 100BaseTx & full duplex */
2518 dp
->speed
= GEM_SPD_100
;
2519 dp
->full_duplex
= B_TRUE
;
2520 } else if (val
& MII_ABILITY_100BASE_T4
) {
2521 /* 100BaseT4 & full duplex */
2522 dp
->speed
= GEM_SPD_100
;
2523 dp
->full_duplex
= B_TRUE
;
2524 } else if (val
& MII_ABILITY_100BASE_TX
) {
2525 /* 100BaseTx & half duplex */
2526 dp
->speed
= GEM_SPD_100
;
2527 dp
->full_duplex
= B_FALSE
;
2528 } else if (val
& MII_ABILITY_10BASE_T_FD
) {
2529 /* 10BaseT & full duplex */
2530 dp
->speed
= GEM_SPD_10
;
2531 dp
->full_duplex
= B_TRUE
;
2532 } else if (val
& MII_ABILITY_10BASE_T
) {
2533 /* 10BaseT & half duplex */
2534 dp
->speed
= GEM_SPD_10
;
2535 dp
->full_duplex
= B_FALSE
;
2538 * It seems that the link partnar doesn't have
2539 * auto-negotiation capability and our PHY
2540 * could not report the correct current mode.
2541 * We guess current mode by mii_control register.
2543 val
= gem_mii_read(dp
, MII_CONTROL
);
2545 /* select 100m full or 10m half */
2546 dp
->speed
= (val
& MII_CONTROL_100MB
) ?
2547 GEM_SPD_100
: GEM_SPD_10
;
2548 dp
->full_duplex
= dp
->speed
!= GEM_SPD_10
;
2552 "!%s: auto-negotiation done but "
2553 "common ability not found.\n"
2554 "PHY state: control:%b advert:%b lpable:%b\n"
2555 "guessing %d Mbps %s duplex mode",
2557 val
, MII_CONTROL_BITS
,
2558 advert
, MII_ABILITY_BITS
,
2559 lpable
, MII_ABILITY_BITS
,
2560 gem_speed_value
[dp
->speed
],
2561 dp
->full_duplex
? "full" : "half");
2564 if (dp
->full_duplex
) {
2566 gem_fc_result
[fc_cap_decode(advert
)]
2567 [fc_cap_decode(lpable
)];
2569 dp
->flow_control
= FLOW_CONTROL_NONE
;
2571 dp
->mii_state
= MII_STATE_MEDIA_SETUP
;
2574 case MII_STATE_MEDIA_SETUP
:
2575 dp
->mii_state
= MII_STATE_LINKDOWN
;
2576 dp
->mii_timer
= dp
->gc
.gc_mii_linkdown_timeout
;
2577 DPRINTF(2, (CE_CONT
, "!%s: setup midia mode done", dp
->name
));
2578 dp
->mii_supress_msg
= B_FALSE
;
2580 /* use short interval */
2581 dp
->mii_interval
= WATCH_INTERVAL_FAST
;
2583 if ((!dp
->anadv_autoneg
) ||
2584 dp
->gc
.gc_mii_an_oneshot
|| fix_phy
) {
2587 * write specified mode to phy.
2589 val
= gem_mii_read(dp
, MII_CONTROL
);
2590 val
&= ~(MII_CONTROL_SPEED
| MII_CONTROL_FDUPLEX
|
2591 MII_CONTROL_ANE
| MII_CONTROL_RSAN
);
2593 if (dp
->full_duplex
) {
2594 val
|= MII_CONTROL_FDUPLEX
;
2597 switch (dp
->speed
) {
2599 val
|= MII_CONTROL_1000MB
;
2603 val
|= MII_CONTROL_100MB
;
2607 cmn_err(CE_WARN
, "%s: unknown speed:%d",
2608 dp
->name
, dp
->speed
);
2611 /* for GEM_SPD_10, do nothing */
2615 if (dp
->mii_status
& MII_STATUS_XSTATUS
) {
2617 MII_1000TC
, MII_1000TC_CFG_EN
);
2619 gem_mii_write(dp
, MII_CONTROL
, val
);
2622 if (dp
->nic_state
>= NIC_STATE_INITIALIZED
) {
2623 /* notify the result of auto-negotiation to mac */
2624 (*dp
->gc
.gc_set_media
)(dp
);
2627 if ((void *)dp
->gc
.gc_mii_tune_phy
) {
2628 /* for built-in sis900 */
2629 /* XXX - this code should be removed. */
2630 (*dp
->gc
.gc_mii_tune_phy
)(dp
);
2635 case MII_STATE_LINKDOWN
:
2636 status
= gem_mii_read(dp
, MII_STATUS
);
2637 if (status
& MII_STATUS_LINKUP
) {
2641 dp
->mii_state
= MII_STATE_LINKUP
;
2642 dp
->mii_supress_msg
= B_FALSE
;
2644 DPRINTF(0, (CE_CONT
,
2645 "!%s: link up detected: mii_stat:%b",
2646 dp
->name
, status
, MII_STATUS_BITS
));
2649 * MII_CONTROL_100MB and MII_CONTROL_FDUPLEX are
2650 * ignored when MII_CONTROL_ANE is set.
2653 "!%s: Link up: %d Mbps %s duplex %s flow control",
2655 gem_speed_value
[dp
->speed
],
2656 dp
->full_duplex
? "full" : "half",
2657 gem_fc_type
[dp
->flow_control
]);
2659 dp
->mii_interval
= dp
->gc
.gc_mii_link_watch_interval
;
2661 /* XXX - we need other timer to watch statictics */
2662 if (dp
->gc
.gc_mii_hw_link_detection
&&
2663 dp
->nic_state
== NIC_STATE_ONLINE
) {
2664 dp
->mii_interval
= 0;
2667 if (dp
->nic_state
== NIC_STATE_ONLINE
) {
2668 if (!dp
->mac_active
) {
2669 (void) gem_mac_start(dp
);
2676 dp
->mii_supress_msg
= B_TRUE
;
2677 if (dp
->anadv_autoneg
) {
2678 dp
->mii_timer
-= diff
;
2679 if (dp
->mii_timer
<= 0) {
2681 * link down timer expired.
2682 * need to restart auto-negotiation.
2685 dp
->gc
.gc_mii_linkdown_timeout_action
;
2686 goto restart_autonego
;
2689 /* don't change mii_state */
2692 case MII_STATE_LINKUP
:
2693 status
= gem_mii_read(dp
, MII_STATUS
);
2694 if ((status
& MII_STATUS_LINKUP
) == 0) {
2699 "!%s: link down detected: mii_stat:%b",
2700 dp
->name
, status
, MII_STATUS_BITS
);
2702 if (dp
->nic_state
== NIC_STATE_ONLINE
&&
2704 dp
->gc
.gc_mii_stop_mac_on_linkdown
) {
2705 (void) gem_mac_stop(dp
, 0);
2707 if (dp
->tx_blocked
) {
2713 if (dp
->anadv_autoneg
) {
2714 /* need to restart auto-negotiation */
2715 linkdown_action
= dp
->gc
.gc_mii_linkdown_action
;
2716 goto restart_autonego
;
2719 dp
->mii_state
= MII_STATE_LINKDOWN
;
2720 dp
->mii_timer
= dp
->gc
.gc_mii_linkdown_timeout
;
2722 if ((void *)dp
->gc
.gc_mii_tune_phy
) {
2723 /* for built-in sis900 */
2724 (*dp
->gc
.gc_mii_tune_phy
)(dp
);
2726 dp
->mii_interval
= dp
->gc
.gc_mii_link_watch_interval
;
2730 /* don't change mii_state */
2731 if (dp
->gc
.gc_mii_hw_link_detection
&&
2732 dp
->nic_state
== NIC_STATE_ONLINE
) {
2733 dp
->mii_interval
= 0;
2738 dp
->mii_interval
= dp
->gc
.gc_mii_link_watch_interval
;
2741 /* Actions on the end of state routine */
2744 switch (linkdown_action
) {
2745 case MII_ACTION_RESET
:
2746 if (!dp
->mii_supress_msg
) {
2747 cmn_err(CE_CONT
, "!%s: resetting PHY", dp
->name
);
2749 dp
->mii_supress_msg
= B_TRUE
;
2752 case MII_ACTION_NONE
:
2753 dp
->mii_supress_msg
= B_TRUE
;
2754 if (dp
->gc
.gc_mii_an_oneshot
) {
2757 /* PHY will restart autonego automatically */
2758 dp
->mii_state
= MII_STATE_AUTONEGOTIATING
;
2759 dp
->mii_timer
= dp
->gc
.gc_mii_an_timeout
;
2760 dp
->mii_interval
= dp
->gc
.gc_mii_an_watch_interval
;
2763 case MII_ACTION_RSA
:
2764 if (!dp
->mii_supress_msg
) {
2765 cmn_err(CE_CONT
, "!%s: restarting auto-negotiation",
2768 dp
->mii_supress_msg
= B_TRUE
;
2772 cmn_err(CE_WARN
, "!%s: unknowm linkdown action: %d",
2773 dp
->name
, dp
->gc
.gc_mii_linkdown_action
);
2774 dp
->mii_supress_msg
= B_TRUE
;
2779 if (!dp
->mii_supress_msg
) {
2780 cmn_err(CE_CONT
, "!%s: resetting PHY", dp
->name
);
2782 dp
->mii_state
= MII_STATE_RESETTING
;
2783 dp
->mii_timer
= dp
->gc
.gc_mii_reset_timeout
;
2784 if (!dp
->gc
.gc_mii_dont_reset
) {
2785 gem_mii_write(dp
, MII_CONTROL
, MII_CONTROL_RESET
);
2787 dp
->mii_interval
= WATCH_INTERVAL_FAST
;
2791 if (!dp
->mii_supress_msg
) {
2792 cmn_err(CE_CONT
, "!%s: auto-negotiation started", dp
->name
);
2794 dp
->mii_state
= MII_STATE_AUTONEGOTIATING
;
2795 dp
->mii_timer
= dp
->gc
.gc_mii_an_timeout
;
2797 /* start/restart auto nego */
2798 val
= gem_mii_read(dp
, MII_CONTROL
) &
2799 ~(MII_CONTROL_ISOLATE
| MII_CONTROL_PWRDN
| MII_CONTROL_RESET
);
2801 gem_mii_write(dp
, MII_CONTROL
,
2802 val
| MII_CONTROL_RSAN
| MII_CONTROL_ANE
);
2804 dp
->mii_interval
= dp
->gc
.gc_mii_an_watch_interval
;
2807 if (dp
->link_watcher_id
== 0 && dp
->mii_interval
) {
2808 /* we must schedule next mii_watcher */
2809 dp
->link_watcher_id
=
2810 timeout((void (*)(void *))&gem_mii_link_watcher
,
2811 (void *)dp
, dp
->mii_interval
);
2814 if (old_mii_state
!= dp
->mii_state
) {
2815 /* notify new mii link state */
2816 if (dp
->mii_state
== MII_STATE_LINKUP
) {
2817 dp
->linkup_delay
= 0;
2819 } else if (dp
->linkup_delay
<= 0) {
2822 } else if (dp
->linkup_delay
< 0) {
2823 /* first linkup timeout */
2824 dp
->linkup_delay
= 0;
2832 gem_mii_link_watcher(struct gem_dev
*dp
)
2836 mutex_enter(&dp
->intrlock
);
2838 dp
->link_watcher_id
= 0;
2839 tx_sched
= gem_mii_link_check(dp
);
2840 #if GEM_DEBUG_LEVEL > 2
2841 if (dp
->link_watcher_id
== 0) {
2842 cmn_err(CE_CONT
, "%s: link watcher stopped", dp
->name
);
2845 mutex_exit(&dp
->intrlock
);
2848 /* kick potentially stopped downstream */
2849 mac_tx_update(dp
->mh
);
2854 gem_mii_probe_default(struct gem_dev
*dp
)
2861 DPRINTF(3, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
2866 /* ensure to send sync bits */
2869 /* Try default phy first */
2870 if (dp
->mii_phy_addr
) {
2871 status
= gem_mii_read(dp
, MII_STATUS
);
2872 if (status
!= 0xffff && status
!= 0) {
2873 gem_mii_write(dp
, MII_CONTROL
, 0);
2877 if (dp
->mii_phy_addr
< 0) {
2879 "!%s: failed to probe default internal and/or non-MII PHY",
2881 return (GEM_FAILURE
);
2885 "!%s: failed to probe default MII PHY at %d",
2886 dp
->name
, dp
->mii_phy_addr
);
2889 /* Try all possible address */
2890 for (phy
= dp
->gc
.gc_mii_addr_min
; phy
< 32; phy
++) {
2891 dp
->mii_phy_addr
= phy
;
2892 status
= gem_mii_read(dp
, MII_STATUS
);
2894 if (status
!= 0xffff && status
!= 0) {
2895 gem_mii_write(dp
, MII_CONTROL
, 0);
2900 for (phy
= dp
->gc
.gc_mii_addr_min
; phy
< 32; phy
++) {
2901 dp
->mii_phy_addr
= phy
;
2902 gem_mii_write(dp
, MII_CONTROL
, 0);
2903 status
= gem_mii_read(dp
, MII_STATUS
);
2905 if (status
!= 0xffff && status
!= 0) {
2910 cmn_err(CE_NOTE
, "!%s: no MII PHY found", dp
->name
);
2911 dp
->mii_phy_addr
= -1;
2913 return (GEM_FAILURE
);
2916 dp
->mii_status
= status
;
2917 dp
->mii_phy_id
= (gem_mii_read(dp
, MII_PHYIDH
) << 16) |
2918 gem_mii_read(dp
, MII_PHYIDL
);
2920 if (dp
->mii_phy_addr
< 0) {
2921 cmn_err(CE_CONT
, "!%s: using internal/non-MII PHY(0x%08x)",
2922 dp
->name
, dp
->mii_phy_id
);
2924 cmn_err(CE_CONT
, "!%s: MII PHY (0x%08x) found at %d",
2925 dp
->name
, dp
->mii_phy_id
, dp
->mii_phy_addr
);
2928 cmn_err(CE_CONT
, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
2930 gem_mii_read(dp
, MII_CONTROL
), MII_CONTROL_BITS
,
2931 status
, MII_STATUS_BITS
,
2932 gem_mii_read(dp
, MII_AN_ADVERT
), MII_ABILITY_BITS
,
2933 gem_mii_read(dp
, MII_AN_LPABLE
), MII_ABILITY_BITS
);
2935 dp
->mii_xstatus
= 0;
2936 if (status
& MII_STATUS_XSTATUS
) {
2937 dp
->mii_xstatus
= gem_mii_read(dp
, MII_XSTATUS
);
2939 cmn_err(CE_CONT
, "!%s: xstatus:%b",
2940 dp
->name
, dp
->mii_xstatus
, MII_XSTATUS_BITS
);
2943 /* check if the phy can advertize pause abilities */
2944 adv_org
= gem_mii_read(dp
, MII_AN_ADVERT
);
2946 gem_mii_write(dp
, MII_AN_ADVERT
,
2947 MII_ABILITY_PAUSE
| MII_ABILITY_ASMPAUSE
);
2949 adv
= gem_mii_read(dp
, MII_AN_ADVERT
);
2951 if ((adv
& MII_ABILITY_PAUSE
) == 0) {
2952 dp
->gc
.gc_flow_control
&= ~1;
2955 if ((adv
& MII_ABILITY_ASMPAUSE
) == 0) {
2956 dp
->gc
.gc_flow_control
&= ~2;
2959 gem_mii_write(dp
, MII_AN_ADVERT
, adv_org
);
2961 return (GEM_SUCCESS
);
2965 gem_mii_start(struct gem_dev
*dp
)
2967 DPRINTF(3, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
2969 /* make a first call of check link */
2970 dp
->mii_state
= MII_STATE_UNKNOWN
;
2971 dp
->mii_last_check
= ddi_get_lbolt();
2972 dp
->linkup_delay
= dp
->gc
.gc_mii_linkdown_timeout
;
2973 (void) gem_mii_link_watcher(dp
);
2977 gem_mii_stop(struct gem_dev
*dp
)
2979 DPRINTF(3, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
2981 /* Ensure timer routine stopped */
2982 mutex_enter(&dp
->intrlock
);
2983 if (dp
->link_watcher_id
) {
2984 while (untimeout(dp
->link_watcher_id
) == -1)
2986 dp
->link_watcher_id
= 0;
2988 mutex_exit(&dp
->intrlock
);
2992 gem_get_mac_addr_conf(struct gem_dev
*dp
)
2996 uint8_t mac
[ETHERADDRL
];
3005 DPRINTF(3, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
3007 * Get ethernet address from .conf file
3009 (void) sprintf(propname
, "mac-addr");
3010 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY
, dp
->dip
,
3011 DDI_PROP_DONTPASS
, propname
, &valstr
)) !=
3016 if (strlen(valstr
) != ETHERADDRL
*3-1) {
3025 for (i
= 0; i
< 2; i
++) {
3028 if (c
>= 'a' && c
<= 'f') {
3030 } else if (c
>= 'A' && c
<= 'F') {
3032 } else if (c
>= '0' && c
<= '9') {
3042 if (j
== ETHERADDRL
) {
3056 for (i
= 0; i
< ETHERADDRL
; i
++) {
3057 dp
->dev_addr
.ether_addr_octet
[i
] = mac
[i
];
3059 ddi_prop_free(valstr
);
3064 "!%s: read mac addr: trying .conf: syntax err %s",
3067 ddi_prop_free(valstr
);
3073 /* ============================================================== */
3075 * internal start/stop interface
3077 /* ============================================================== */
3079 gem_mac_set_rx_filter(struct gem_dev
*dp
)
3081 return ((*dp
->gc
.gc_set_rx_filter
)(dp
));
3085 * gem_mac_init: cold start
3088 gem_mac_init(struct gem_dev
*dp
)
3090 DPRINTF(1, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
3092 if (dp
->mac_suspended
) {
3093 return (GEM_FAILURE
);
3096 dp
->mac_active
= B_FALSE
;
3098 gem_init_rx_ring(dp
);
3099 gem_init_tx_ring(dp
);
3101 /* reset transmitter state */
3102 dp
->tx_blocked
= (clock_t)0;
3104 dp
->tx_reclaim_busy
= 0;
3105 dp
->tx_max_packets
= dp
->gc
.gc_tx_buf_limit
;
3107 if ((*dp
->gc
.gc_init_chip
)(dp
) != GEM_SUCCESS
) {
3108 return (GEM_FAILURE
);
3111 gem_prepare_rx_buf(dp
);
3113 return (GEM_SUCCESS
);
3116 * gem_mac_start: warm start
3119 gem_mac_start(struct gem_dev
*dp
)
3121 DPRINTF(1, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
3123 ASSERT(mutex_owned(&dp
->intrlock
));
3124 ASSERT(dp
->nic_state
== NIC_STATE_ONLINE
);
3125 ASSERT(dp
->mii_state
== MII_STATE_LINKUP
);
3127 /* enable tx and rx */
3128 mutex_enter(&dp
->xmitlock
);
3129 if (dp
->mac_suspended
) {
3130 mutex_exit(&dp
->xmitlock
);
3131 return (GEM_FAILURE
);
3133 dp
->mac_active
= B_TRUE
;
3134 mutex_exit(&dp
->xmitlock
);
3136 /* setup rx buffers */
3137 (*dp
->gc
.gc_rx_start
)(dp
,
3138 SLOT(dp
->rx_active_head
, dp
->gc
.gc_rx_ring_size
),
3139 dp
->rx_active_tail
- dp
->rx_active_head
);
3141 if ((*dp
->gc
.gc_start_chip
)(dp
) != GEM_SUCCESS
) {
3142 cmn_err(CE_WARN
, "%s: %s: start_chip: failed",
3143 dp
->name
, __func__
);
3144 return (GEM_FAILURE
);
3147 mutex_enter(&dp
->xmitlock
);
3149 /* load untranmitted packets to the nic */
3150 ASSERT(dp
->tx_softq_tail
- dp
->tx_softq_head
>= 0);
3151 if (dp
->tx_softq_tail
- dp
->tx_softq_head
> 0) {
3152 gem_tx_load_descs_oo(dp
,
3153 dp
->tx_softq_head
, dp
->tx_softq_tail
,
3155 /* issue preloaded tx buffers */
3156 gem_tx_start_unit(dp
);
3159 mutex_exit(&dp
->xmitlock
);
3161 return (GEM_SUCCESS
);
3165 gem_mac_stop(struct gem_dev
*dp
, uint_t flags
)
3168 int wait_time
; /* in uS */
3169 #ifdef GEM_DEBUG_LEVEL
3172 int ret
= GEM_SUCCESS
;
3174 DPRINTF(1, (CE_CONT
, "!%s: %s: called, rx_buf_free:%d",
3175 dp
->name
, __func__
, dp
->rx_buf_freecnt
));
3177 ASSERT(mutex_owned(&dp
->intrlock
));
3178 ASSERT(!mutex_owned(&dp
->xmitlock
));
3183 mutex_enter(&dp
->xmitlock
);
3184 if (dp
->mac_suspended
) {
3185 mutex_exit(&dp
->xmitlock
);
3186 return (GEM_SUCCESS
);
3188 dp
->mac_active
= B_FALSE
;
3190 while (dp
->tx_busy
> 0) {
3191 cv_wait(&dp
->tx_drain_cv
, &dp
->xmitlock
);
3193 mutex_exit(&dp
->xmitlock
);
3195 if ((flags
& GEM_RESTART_NOWAIT
) == 0) {
3197 * Wait for all tx buffers sent.
3200 2 * (8 * MAXPKTBUF(dp
) / gem_speed_value
[dp
->speed
]) *
3201 (dp
->tx_active_tail
- dp
->tx_active_head
);
3203 DPRINTF(0, (CE_CONT
, "%s: %s: max drain time: %d uS",
3204 dp
->name
, __func__
, wait_time
));
3206 #ifdef GEM_DEBUG_LEVEL
3207 now
= ddi_get_lbolt();
3209 while (dp
->tx_active_tail
!= dp
->tx_active_head
) {
3210 if (i
> wait_time
) {
3212 cmn_err(CE_NOTE
, "%s: %s timeout: tx drain",
3213 dp
->name
, __func__
);
3216 (void) gem_reclaim_txbuf(dp
);
3220 DPRINTF(0, (CE_NOTE
,
3221 "!%s: %s: the nic have drained in %d uS, real %d mS",
3222 dp
->name
, __func__
, i
,
3223 10*((int)(ddi_get_lbolt() - now
))));
3227 * Now we can stop the nic safely.
3229 if ((*dp
->gc
.gc_stop_chip
)(dp
) != GEM_SUCCESS
) {
3230 cmn_err(CE_NOTE
, "%s: %s: resetting the chip to stop it",
3231 dp
->name
, __func__
);
3232 if ((*dp
->gc
.gc_reset_chip
)(dp
) != GEM_SUCCESS
) {
3233 cmn_err(CE_WARN
, "%s: %s: failed to reset chip",
3234 dp
->name
, __func__
);
3239 * Clear all rx buffers
3241 if (flags
& GEM_RESTART_KEEP_BUF
) {
3242 (void) gem_receive(dp
);
3244 gem_clean_rx_buf(dp
);
3247 * Update final statistics
3249 (*dp
->gc
.gc_get_stats
)(dp
);
3252 * Clear all pended tx packets
3254 ASSERT(dp
->tx_active_tail
== dp
->tx_softq_head
);
3255 ASSERT(dp
->tx_softq_tail
== dp
->tx_free_head
);
3256 if (flags
& GEM_RESTART_KEEP_BUF
) {
3257 /* restore active tx buffers */
3258 dp
->tx_active_tail
= dp
->tx_active_head
;
3259 dp
->tx_softq_head
= dp
->tx_active_head
;
3261 gem_clean_tx_buf(dp
);
3268 gem_add_multicast(struct gem_dev
*dp
, const uint8_t *ep
)
3273 DPRINTF(1, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
3275 mutex_enter(&dp
->intrlock
);
3276 if (dp
->mac_suspended
) {
3277 mutex_exit(&dp
->intrlock
);
3278 return (GEM_FAILURE
);
3281 if (dp
->mc_count_req
++ < GEM_MAXMC
) {
3282 /* append the new address at the end of the mclist */
3284 bcopy(ep
, dp
->mc_list
[cnt
].addr
.ether_addr_octet
,
3286 if (dp
->gc
.gc_multicast_hash
) {
3287 dp
->mc_list
[cnt
].hash
=
3288 (*dp
->gc
.gc_multicast_hash
)(dp
, (uint8_t *)ep
);
3290 dp
->mc_count
= cnt
+ 1;
3293 if (dp
->mc_count_req
!= dp
->mc_count
) {
3294 /* multicast address list overflow */
3295 dp
->rxmode
|= RXMODE_MULTI_OVF
;
3297 dp
->rxmode
&= ~RXMODE_MULTI_OVF
;
3300 /* tell new multicast list to the hardware */
3301 err
= gem_mac_set_rx_filter(dp
);
3303 mutex_exit(&dp
->intrlock
);
3309 gem_remove_multicast(struct gem_dev
*dp
, const uint8_t *ep
)
3316 DPRINTF(1, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
3318 mutex_enter(&dp
->intrlock
);
3319 if (dp
->mac_suspended
) {
3320 mutex_exit(&dp
->intrlock
);
3321 return (GEM_FAILURE
);
3326 for (i
= 0; i
< cnt
; i
++) {
3327 if (bcmp(ep
, &dp
->mc_list
[i
].addr
, ETHERADDRL
)) {
3330 /* shrink the mclist by copying forward */
3331 len
= (cnt
- (i
+ 1)) * sizeof (*dp
->mc_list
);
3333 bcopy(&dp
->mc_list
[i
+1], &dp
->mc_list
[i
], len
);
3339 if (dp
->mc_count_req
!= dp
->mc_count
) {
3340 /* multicast address list overflow */
3341 dp
->rxmode
|= RXMODE_MULTI_OVF
;
3343 dp
->rxmode
&= ~RXMODE_MULTI_OVF
;
3345 /* In gem v2, don't hold xmitlock on calling set_rx_filter */
3346 err
= gem_mac_set_rx_filter(dp
);
3348 mutex_exit(&dp
->intrlock
);
3353 /* ============================================================== */
3357 /* ============================================================== */
3361 PARAM_ASYM_PAUSE_CAP
,
3370 PARAM_ADV_AUTONEG_CAP
,
3371 PARAM_ADV_PAUSE_CAP
,
3372 PARAM_ADV_ASYM_PAUSE_CAP
,
3373 PARAM_ADV_1000FDX_CAP
,
3374 PARAM_ADV_1000HDX_CAP
,
3375 PARAM_ADV_100T4_CAP
,
3376 PARAM_ADV_100FDX_CAP
,
3377 PARAM_ADV_100HDX_CAP
,
3378 PARAM_ADV_10FDX_CAP
,
3379 PARAM_ADV_10HDX_CAP
,
3381 PARAM_LP_AUTONEG_CAP
,
3383 PARAM_LP_ASYM_PAUSE_CAP
,
3384 PARAM_LP_1000FDX_CAP
,
3385 PARAM_LP_1000HDX_CAP
,
3387 PARAM_LP_100FDX_CAP
,
3388 PARAM_LP_100HDX_CAP
,
3397 PARAM_LINK_RX_PAUSE
,
3398 PARAM_LINK_TX_PAUSE
,
3410 IOC_INVAL
= -1, /* bad, NAK with EINVAL */
3411 IOC_DONE
, /* OK, reply sent */
3412 IOC_ACK
, /* OK, just send ACK */
3413 IOC_REPLY
, /* OK, just send reply */
3414 IOC_RESTART_ACK
, /* OK, restart & ACK */
3415 IOC_RESTART_REPLY
/* OK, restart & reply */
3424 gem_param_get(queue_t
*q
, mblk_t
*mp
, caddr_t arg
, cred_t
*credp
)
3426 struct gem_dev
*dp
= ((struct gem_nd_arg
*)(void *)arg
)->dp
;
3427 int item
= ((struct gem_nd_arg
*)(void *)arg
)->item
;
3430 DPRINTF(0, (CE_CONT
, "!%s: %s: called, item:%d",
3431 dp
->name
, __func__
, item
));
3434 case PARAM_AUTONEG_CAP
:
3435 val
= BOOLEAN(dp
->mii_status
& MII_STATUS_CANAUTONEG
);
3436 DPRINTF(0, (CE_CONT
, "autoneg_cap:%d", val
));
3439 case PARAM_PAUSE_CAP
:
3440 val
= BOOLEAN(dp
->gc
.gc_flow_control
& 1);
3443 case PARAM_ASYM_PAUSE_CAP
:
3444 val
= BOOLEAN(dp
->gc
.gc_flow_control
& 2);
3447 case PARAM_1000FDX_CAP
:
3448 val
= (dp
->mii_xstatus
& MII_XSTATUS_1000BASET_FD
) ||
3449 (dp
->mii_xstatus
& MII_XSTATUS_1000BASEX_FD
);
3452 case PARAM_1000HDX_CAP
:
3453 val
= (dp
->mii_xstatus
& MII_XSTATUS_1000BASET
) ||
3454 (dp
->mii_xstatus
& MII_XSTATUS_1000BASEX
);
3457 case PARAM_100T4_CAP
:
3458 val
= BOOLEAN(dp
->mii_status
& MII_STATUS_100_BASE_T4
);
3461 case PARAM_100FDX_CAP
:
3462 val
= BOOLEAN(dp
->mii_status
& MII_STATUS_100_BASEX_FD
);
3465 case PARAM_100HDX_CAP
:
3466 val
= BOOLEAN(dp
->mii_status
& MII_STATUS_100_BASEX
);
3469 case PARAM_10FDX_CAP
:
3470 val
= BOOLEAN(dp
->mii_status
& MII_STATUS_10_FD
);
3473 case PARAM_10HDX_CAP
:
3474 val
= BOOLEAN(dp
->mii_status
& MII_STATUS_10
);
3477 case PARAM_ADV_AUTONEG_CAP
:
3478 val
= dp
->anadv_autoneg
;
3481 case PARAM_ADV_PAUSE_CAP
:
3482 val
= BOOLEAN(dp
->anadv_flow_control
& 1);
3485 case PARAM_ADV_ASYM_PAUSE_CAP
:
3486 val
= BOOLEAN(dp
->anadv_flow_control
& 2);
3489 case PARAM_ADV_1000FDX_CAP
:
3490 val
= dp
->anadv_1000fdx
;
3493 case PARAM_ADV_1000HDX_CAP
:
3494 val
= dp
->anadv_1000hdx
;
3497 case PARAM_ADV_100T4_CAP
:
3498 val
= dp
->anadv_100t4
;
3501 case PARAM_ADV_100FDX_CAP
:
3502 val
= dp
->anadv_100fdx
;
3505 case PARAM_ADV_100HDX_CAP
:
3506 val
= dp
->anadv_100hdx
;
3509 case PARAM_ADV_10FDX_CAP
:
3510 val
= dp
->anadv_10fdx
;
3513 case PARAM_ADV_10HDX_CAP
:
3514 val
= dp
->anadv_10hdx
;
3517 case PARAM_LP_AUTONEG_CAP
:
3518 val
= BOOLEAN(dp
->mii_exp
& MII_AN_EXP_LPCANAN
);
3521 case PARAM_LP_PAUSE_CAP
:
3522 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_PAUSE
);
3525 case PARAM_LP_ASYM_PAUSE_CAP
:
3526 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_ASMPAUSE
);
3529 case PARAM_LP_1000FDX_CAP
:
3530 val
= BOOLEAN(dp
->mii_stat1000
& MII_1000TS_LP_FULL
);
3533 case PARAM_LP_1000HDX_CAP
:
3534 val
= BOOLEAN(dp
->mii_stat1000
& MII_1000TS_LP_HALF
);
3537 case PARAM_LP_100T4_CAP
:
3538 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_100BASE_T4
);
3541 case PARAM_LP_100FDX_CAP
:
3542 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_100BASE_TX_FD
);
3545 case PARAM_LP_100HDX_CAP
:
3546 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_100BASE_TX
);
3549 case PARAM_LP_10FDX_CAP
:
3550 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_10BASE_T_FD
);
3553 case PARAM_LP_10HDX_CAP
:
3554 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_10BASE_T
);
3557 case PARAM_LINK_STATUS
:
3558 val
= (dp
->mii_state
== MII_STATE_LINKUP
);
3561 case PARAM_LINK_SPEED
:
3562 val
= gem_speed_value
[dp
->speed
];
3565 case PARAM_LINK_DUPLEX
:
3567 if (dp
->mii_state
== MII_STATE_LINKUP
) {
3568 val
= dp
->full_duplex
? 2 : 1;
3572 case PARAM_LINK_AUTONEG
:
3573 val
= BOOLEAN(dp
->mii_exp
& MII_AN_EXP_LPCANAN
);
3576 case PARAM_LINK_RX_PAUSE
:
3577 val
= (dp
->flow_control
== FLOW_CONTROL_SYMMETRIC
) ||
3578 (dp
->flow_control
== FLOW_CONTROL_RX_PAUSE
);
3581 case PARAM_LINK_TX_PAUSE
:
3582 val
= (dp
->flow_control
== FLOW_CONTROL_SYMMETRIC
) ||
3583 (dp
->flow_control
== FLOW_CONTROL_TX_PAUSE
);
3587 case PARAM_RESUME_TEST
:
3592 cmn_err(CE_WARN
, "%s: unimplemented ndd control (%d)",
3597 (void) mi_mpprintf(mp
, "%ld", val
);
3603 gem_param_set(queue_t
*q
, mblk_t
*mp
, char *value
, caddr_t arg
, cred_t
*credp
)
3605 struct gem_dev
*dp
= ((struct gem_nd_arg
*)(void *)arg
)->dp
;
3606 int item
= ((struct gem_nd_arg
*)(void *)arg
)->item
;
3610 DPRINTF(0, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
3611 if (ddi_strtol(value
, &end
, 10, &val
)) {
3619 case PARAM_ADV_AUTONEG_CAP
:
3620 if (val
!= 0 && val
!= 1) {
3623 if (val
&& (dp
->mii_status
& MII_STATUS_CANAUTONEG
) == 0) {
3626 dp
->anadv_autoneg
= (int)val
;
3629 case PARAM_ADV_PAUSE_CAP
:
3630 if (val
!= 0 && val
!= 1) {
3634 dp
->anadv_flow_control
|= 1;
3636 dp
->anadv_flow_control
&= ~1;
3640 case PARAM_ADV_ASYM_PAUSE_CAP
:
3641 if (val
!= 0 && val
!= 1) {
3645 dp
->anadv_flow_control
|= 2;
3647 dp
->anadv_flow_control
&= ~2;
3651 case PARAM_ADV_1000FDX_CAP
:
3652 if (val
!= 0 && val
!= 1) {
3655 if (val
&& (dp
->mii_xstatus
&
3656 (MII_XSTATUS_1000BASET_FD
|
3657 MII_XSTATUS_1000BASEX_FD
)) == 0) {
3660 dp
->anadv_1000fdx
= (int)val
;
3663 case PARAM_ADV_1000HDX_CAP
:
3664 if (val
!= 0 && val
!= 1) {
3667 if (val
&& (dp
->mii_xstatus
&
3668 (MII_XSTATUS_1000BASET
| MII_XSTATUS_1000BASEX
)) == 0) {
3671 dp
->anadv_1000hdx
= (int)val
;
3674 case PARAM_ADV_100T4_CAP
:
3675 if (val
!= 0 && val
!= 1) {
3678 if (val
&& (dp
->mii_status
& MII_STATUS_100_BASE_T4
) == 0) {
3681 dp
->anadv_100t4
= (int)val
;
3684 case PARAM_ADV_100FDX_CAP
:
3685 if (val
!= 0 && val
!= 1) {
3688 if (val
&& (dp
->mii_status
& MII_STATUS_100_BASEX_FD
) == 0) {
3691 dp
->anadv_100fdx
= (int)val
;
3694 case PARAM_ADV_100HDX_CAP
:
3695 if (val
!= 0 && val
!= 1) {
3698 if (val
&& (dp
->mii_status
& MII_STATUS_100_BASEX
) == 0) {
3701 dp
->anadv_100hdx
= (int)val
;
3704 case PARAM_ADV_10FDX_CAP
:
3705 if (val
!= 0 && val
!= 1) {
3708 if (val
&& (dp
->mii_status
& MII_STATUS_10_FD
) == 0) {
3711 dp
->anadv_10fdx
= (int)val
;
3714 case PARAM_ADV_10HDX_CAP
:
3715 if (val
!= 0 && val
!= 1) {
3718 if (val
&& (dp
->mii_status
& MII_STATUS_10
) == 0) {
3721 dp
->anadv_10hdx
= (int)val
;
3726 gem_choose_forcedmode(dp
);
3728 dp
->mii_state
= MII_STATE_UNKNOWN
;
3729 if (dp
->gc
.gc_mii_hw_link_detection
&& dp
->link_watcher_id
== 0) {
3730 /* XXX - Can we ignore the return code ? */
3731 (void) gem_mii_link_check(dp
);
3740 gem_nd_load(struct gem_dev
*dp
, char *name
, ndgetf_t gf
, ndsetf_t sf
, int item
)
3742 struct gem_nd_arg
*arg
;
3745 ASSERT(item
< PARAM_COUNT
);
3747 arg
= &((struct gem_nd_arg
*)(void *)dp
->nd_arg_p
)[item
];
3751 DPRINTF(2, (CE_CONT
, "!%s: %s: name:%s, item:%d",
3752 dp
->name
, __func__
, name
, item
));
3753 (void) nd_load(&dp
->nd_data_p
, name
, gf
, sf
, (caddr_t
)arg
);
3757 gem_nd_setup(struct gem_dev
*dp
)
3759 DPRINTF(0, (CE_CONT
, "!%s: %s: called, mii_status:0x%b",
3760 dp
->name
, __func__
, dp
->mii_status
, MII_STATUS_BITS
));
3762 ASSERT(dp
->nd_arg_p
== NULL
);
3765 kmem_zalloc(sizeof (struct gem_nd_arg
) * PARAM_COUNT
, KM_SLEEP
);
3767 #define SETFUNC(x) ((x) ? gem_param_set : NULL)
3769 gem_nd_load(dp
, "autoneg_cap",
3770 gem_param_get
, NULL
, PARAM_AUTONEG_CAP
);
3771 gem_nd_load(dp
, "pause_cap",
3772 gem_param_get
, NULL
, PARAM_PAUSE_CAP
);
3773 gem_nd_load(dp
, "asym_pause_cap",
3774 gem_param_get
, NULL
, PARAM_ASYM_PAUSE_CAP
);
3775 gem_nd_load(dp
, "1000fdx_cap",
3776 gem_param_get
, NULL
, PARAM_1000FDX_CAP
);
3777 gem_nd_load(dp
, "1000hdx_cap",
3778 gem_param_get
, NULL
, PARAM_1000HDX_CAP
);
3779 gem_nd_load(dp
, "100T4_cap",
3780 gem_param_get
, NULL
, PARAM_100T4_CAP
);
3781 gem_nd_load(dp
, "100fdx_cap",
3782 gem_param_get
, NULL
, PARAM_100FDX_CAP
);
3783 gem_nd_load(dp
, "100hdx_cap",
3784 gem_param_get
, NULL
, PARAM_100HDX_CAP
);
3785 gem_nd_load(dp
, "10fdx_cap",
3786 gem_param_get
, NULL
, PARAM_10FDX_CAP
);
3787 gem_nd_load(dp
, "10hdx_cap",
3788 gem_param_get
, NULL
, PARAM_10HDX_CAP
);
3790 /* Our advertised capabilities */
3791 gem_nd_load(dp
, "adv_autoneg_cap", gem_param_get
,
3792 SETFUNC(dp
->mii_status
& MII_STATUS_CANAUTONEG
),
3793 PARAM_ADV_AUTONEG_CAP
);
3794 gem_nd_load(dp
, "adv_pause_cap", gem_param_get
,
3795 SETFUNC(dp
->gc
.gc_flow_control
& 1),
3796 PARAM_ADV_PAUSE_CAP
);
3797 gem_nd_load(dp
, "adv_asym_pause_cap", gem_param_get
,
3798 SETFUNC(dp
->gc
.gc_flow_control
& 2),
3799 PARAM_ADV_ASYM_PAUSE_CAP
);
3800 gem_nd_load(dp
, "adv_1000fdx_cap", gem_param_get
,
3801 SETFUNC(dp
->mii_xstatus
&
3802 (MII_XSTATUS_1000BASEX_FD
| MII_XSTATUS_1000BASET_FD
)),
3803 PARAM_ADV_1000FDX_CAP
);
3804 gem_nd_load(dp
, "adv_1000hdx_cap", gem_param_get
,
3805 SETFUNC(dp
->mii_xstatus
&
3806 (MII_XSTATUS_1000BASEX
| MII_XSTATUS_1000BASET
)),
3807 PARAM_ADV_1000HDX_CAP
);
3808 gem_nd_load(dp
, "adv_100T4_cap", gem_param_get
,
3809 SETFUNC((dp
->mii_status
& MII_STATUS_100_BASE_T4
) &&
3810 !dp
->mii_advert_ro
),
3811 PARAM_ADV_100T4_CAP
);
3812 gem_nd_load(dp
, "adv_100fdx_cap", gem_param_get
,
3813 SETFUNC((dp
->mii_status
& MII_STATUS_100_BASEX_FD
) &&
3814 !dp
->mii_advert_ro
),
3815 PARAM_ADV_100FDX_CAP
);
3816 gem_nd_load(dp
, "adv_100hdx_cap", gem_param_get
,
3817 SETFUNC((dp
->mii_status
& MII_STATUS_100_BASEX
) &&
3818 !dp
->mii_advert_ro
),
3819 PARAM_ADV_100HDX_CAP
);
3820 gem_nd_load(dp
, "adv_10fdx_cap", gem_param_get
,
3821 SETFUNC((dp
->mii_status
& MII_STATUS_10_FD
) &&
3822 !dp
->mii_advert_ro
),
3823 PARAM_ADV_10FDX_CAP
);
3824 gem_nd_load(dp
, "adv_10hdx_cap", gem_param_get
,
3825 SETFUNC((dp
->mii_status
& MII_STATUS_10
) &&
3826 !dp
->mii_advert_ro
),
3827 PARAM_ADV_10HDX_CAP
);
3829 /* Partner's advertised capabilities */
3830 gem_nd_load(dp
, "lp_autoneg_cap",
3831 gem_param_get
, NULL
, PARAM_LP_AUTONEG_CAP
);
3832 gem_nd_load(dp
, "lp_pause_cap",
3833 gem_param_get
, NULL
, PARAM_LP_PAUSE_CAP
);
3834 gem_nd_load(dp
, "lp_asym_pause_cap",
3835 gem_param_get
, NULL
, PARAM_LP_ASYM_PAUSE_CAP
);
3836 gem_nd_load(dp
, "lp_1000fdx_cap",
3837 gem_param_get
, NULL
, PARAM_LP_1000FDX_CAP
);
3838 gem_nd_load(dp
, "lp_1000hdx_cap",
3839 gem_param_get
, NULL
, PARAM_LP_1000HDX_CAP
);
3840 gem_nd_load(dp
, "lp_100T4_cap",
3841 gem_param_get
, NULL
, PARAM_LP_100T4_CAP
);
3842 gem_nd_load(dp
, "lp_100fdx_cap",
3843 gem_param_get
, NULL
, PARAM_LP_100FDX_CAP
);
3844 gem_nd_load(dp
, "lp_100hdx_cap",
3845 gem_param_get
, NULL
, PARAM_LP_100HDX_CAP
);
3846 gem_nd_load(dp
, "lp_10fdx_cap",
3847 gem_param_get
, NULL
, PARAM_LP_10FDX_CAP
);
3848 gem_nd_load(dp
, "lp_10hdx_cap",
3849 gem_param_get
, NULL
, PARAM_LP_10HDX_CAP
);
3851 /* Current operating modes */
3852 gem_nd_load(dp
, "link_status",
3853 gem_param_get
, NULL
, PARAM_LINK_STATUS
);
3854 gem_nd_load(dp
, "link_speed",
3855 gem_param_get
, NULL
, PARAM_LINK_SPEED
);
3856 gem_nd_load(dp
, "link_duplex",
3857 gem_param_get
, NULL
, PARAM_LINK_DUPLEX
);
3858 gem_nd_load(dp
, "link_autoneg",
3859 gem_param_get
, NULL
, PARAM_LINK_AUTONEG
);
3860 gem_nd_load(dp
, "link_rx_pause",
3861 gem_param_get
, NULL
, PARAM_LINK_RX_PAUSE
);
3862 gem_nd_load(dp
, "link_tx_pause",
3863 gem_param_get
, NULL
, PARAM_LINK_TX_PAUSE
);
3865 gem_nd_load(dp
, "resume_test",
3866 gem_param_get
, NULL
, PARAM_RESUME_TEST
);
3873 gem_nd_ioctl(struct gem_dev
*dp
, queue_t
*wq
, mblk_t
*mp
, struct iocblk
*iocp
)
3877 ASSERT(mutex_owned(&dp
->intrlock
));
3879 DPRINTF(0, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
3881 switch (iocp
->ioc_cmd
) {
3883 ok
= nd_getset(wq
, dp
->nd_data_p
, mp
);
3884 DPRINTF(0, (CE_CONT
,
3885 "%s: get %s", dp
->name
, ok
? "OK" : "FAIL"));
3886 return (ok
? IOC_REPLY
: IOC_INVAL
);
3889 ok
= nd_getset(wq
, dp
->nd_data_p
, mp
);
3891 DPRINTF(0, (CE_CONT
, "%s: set %s err %d",
3892 dp
->name
, ok
? "OK" : "FAIL", iocp
->ioc_error
));
3898 if (iocp
->ioc_error
) {
3902 return (IOC_RESTART_REPLY
);
3905 cmn_err(CE_WARN
, "%s: invalid cmd 0x%x", dp
->name
, iocp
->ioc_cmd
);
3911 gem_nd_cleanup(struct gem_dev
*dp
)
3913 ASSERT(dp
->nd_data_p
!= NULL
);
3914 ASSERT(dp
->nd_arg_p
!= NULL
);
3916 nd_free(&dp
->nd_data_p
);
3918 kmem_free(dp
->nd_arg_p
, sizeof (struct gem_nd_arg
) * PARAM_COUNT
);
3919 dp
->nd_arg_p
= NULL
;
3923 gem_mac_ioctl(struct gem_dev
*dp
, queue_t
*wq
, mblk_t
*mp
)
3925 struct iocblk
*iocp
;
3926 enum ioc_reply status
;
3929 DPRINTF(0, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
3932 * Validate the command before bothering with the mutex ...
3934 iocp
= (void *)mp
->b_rptr
;
3935 iocp
->ioc_error
= 0;
3936 cmd
= iocp
->ioc_cmd
;
3938 DPRINTF(0, (CE_CONT
, "%s: %s cmd:0x%x", dp
->name
, __func__
, cmd
));
3940 mutex_enter(&dp
->intrlock
);
3941 mutex_enter(&dp
->xmitlock
);
3951 status
= gem_nd_ioctl(dp
, wq
, mp
, iocp
);
3955 mutex_exit(&dp
->xmitlock
);
3956 mutex_exit(&dp
->intrlock
);
3959 if (cmd
== ND_GET
) {
3960 gem_suspend(dp
->dip
);
3961 gem_resume(dp
->dip
);
3965 * Finally, decide how to reply
3971 * Error, reply with a NAK and EINVAL or the specified error
3973 miocnak(wq
, mp
, 0, iocp
->ioc_error
== 0 ?
3974 EINVAL
: iocp
->ioc_error
);
3979 * OK, reply already sent
3983 case IOC_RESTART_ACK
:
3986 * OK, reply with an ACK
3988 miocack(wq
, mp
, 0, 0);
3991 case IOC_RESTART_REPLY
:
3994 * OK, send prepared reply as ACK or NAK
3996 mp
->b_datap
->db_type
=
3997 iocp
->ioc_error
== 0 ? M_IOCACK
: M_IOCNAK
;
4004 #define XCVR_UNDEFINED 0
4007 #define XCVR_100T4 3
4009 #define XCVR_100T2 5
4010 #define XCVR_1000X 6
4011 #define XCVR_1000T 7
4014 gem_mac_xcvr_inuse(struct gem_dev
*dp
)
4016 int val
= XCVR_UNDEFINED
;
4018 if ((dp
->mii_status
& MII_STATUS_XSTATUS
) == 0) {
4019 if (dp
->mii_status
& MII_STATUS_100_BASE_T4
) {
4021 } else if (dp
->mii_status
&
4022 (MII_STATUS_100_BASEX_FD
|
4023 MII_STATUS_100_BASEX
)) {
4025 } else if (dp
->mii_status
&
4026 (MII_STATUS_100_BASE_T2_FD
|
4027 MII_STATUS_100_BASE_T2
)) {
4029 } else if (dp
->mii_status
&
4030 (MII_STATUS_10_FD
| MII_STATUS_10
)) {
4033 } else if (dp
->mii_xstatus
&
4034 (MII_XSTATUS_1000BASET_FD
| MII_XSTATUS_1000BASET
)) {
4036 } else if (dp
->mii_xstatus
&
4037 (MII_XSTATUS_1000BASEX_FD
| MII_XSTATUS_1000BASEX
)) {
4044 /* ============================================================== */
4048 /* ============================================================== */
4049 static int gem_m_getstat(void *, uint_t
, uint64_t *);
4050 static int gem_m_start(void *);
4051 static void gem_m_stop(void *);
4052 static int gem_m_setpromisc(void *, boolean_t
);
4053 static int gem_m_multicst(void *, boolean_t
, const uint8_t *);
4054 static int gem_m_unicst(void *, const uint8_t *);
4055 static mblk_t
*gem_m_tx(void *, mblk_t
*);
4056 static void gem_m_ioctl(void *, queue_t
*, mblk_t
*);
4057 static boolean_t
gem_m_getcapab(void *, mac_capab_t
, void *);
4059 #define GEM_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB)
4061 static mac_callbacks_t gem_m_callbacks
= {
4062 GEM_M_CALLBACK_FLAGS
,
4076 gem_m_start(void *arg
)
4079 struct gem_dev
*dp
= arg
;
4081 DPRINTF(0, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
4083 mutex_enter(&dp
->intrlock
);
4084 if (dp
->mac_suspended
) {
4088 if (gem_mac_init(dp
) != GEM_SUCCESS
) {
4092 dp
->nic_state
= NIC_STATE_INITIALIZED
;
4094 /* reset rx filter state */
4096 dp
->mc_count_req
= 0;
4098 /* setup media mode if the link have been up */
4099 if (dp
->mii_state
== MII_STATE_LINKUP
) {
4100 (dp
->gc
.gc_set_media
)(dp
);
4103 /* setup initial rx filter */
4104 bcopy(dp
->dev_addr
.ether_addr_octet
,
4105 dp
->cur_addr
.ether_addr_octet
, ETHERADDRL
);
4106 dp
->rxmode
|= RXMODE_ENABLE
;
4108 if (gem_mac_set_rx_filter(dp
) != GEM_SUCCESS
) {
4113 dp
->nic_state
= NIC_STATE_ONLINE
;
4114 if (dp
->mii_state
== MII_STATE_LINKUP
) {
4115 if (gem_mac_start(dp
) != GEM_SUCCESS
) {
4121 dp
->timeout_id
= timeout((void (*)(void *))gem_tx_timeout
,
4122 (void *)dp
, dp
->gc
.gc_tx_timeout_interval
);
4123 mutex_exit(&dp
->intrlock
);
4127 dp
->nic_state
= NIC_STATE_STOPPED
;
4128 mutex_exit(&dp
->intrlock
);
4133 gem_m_stop(void *arg
)
4135 struct gem_dev
*dp
= arg
;
4137 DPRINTF(0, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
4140 mutex_enter(&dp
->intrlock
);
4141 if (dp
->mac_suspended
) {
4142 mutex_exit(&dp
->intrlock
);
4145 dp
->rxmode
&= ~RXMODE_ENABLE
;
4146 (void) gem_mac_set_rx_filter(dp
);
4147 mutex_exit(&dp
->intrlock
);
4149 /* stop tx timeout watcher */
4150 if (dp
->timeout_id
) {
4151 while (untimeout(dp
->timeout_id
) == -1)
4156 /* make the nic state inactive */
4157 mutex_enter(&dp
->intrlock
);
4158 if (dp
->mac_suspended
) {
4159 mutex_exit(&dp
->intrlock
);
4162 dp
->nic_state
= NIC_STATE_STOPPED
;
4164 /* we need deassert mac_active due to block interrupt handler */
4165 mutex_enter(&dp
->xmitlock
);
4166 dp
->mac_active
= B_FALSE
;
4167 mutex_exit(&dp
->xmitlock
);
4169 /* block interrupts */
4170 while (dp
->intr_busy
) {
4171 cv_wait(&dp
->tx_drain_cv
, &dp
->intrlock
);
4173 (void) gem_mac_stop(dp
, 0);
4174 mutex_exit(&dp
->intrlock
);
4178 gem_m_multicst(void *arg
, boolean_t add
, const uint8_t *ep
)
4182 struct gem_dev
*dp
= arg
;
4184 DPRINTF(0, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
4187 ret
= gem_add_multicast(dp
, ep
);
4189 ret
= gem_remove_multicast(dp
, ep
);
4193 if (ret
!= GEM_SUCCESS
) {
4201 gem_m_setpromisc(void *arg
, boolean_t on
)
4203 int err
= 0; /* no error */
4204 struct gem_dev
*dp
= arg
;
4206 DPRINTF(0, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
4208 mutex_enter(&dp
->intrlock
);
4209 if (dp
->mac_suspended
) {
4210 mutex_exit(&dp
->intrlock
);
4214 dp
->rxmode
|= RXMODE_PROMISC
;
4216 dp
->rxmode
&= ~RXMODE_PROMISC
;
4219 if (gem_mac_set_rx_filter(dp
) != GEM_SUCCESS
) {
4222 mutex_exit(&dp
->intrlock
);
4228 gem_m_getstat(void *arg
, uint_t stat
, uint64_t *valp
)
4230 struct gem_dev
*dp
= arg
;
4231 struct gem_stats
*gstp
= &dp
->stats
;
4234 DPRINTF(1, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
4236 if (mutex_owned(&dp
->intrlock
)) {
4237 if (dp
->mac_suspended
) {
4241 mutex_enter(&dp
->intrlock
);
4242 if (dp
->mac_suspended
) {
4243 mutex_exit(&dp
->intrlock
);
4246 mutex_exit(&dp
->intrlock
);
4249 if ((*dp
->gc
.gc_get_stats
)(dp
) != GEM_SUCCESS
) {
4254 case MAC_STAT_IFSPEED
:
4255 val
= gem_speed_value
[dp
->speed
] *1000000ull;
4258 case MAC_STAT_MULTIRCV
:
4262 case MAC_STAT_BRDCSTRCV
:
4266 case MAC_STAT_MULTIXMT
:
4270 case MAC_STAT_BRDCSTXMT
:
4274 case MAC_STAT_NORCVBUF
:
4275 val
= gstp
->norcvbuf
+ gstp
->missed
;
4278 case MAC_STAT_IERRORS
:
4282 case MAC_STAT_NOXMTBUF
:
4283 val
= gstp
->noxmtbuf
;
4286 case MAC_STAT_OERRORS
:
4290 case MAC_STAT_COLLISIONS
:
4291 val
= gstp
->collisions
;
4294 case MAC_STAT_RBYTES
:
4298 case MAC_STAT_IPACKETS
:
4299 val
= gstp
->rpackets
;
4302 case MAC_STAT_OBYTES
:
4306 case MAC_STAT_OPACKETS
:
4307 val
= gstp
->opackets
;
4310 case MAC_STAT_UNDERFLOWS
:
4311 val
= gstp
->underflow
;
4314 case MAC_STAT_OVERFLOWS
:
4315 val
= gstp
->overflow
;
4318 case ETHER_STAT_ALIGN_ERRORS
:
4322 case ETHER_STAT_FCS_ERRORS
:
4326 case ETHER_STAT_FIRST_COLLISIONS
:
4327 val
= gstp
->first_coll
;
4330 case ETHER_STAT_MULTI_COLLISIONS
:
4331 val
= gstp
->multi_coll
;
4334 case ETHER_STAT_SQE_ERRORS
:
4338 case ETHER_STAT_DEFER_XMTS
:
4342 case ETHER_STAT_TX_LATE_COLLISIONS
:
4343 val
= gstp
->xmtlatecoll
;
4346 case ETHER_STAT_EX_COLLISIONS
:
4350 case ETHER_STAT_MACXMT_ERRORS
:
4351 val
= gstp
->xmit_internal_err
;
4354 case ETHER_STAT_CARRIER_ERRORS
:
4355 val
= gstp
->nocarrier
;
4358 case ETHER_STAT_TOOLONG_ERRORS
:
4359 val
= gstp
->frame_too_long
;
4362 case ETHER_STAT_MACRCV_ERRORS
:
4363 val
= gstp
->rcv_internal_err
;
4366 case ETHER_STAT_XCVR_ADDR
:
4367 val
= dp
->mii_phy_addr
;
4370 case ETHER_STAT_XCVR_ID
:
4371 val
= dp
->mii_phy_id
;
4374 case ETHER_STAT_XCVR_INUSE
:
4375 val
= gem_mac_xcvr_inuse(dp
);
4378 case ETHER_STAT_CAP_1000FDX
:
4379 val
= (dp
->mii_xstatus
& MII_XSTATUS_1000BASET_FD
) ||
4380 (dp
->mii_xstatus
& MII_XSTATUS_1000BASEX_FD
);
4383 case ETHER_STAT_CAP_1000HDX
:
4384 val
= (dp
->mii_xstatus
& MII_XSTATUS_1000BASET
) ||
4385 (dp
->mii_xstatus
& MII_XSTATUS_1000BASEX
);
4388 case ETHER_STAT_CAP_100FDX
:
4389 val
= BOOLEAN(dp
->mii_status
& MII_STATUS_100_BASEX_FD
);
4392 case ETHER_STAT_CAP_100HDX
:
4393 val
= BOOLEAN(dp
->mii_status
& MII_STATUS_100_BASEX
);
4396 case ETHER_STAT_CAP_10FDX
:
4397 val
= BOOLEAN(dp
->mii_status
& MII_STATUS_10_FD
);
4400 case ETHER_STAT_CAP_10HDX
:
4401 val
= BOOLEAN(dp
->mii_status
& MII_STATUS_10
);
4404 case ETHER_STAT_CAP_ASMPAUSE
:
4405 val
= BOOLEAN(dp
->gc
.gc_flow_control
& 2);
4408 case ETHER_STAT_CAP_PAUSE
:
4409 val
= BOOLEAN(dp
->gc
.gc_flow_control
& 1);
4412 case ETHER_STAT_CAP_AUTONEG
:
4413 val
= BOOLEAN(dp
->mii_status
& MII_STATUS_CANAUTONEG
);
4416 case ETHER_STAT_ADV_CAP_1000FDX
:
4417 val
= dp
->anadv_1000fdx
;
4420 case ETHER_STAT_ADV_CAP_1000HDX
:
4421 val
= dp
->anadv_1000hdx
;
4424 case ETHER_STAT_ADV_CAP_100FDX
:
4425 val
= dp
->anadv_100fdx
;
4428 case ETHER_STAT_ADV_CAP_100HDX
:
4429 val
= dp
->anadv_100hdx
;
4432 case ETHER_STAT_ADV_CAP_10FDX
:
4433 val
= dp
->anadv_10fdx
;
4436 case ETHER_STAT_ADV_CAP_10HDX
:
4437 val
= dp
->anadv_10hdx
;
4440 case ETHER_STAT_ADV_CAP_ASMPAUSE
:
4441 val
= BOOLEAN(dp
->anadv_flow_control
& 2);
4444 case ETHER_STAT_ADV_CAP_PAUSE
:
4445 val
= BOOLEAN(dp
->anadv_flow_control
& 1);
4448 case ETHER_STAT_ADV_CAP_AUTONEG
:
4449 val
= dp
->anadv_autoneg
;
4452 case ETHER_STAT_LP_CAP_1000FDX
:
4453 val
= BOOLEAN(dp
->mii_stat1000
& MII_1000TS_LP_FULL
);
4456 case ETHER_STAT_LP_CAP_1000HDX
:
4457 val
= BOOLEAN(dp
->mii_stat1000
& MII_1000TS_LP_HALF
);
4460 case ETHER_STAT_LP_CAP_100FDX
:
4461 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_100BASE_TX_FD
);
4464 case ETHER_STAT_LP_CAP_100HDX
:
4465 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_100BASE_TX
);
4468 case ETHER_STAT_LP_CAP_10FDX
:
4469 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_10BASE_T_FD
);
4472 case ETHER_STAT_LP_CAP_10HDX
:
4473 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_10BASE_T
);
4476 case ETHER_STAT_LP_CAP_ASMPAUSE
:
4477 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_ASMPAUSE
);
4480 case ETHER_STAT_LP_CAP_PAUSE
:
4481 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_PAUSE
);
4484 case ETHER_STAT_LP_CAP_AUTONEG
:
4485 val
= BOOLEAN(dp
->mii_exp
& MII_AN_EXP_LPCANAN
);
4488 case ETHER_STAT_LINK_ASMPAUSE
:
4489 val
= BOOLEAN(dp
->flow_control
& 2);
4492 case ETHER_STAT_LINK_PAUSE
:
4493 val
= BOOLEAN(dp
->flow_control
& 1);
4496 case ETHER_STAT_LINK_AUTONEG
:
4497 val
= dp
->anadv_autoneg
&&
4498 BOOLEAN(dp
->mii_exp
& MII_AN_EXP_LPCANAN
);
4501 case ETHER_STAT_LINK_DUPLEX
:
4502 val
= (dp
->mii_state
== MII_STATE_LINKUP
) ?
4503 (dp
->full_duplex
? 2 : 1) : 0;
4506 case ETHER_STAT_TOOSHORT_ERRORS
:
4509 case ETHER_STAT_LP_REMFAULT
:
4510 val
= BOOLEAN(dp
->mii_lpable
& MII_AN_ADVERT_REMFAULT
);
4513 case ETHER_STAT_JABBER_ERRORS
:
4517 case ETHER_STAT_CAP_100T4
:
4518 val
= BOOLEAN(dp
->mii_status
& MII_STATUS_100_BASE_T4
);
4521 case ETHER_STAT_ADV_CAP_100T4
:
4522 val
= dp
->anadv_100t4
;
4525 case ETHER_STAT_LP_CAP_100T4
:
4526 val
= BOOLEAN(dp
->mii_lpable
& MII_ABILITY_100BASE_T4
);
4530 #if GEM_DEBUG_LEVEL > 2
4532 "%s: unrecognized parameter value = %d",
4544 gem_m_unicst(void *arg
, const uint8_t *mac
)
4547 struct gem_dev
*dp
= arg
;
4549 DPRINTF(0, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
4551 mutex_enter(&dp
->intrlock
);
4552 if (dp
->mac_suspended
) {
4553 mutex_exit(&dp
->intrlock
);
4556 bcopy(mac
, dp
->cur_addr
.ether_addr_octet
, ETHERADDRL
);
4557 dp
->rxmode
|= RXMODE_ENABLE
;
4559 if (gem_mac_set_rx_filter(dp
) != GEM_SUCCESS
) {
4562 mutex_exit(&dp
->intrlock
);
4568 * gem_m_tx is used only for sending data packets into ethernet wire.
4571 gem_m_tx(void *arg
, mblk_t
*mp
)
4574 struct gem_dev
*dp
= arg
;
4577 DPRINTF(1, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
4579 ASSERT(dp
->nic_state
== NIC_STATE_ONLINE
);
4580 if (dp
->mii_state
!= MII_STATE_LINKUP
) {
4581 /* Some nics hate to send packets when the link is down. */
4591 return (gem_send_common(dp
, mp
, flags
));
4595 gem_m_ioctl(void *arg
, queue_t
*wq
, mblk_t
*mp
)
4597 DPRINTF(0, (CE_CONT
, "!%s: %s: called",
4598 ((struct gem_dev
*)arg
)->name
, __func__
));
4600 gem_mac_ioctl((struct gem_dev
*)arg
, wq
, mp
);
4605 gem_m_getcapab(void *arg
, mac_capab_t cap
, void *cap_data
)
4611 gem_gld3_init(struct gem_dev
*dp
, mac_register_t
*macp
)
4613 macp
->m_type_ident
= MAC_PLUGIN_IDENT_ETHER
;
4614 macp
->m_driver
= dp
;
4615 macp
->m_dip
= dp
->dip
;
4616 macp
->m_src_addr
= dp
->dev_addr
.ether_addr_octet
;
4617 macp
->m_callbacks
= &gem_m_callbacks
;
4618 macp
->m_min_sdu
= 0;
4619 macp
->m_max_sdu
= dp
->mtu
;
4621 if (dp
->misc_flag
& GEM_VLAN
) {
4622 macp
->m_margin
= VTAG_SIZE
;
/* ======================================================================== */
/*
 * attach/detach support
 */
/* ======================================================================== */
4632 gem_read_conf(struct gem_dev
*dp
)
4636 DPRINTF(1, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
4639 * Get media mode infomation from .conf file
4641 dp
->anadv_autoneg
= gem_prop_get_int(dp
, "adv_autoneg_cap", 1) != 0;
4642 dp
->anadv_1000fdx
= gem_prop_get_int(dp
, "adv_1000fdx_cap", 1) != 0;
4643 dp
->anadv_1000hdx
= gem_prop_get_int(dp
, "adv_1000hdx_cap", 1) != 0;
4644 dp
->anadv_100t4
= gem_prop_get_int(dp
, "adv_100T4_cap", 1) != 0;
4645 dp
->anadv_100fdx
= gem_prop_get_int(dp
, "adv_100fdx_cap", 1) != 0;
4646 dp
->anadv_100hdx
= gem_prop_get_int(dp
, "adv_100hdx_cap", 1) != 0;
4647 dp
->anadv_10fdx
= gem_prop_get_int(dp
, "adv_10fdx_cap", 1) != 0;
4648 dp
->anadv_10hdx
= gem_prop_get_int(dp
, "adv_10hdx_cap", 1) != 0;
4650 if ((ddi_prop_exists(DDI_DEV_T_ANY
, dp
->dip
,
4651 DDI_PROP_DONTPASS
, "full-duplex"))) {
4652 dp
->full_duplex
= gem_prop_get_int(dp
, "full-duplex", 1) != 0;
4653 dp
->anadv_autoneg
= B_FALSE
;
4654 if (dp
->full_duplex
) {
4655 dp
->anadv_1000hdx
= B_FALSE
;
4656 dp
->anadv_100hdx
= B_FALSE
;
4657 dp
->anadv_10hdx
= B_FALSE
;
4659 dp
->anadv_1000fdx
= B_FALSE
;
4660 dp
->anadv_100fdx
= B_FALSE
;
4661 dp
->anadv_10fdx
= B_FALSE
;
4665 if ((val
= gem_prop_get_int(dp
, "speed", 0)) > 0) {
4666 dp
->anadv_autoneg
= B_FALSE
;
4669 dp
->speed
= GEM_SPD_1000
;
4670 dp
->anadv_100t4
= B_FALSE
;
4671 dp
->anadv_100fdx
= B_FALSE
;
4672 dp
->anadv_100hdx
= B_FALSE
;
4673 dp
->anadv_10fdx
= B_FALSE
;
4674 dp
->anadv_10hdx
= B_FALSE
;
4677 dp
->speed
= GEM_SPD_100
;
4678 dp
->anadv_1000fdx
= B_FALSE
;
4679 dp
->anadv_1000hdx
= B_FALSE
;
4680 dp
->anadv_10fdx
= B_FALSE
;
4681 dp
->anadv_10hdx
= B_FALSE
;
4684 dp
->speed
= GEM_SPD_10
;
4685 dp
->anadv_1000fdx
= B_FALSE
;
4686 dp
->anadv_1000hdx
= B_FALSE
;
4687 dp
->anadv_100t4
= B_FALSE
;
4688 dp
->anadv_100fdx
= B_FALSE
;
4689 dp
->anadv_100hdx
= B_FALSE
;
4693 "!%s: property %s: illegal value:%d",
4694 dp
->name
, "speed", val
);
4695 dp
->anadv_autoneg
= B_TRUE
;
4700 val
= gem_prop_get_int(dp
, "flow-control", dp
->gc
.gc_flow_control
);
4701 if (val
> FLOW_CONTROL_RX_PAUSE
|| val
< FLOW_CONTROL_NONE
) {
4703 "!%s: property %s: illegal value:%d",
4704 dp
->name
, "flow-control", val
);
4706 val
= min(val
, dp
->gc
.gc_flow_control
);
4708 dp
->anadv_flow_control
= val
;
4710 if (gem_prop_get_int(dp
, "nointr", 0)) {
4711 dp
->misc_flag
|= GEM_NOINTR
;
4712 cmn_err(CE_NOTE
, "!%s: polling mode enabled", dp
->name
);
4715 dp
->mtu
= gem_prop_get_int(dp
, "mtu", dp
->mtu
);
4716 dp
->txthr
= gem_prop_get_int(dp
, "txthr", dp
->txthr
);
4717 dp
->rxthr
= gem_prop_get_int(dp
, "rxthr", dp
->rxthr
);
4718 dp
->txmaxdma
= gem_prop_get_int(dp
, "txmaxdma", dp
->txmaxdma
);
4719 dp
->rxmaxdma
= gem_prop_get_int(dp
, "rxmaxdma", dp
->rxmaxdma
);
/*
 * Total size of the single kmem allocation backing one gem_dev:
 * the soft state itself, the multicast address list, the txbuf
 * array and the tx buffer pointer array, laid out contiguously.
 */
#define	GEM_LOCAL_DATA_SIZE(gc)	\
	(sizeof (struct gem_dev) + \
	sizeof (struct mcast_addr) * GEM_MAXMC + \
	sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \
	sizeof (void *) * ((gc)->gc_tx_buf_size))
4734 gem_do_attach(dev_info_t
*dip
, int port
,
4735 struct gem_conf
*gc
, void *base
, ddi_acc_handle_t
*regs_handlep
,
4736 void *lp
, int lmsize
)
4740 ddi_iblock_cookie_t c
;
4741 mac_register_t
*macp
= NULL
;
4746 unit
= ddi_get_instance(dip
);
4747 if ((nports
= gc
->gc_nports
) == 0) {
4751 ddi_set_driver_private(dip
, NULL
);
4754 DPRINTF(2, (CE_CONT
, "!gem%d: gem_do_attach: called cmd:ATTACH",
4758 * Allocate soft data structure
4760 dp
= kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc
), KM_SLEEP
);
4762 if ((macp
= mac_alloc(MAC_VERSION
)) == NULL
) {
4763 cmn_err(CE_WARN
, "!gem%d: %s: mac_alloc failed",
4767 /* ddi_set_driver_private(dip, dp); */
4769 /* link to private area */
4771 dp
->priv_size
= lmsize
;
4772 dp
->mc_list
= (struct mcast_addr
*)&dp
[1];
4775 (void) sprintf(dp
->name
, gc
->gc_name
, nports
* unit
+ port
);
4780 if (ddi_get_iblock_cookie(dip
, 0, &c
) != DDI_SUCCESS
) {
4782 "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
4784 goto err_free_private
;
4786 dp
->iblock_cookie
= c
;
4789 * Initialize mutex's for this device.
4791 mutex_init(&dp
->intrlock
, NULL
, MUTEX_DRIVER
, (void *)c
);
4792 mutex_init(&dp
->xmitlock
, NULL
, MUTEX_DRIVER
, (void *)c
);
4793 cv_init(&dp
->tx_drain_cv
, NULL
, CV_DRIVER
, NULL
);
4796 * configure gem parameter
4798 dp
->base_addr
= base
;
4799 dp
->regs_handle
= *regs_handlep
;
4802 /* patch for simplify dma resource management */
4803 gc
->gc_tx_max_frags
= 1;
4804 gc
->gc_tx_max_descs_per_pkt
= 1;
4805 gc
->gc_tx_ring_size
= gc
->gc_tx_buf_size
;
4806 gc
->gc_tx_ring_limit
= gc
->gc_tx_buf_limit
;
4807 gc
->gc_tx_desc_write_oo
= B_TRUE
;
4809 gc
->gc_nports
= nports
; /* fix nports */
4811 /* fix copy threadsholds */
4812 gc
->gc_tx_copy_thresh
= max(ETHERMIN
, gc
->gc_tx_copy_thresh
);
4813 gc
->gc_rx_copy_thresh
= max(ETHERMIN
, gc
->gc_rx_copy_thresh
);
4815 /* fix rx buffer boundary for iocache line size */
4816 ASSERT(gc
->gc_dma_attr_txbuf
.dma_attr_align
-1 == gc
->gc_tx_buf_align
);
4817 ASSERT(gc
->gc_dma_attr_rxbuf
.dma_attr_align
-1 == gc
->gc_rx_buf_align
);
4818 gc
->gc_rx_buf_align
= max(gc
->gc_rx_buf_align
, IOC_LINESIZE
- 1);
4819 gc
->gc_dma_attr_rxbuf
.dma_attr_align
= gc
->gc_rx_buf_align
+ 1;
4821 /* fix descriptor boundary for cache line size */
4822 gc
->gc_dma_attr_desc
.dma_attr_align
=
4823 max(gc
->gc_dma_attr_desc
.dma_attr_align
, IOC_LINESIZE
);
4825 /* patch get_packet method */
4826 if (gc
->gc_get_packet
== NULL
) {
4827 gc
->gc_get_packet
= &gem_get_packet_default
;
4830 /* patch get_rx_start method */
4831 if (gc
->gc_rx_start
== NULL
) {
4832 gc
->gc_rx_start
= &gem_rx_start_default
;
4835 /* calculate descriptor area */
4836 if (gc
->gc_rx_desc_unit_shift
>= 0) {
4838 ROUNDUP(gc
->gc_rx_ring_size
<< gc
->gc_rx_desc_unit_shift
,
4839 gc
->gc_dma_attr_desc
.dma_attr_align
);
4841 if (gc
->gc_tx_desc_unit_shift
>= 0) {
4843 ROUNDUP(gc
->gc_tx_ring_size
<< gc
->gc_tx_desc_unit_shift
,
4844 gc
->gc_dma_attr_desc
.dma_attr_align
);
4848 dp
->tx_buf
= (void *)&dp
->mc_list
[GEM_MAXMC
];
4849 /* link tx buffers */
4850 for (i
= 0; i
< dp
->gc
.gc_tx_buf_size
; i
++) {
4851 dp
->tx_buf
[i
].txb_next
=
4852 &dp
->tx_buf
[SLOT(i
+ 1, dp
->gc
.gc_tx_buf_size
)];
4856 dp
->speed
= GEM_SPD_10
; /* default is 10Mbps */
4857 dp
->full_duplex
= B_FALSE
; /* default is half */
4858 dp
->flow_control
= FLOW_CONTROL_NONE
;
4859 dp
->poll_pkt_delay
= 8; /* typical coalease for rx packets */
4861 /* performance tuning parameters */
4862 dp
->txthr
= ETHERMAX
; /* tx fifo threshold */
4863 dp
->txmaxdma
= 16*4; /* tx max dma burst size */
4864 dp
->rxthr
= 128; /* rx fifo threshold */
4865 dp
->rxmaxdma
= 16*4; /* rx max dma burst size */
4868 * Get media mode information from .conf file
4872 /* rx_buf_len is required buffer length without padding for alignment */
4873 dp
->rx_buf_len
= MAXPKTBUF(dp
) + dp
->gc
.gc_rx_header_len
;
4878 mutex_enter(&dp
->intrlock
);
4879 dp
->nic_state
= NIC_STATE_STOPPED
;
4880 ret
= (*dp
->gc
.gc_reset_chip
)(dp
);
4881 mutex_exit(&dp
->intrlock
);
4882 if (ret
!= GEM_SUCCESS
) {
4887 * HW dependant paremeter initialization
4889 mutex_enter(&dp
->intrlock
);
4890 ret
= (*dp
->gc
.gc_attach_chip
)(dp
);
4891 mutex_exit(&dp
->intrlock
);
4892 if (ret
!= GEM_SUCCESS
) {
4896 #ifdef DEBUG_MULTIFRAGS
4897 dp
->gc
.gc_tx_copy_thresh
= dp
->mtu
;
4899 /* allocate tx and rx resources */
4900 if (gem_alloc_memory(dp
)) {
4904 DPRINTF(0, (CE_CONT
,
4905 "!%s: at 0x%x, %02x:%02x:%02x:%02x:%02x:%02x",
4906 dp
->name
, (long)dp
->base_addr
,
4907 dp
->dev_addr
.ether_addr_octet
[0],
4908 dp
->dev_addr
.ether_addr_octet
[1],
4909 dp
->dev_addr
.ether_addr_octet
[2],
4910 dp
->dev_addr
.ether_addr_octet
[3],
4911 dp
->dev_addr
.ether_addr_octet
[4],
4912 dp
->dev_addr
.ether_addr_octet
[5]));
4914 /* copy mac address */
4915 dp
->cur_addr
= dp
->dev_addr
;
4917 gem_gld3_init(dp
, macp
);
4919 /* Probe MII phy (scan phy) */
4923 dp
->mii_ctl1000
= 0;
4924 dp
->mii_stat1000
= 0;
4925 if ((*dp
->gc
.gc_mii_probe
)(dp
) != GEM_SUCCESS
) {
4929 /* mask unsupported abilities */
4930 dp
->anadv_autoneg
&= BOOLEAN(dp
->mii_status
& MII_STATUS_CANAUTONEG
);
4931 dp
->anadv_1000fdx
&=
4932 BOOLEAN(dp
->mii_xstatus
&
4933 (MII_XSTATUS_1000BASEX_FD
| MII_XSTATUS_1000BASET_FD
));
4934 dp
->anadv_1000hdx
&=
4935 BOOLEAN(dp
->mii_xstatus
&
4936 (MII_XSTATUS_1000BASEX
| MII_XSTATUS_1000BASET
));
4937 dp
->anadv_100t4
&= BOOLEAN(dp
->mii_status
& MII_STATUS_100_BASE_T4
);
4938 dp
->anadv_100fdx
&= BOOLEAN(dp
->mii_status
& MII_STATUS_100_BASEX_FD
);
4939 dp
->anadv_100hdx
&= BOOLEAN(dp
->mii_status
& MII_STATUS_100_BASEX
);
4940 dp
->anadv_10fdx
&= BOOLEAN(dp
->mii_status
& MII_STATUS_10_FD
);
4941 dp
->anadv_10hdx
&= BOOLEAN(dp
->mii_status
& MII_STATUS_10
);
4943 gem_choose_forcedmode(dp
);
4945 /* initialize MII phy if required */
4946 if (dp
->gc
.gc_mii_init
) {
4947 if ((*dp
->gc
.gc_mii_init
)(dp
) != GEM_SUCCESS
) {
4953 * initialize kstats including mii statistics
4958 * Add interrupt to system.
4960 if (ret
= mac_register(macp
, &dp
->mh
)) {
4961 cmn_err(CE_WARN
, "!%s: mac_register failed, error:%d",
4963 goto err_release_stats
;
4968 if (dp
->misc_flag
& GEM_SOFTINTR
) {
4969 if (ddi_add_softintr(dip
,
4970 DDI_SOFTINT_LOW
, &dp
->soft_id
,
4972 (uint_t (*)(caddr_t
))gem_intr
,
4973 (caddr_t
)dp
) != DDI_SUCCESS
) {
4974 cmn_err(CE_WARN
, "!%s: ddi_add_softintr failed",
4976 goto err_unregister
;
4978 } else if ((dp
->misc_flag
& GEM_NOINTR
) == 0) {
4979 if (ddi_add_intr(dip
, 0, NULL
, NULL
,
4980 (uint_t (*)(caddr_t
))gem_intr
,
4981 (caddr_t
)dp
) != DDI_SUCCESS
) {
4982 cmn_err(CE_WARN
, "!%s: ddi_add_intr failed", dp
->name
);
4983 goto err_unregister
;
4987 * Dont use interrupt.
4988 * schedule first call of gem_intr_watcher
4990 dp
->intr_watcher_id
=
4991 timeout((void (*)(void *))gem_intr_watcher
,
4992 (void *)dp
, drv_usectohz(3*1000000));
4995 /* link this device to dev_info */
4996 dp
->next
= (struct gem_dev
*)ddi_get_driver_private(dip
);
4998 ddi_set_driver_private(dip
, (caddr_t
)dp
);
5000 /* reset mii phy and start mii link watcher */
5003 DPRINTF(2, (CE_CONT
, "!gem_do_attach: return: success"));
5007 (void) mac_unregister(dp
->mh
);
5009 /* release NDD resources */
5013 gem_free_memory(dp
);
5015 ddi_regs_map_free(&dp
->regs_handle
);
5017 mutex_destroy(&dp
->xmitlock
);
5018 mutex_destroy(&dp
->intrlock
);
5019 cv_destroy(&dp
->tx_drain_cv
);
5024 kmem_free((caddr_t
)dp
, GEM_LOCAL_DATA_SIZE(gc
));
5030 gem_do_detach(dev_info_t
*dip
)
5033 struct gem_dev
*tmp
;
5036 ddi_acc_handle_t rh
;
5038 dp
= GEM_GET_DEV(dip
);
5040 return (DDI_SUCCESS
);
5043 rh
= dp
->regs_handle
;
5044 private = dp
->private;
5045 priv_size
= dp
->priv_size
;
5048 /* unregister with gld v3 */
5049 if (mac_unregister(dp
->mh
) != 0) {
5050 return (DDI_FAILURE
);
5053 /* ensure any rx buffers are not used */
5054 if (dp
->rx_buf_allocated
!= dp
->rx_buf_freecnt
) {
5055 /* resource is busy */
5057 "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
5059 dp
->rx_buf_allocated
, dp
->rx_buf_freecnt
);
5063 /* stop mii link watcher */
5066 /* unregister interrupt handler */
5067 if (dp
->misc_flag
& GEM_SOFTINTR
) {
5068 ddi_remove_softintr(dp
->soft_id
);
5069 } else if ((dp
->misc_flag
& GEM_NOINTR
) == 0) {
5070 ddi_remove_intr(dip
, 0, dp
->iblock_cookie
);
5072 /* stop interrupt watcher */
5073 if (dp
->intr_watcher_id
) {
5074 while (untimeout(dp
->intr_watcher_id
) == -1)
5076 dp
->intr_watcher_id
= 0;
5080 /* release NDD resources */
5082 /* release buffers, descriptors and dma resources */
5083 gem_free_memory(dp
);
5085 /* release locks and condition variables */
5086 mutex_destroy(&dp
->xmitlock
);
5087 mutex_destroy(&dp
->intrlock
);
5088 cv_destroy(&dp
->tx_drain_cv
);
5090 /* release basic memory resources */
5092 kmem_free((caddr_t
)dp
, GEM_LOCAL_DATA_SIZE(&dp
->gc
));
5096 /* release common private memory for the nic */
5097 kmem_free(private, priv_size
);
5099 /* release register mapping resources */
5100 ddi_regs_map_free(&rh
);
5102 DPRINTF(2, (CE_CONT
, "!%s%d: gem_do_detach: return: success",
5103 ddi_driver_name(dip
), ddi_get_instance(dip
)));
5105 return (DDI_SUCCESS
);
5109 gem_suspend(dev_info_t
*dip
)
5116 dp
= GEM_GET_DEV(dip
);
5119 DPRINTF(0, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
5121 for (; dp
; dp
= dp
->next
) {
5123 /* stop mii link watcher */
5126 /* stop interrupt watcher for no-intr mode */
5127 if (dp
->misc_flag
& GEM_NOINTR
) {
5128 if (dp
->intr_watcher_id
) {
5129 while (untimeout(dp
->intr_watcher_id
) == -1)
5132 dp
->intr_watcher_id
= 0;
5135 /* stop tx timeout watcher */
5136 if (dp
->timeout_id
) {
5137 while (untimeout(dp
->timeout_id
) == -1)
5142 /* make the nic state inactive */
5143 mutex_enter(&dp
->intrlock
);
5144 (void) gem_mac_stop(dp
, 0);
5145 ASSERT(!dp
->mac_active
);
5147 /* no further register access */
5148 dp
->mac_suspended
= B_TRUE
;
5149 mutex_exit(&dp
->intrlock
);
5152 /* XXX - power down the nic */
5154 return (DDI_SUCCESS
);
5158 gem_resume(dev_info_t
*dip
)
5163 * restart the device
5165 dp
= GEM_GET_DEV(dip
);
5168 DPRINTF(0, (CE_CONT
, "!%s: %s: called", dp
->name
, __func__
));
5170 for (; dp
; dp
= dp
->next
) {
5173 * Bring up the nic after power up
5176 /* gem_xxx.c layer to setup power management state. */
5177 ASSERT(!dp
->mac_active
);
5179 /* reset the chip, because we are just after power up. */
5180 mutex_enter(&dp
->intrlock
);
5182 dp
->mac_suspended
= B_FALSE
;
5183 dp
->nic_state
= NIC_STATE_STOPPED
;
5185 if ((*dp
->gc
.gc_reset_chip
)(dp
) != GEM_SUCCESS
) {
5186 cmn_err(CE_WARN
, "%s: %s: failed to reset chip",
5187 dp
->name
, __func__
);
5188 mutex_exit(&dp
->intrlock
);
5191 mutex_exit(&dp
->intrlock
);
5193 /* initialize mii phy because we are just after power up */
5194 if (dp
->gc
.gc_mii_init
) {
5195 (void) (*dp
->gc
.gc_mii_init
)(dp
);
5198 if (dp
->misc_flag
& GEM_NOINTR
) {
5200 * schedule first call of gem_intr_watcher
5201 * instead of interrupts.
5203 dp
->intr_watcher_id
=
5204 timeout((void (*)(void *))gem_intr_watcher
,
5205 (void *)dp
, drv_usectohz(3*1000000));
5208 /* restart mii link watcher */
5212 mutex_enter(&dp
->intrlock
);
5214 if (gem_mac_init(dp
) != GEM_SUCCESS
) {
5215 mutex_exit(&dp
->intrlock
);
5218 dp
->nic_state
= NIC_STATE_INITIALIZED
;
5220 /* setup media mode if the link have been up */
5221 if (dp
->mii_state
== MII_STATE_LINKUP
) {
5222 if ((dp
->gc
.gc_set_media
)(dp
) != GEM_SUCCESS
) {
5223 mutex_exit(&dp
->intrlock
);
5228 /* enable mac address and rx filter */
5229 dp
->rxmode
|= RXMODE_ENABLE
;
5230 if ((*dp
->gc
.gc_set_rx_filter
)(dp
) != GEM_SUCCESS
) {
5231 mutex_exit(&dp
->intrlock
);
5234 dp
->nic_state
= NIC_STATE_ONLINE
;
5236 /* restart tx timeout watcher */
5237 dp
->timeout_id
= timeout((void (*)(void *))gem_tx_timeout
,
5239 dp
->gc
.gc_tx_timeout_interval
);
5241 /* now the nic is fully functional */
5242 if (dp
->mii_state
== MII_STATE_LINKUP
) {
5243 if (gem_mac_start(dp
) != GEM_SUCCESS
) {
5244 mutex_exit(&dp
->intrlock
);
5248 mutex_exit(&dp
->intrlock
);
5251 return (DDI_SUCCESS
);
5254 if (dp
->intr_watcher_id
) {
5255 while (untimeout(dp
->intr_watcher_id
) == -1)
5257 dp
->intr_watcher_id
= 0;
5259 mutex_enter(&dp
->intrlock
);
5260 (*dp
->gc
.gc_reset_chip
)(dp
);
5261 dp
->nic_state
= NIC_STATE_STOPPED
;
5262 mutex_exit(&dp
->intrlock
);
5265 return (DDI_FAILURE
);
5269 * misc routines for PCI
5272 gem_search_pci_cap(dev_info_t
*dip
,
5273 ddi_acc_handle_t conf_handle
, uint8_t target
)
5275 uint8_t pci_cap_ptr
;
5278 /* search power management capablities */
5279 pci_cap_ptr
= pci_config_get8(conf_handle
, PCI_CONF_CAP_PTR
);
5280 while (pci_cap_ptr
) {
5281 /* read pci capability header */
5282 pci_cap
= pci_config_get32(conf_handle
, pci_cap_ptr
);
5283 if ((pci_cap
& 0xff) == target
) {
5288 pci_cap_ptr
= (pci_cap
>> 8) & 0xff;
5290 return (pci_cap_ptr
);
5294 gem_pci_set_power_state(dev_info_t
*dip
,
5295 ddi_acc_handle_t conf_handle
, uint_t new_mode
)
5297 uint8_t pci_cap_ptr
;
5300 const char *drv_name
;
5302 ASSERT(new_mode
< 4);
5304 unit
= ddi_get_instance(dip
);
5305 drv_name
= ddi_driver_name(dip
);
5307 /* search power management capablities */
5308 pci_cap_ptr
= gem_search_pci_cap(dip
, conf_handle
, PCI_CAP_ID_PM
);
5310 if (pci_cap_ptr
== 0) {
5312 "!%s%d: doesn't have pci power management capability",
5314 return (DDI_FAILURE
);
5317 /* read power management capabilities */
5318 pmcsr
= pci_config_get32(conf_handle
, pci_cap_ptr
+ PCI_PMCSR
);
5320 DPRINTF(0, (CE_CONT
,
5321 "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
5322 drv_name
, unit
, pci_cap_ptr
, pmcsr
));
5325 * Is the resuested power mode supported?
5332 pmcsr
= (pmcsr
& ~PCI_PMCSR_STATE_MASK
) | new_mode
;
5333 pci_config_put32(conf_handle
, pci_cap_ptr
+ PCI_PMCSR
, pmcsr
);
5335 return (DDI_SUCCESS
);
5339 * select suitable register for by specified address space or register
5340 * offset in PCI config space
5343 gem_pci_regs_map_setup(dev_info_t
*dip
, uint32_t which
, uint32_t mask
,
5344 struct ddi_device_acc_attr
*attrp
,
5345 caddr_t
*basep
, ddi_acc_handle_t
*hp
)
5347 struct pci_phys_spec
*regs
;
5353 const char *drv_name
;
5355 unit
= ddi_get_instance(dip
);
5356 drv_name
= ddi_driver_name(dip
);
5358 /* Search IO-range or memory-range to be mapped */
5362 if ((ret
= ddi_prop_lookup_int_array(
5363 DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
5364 "reg", (void *)®s
, &len
)) != DDI_PROP_SUCCESS
) {
5366 "!%s%d: failed to get reg property (ret:%d)",
5367 drv_name
, unit
, ret
);
5368 return (DDI_FAILURE
);
5370 n
= len
/ (sizeof (struct pci_phys_spec
) / sizeof (int));
5372 ASSERT(regs
!= NULL
&& len
> 0);
5374 #if GEM_DEBUG_LEVEL > 0
5375 for (i
= 0; i
< n
; i
++) {
5377 "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
5379 regs
[i
].pci_phys_hi
,
5380 regs
[i
].pci_phys_mid
,
5381 regs
[i
].pci_phys_low
,
5382 regs
[i
].pci_size_hi
,
5383 regs
[i
].pci_size_low
);
5386 for (i
= 0; i
< n
; i
++) {
5387 if ((regs
[i
].pci_phys_hi
& mask
) == which
) {
5388 /* it's the requested space */
5389 ddi_prop_free(regs
);
5390 goto address_range_found
;
5393 ddi_prop_free(regs
);
5394 return (DDI_FAILURE
);
5396 address_range_found
:
5397 if ((ret
= ddi_regs_map_setup(dip
, i
, basep
, 0, 0, attrp
, hp
))
5400 "!%s%d: ddi_regs_map_setup failed (ret:%d)",
5401 drv_name
, unit
, ret
);
/*
 * Module-load helper: hook the GLDv3 mac framework into the driver's
 * dev_ops before mod_install().
 */
void
gem_mod_init(struct dev_ops *dop, char *name)
{
	mac_init_ops(dop, name);
}
5414 gem_mod_fini(struct dev_ops
*dop
)