/*
 * sfe.c : DP83815/DP83816/SiS900 Fast Ethernet MAC driver for Solaris
 *
 * Copyright (c) 2002-2008 Masayuki Murayama.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the author nor the names of its contributors may be
 *    used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
/* Avoid undefined symbol for non IA architectures */
#pragma weak	inb
#pragma weak	outb

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * System Header files.
 */
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/byteorder.h>
#include <sys/ethernet.h>
#include <sys/pci.h>

#include "sfe_mii.h"
#include "sfe_util.h"
#include "sfereg.h"
char	ident[] = "sis900/dp83815 driver v" "2.6.1t30os";

/* Debugging support */
#ifdef DEBUG_LEVEL
static int sfe_debug = DEBUG_LEVEL;
#define	DPRINTF(n, args)	if (sfe_debug > (n)) cmn_err args
#else
#define	DPRINTF(n, args)
#endif

/* log message prefix: "!" suppresses console output (see cmn_err(9F)) */
#ifndef CONS
#define	CONS	"!"
#endif
/*
 * Useful macros and typedefs
 */
#define	ONESEC		(drv_usectohz(1*1000000))
#define	ROUNDUP2(x, a)	(((x) + (a) - 1) & ~((a) - 1))
/*
 * Our configuration
 */
#define	MAXTXFRAGS	1
#define	MAXRXFRAGS	1

#ifndef	TX_BUF_SIZE
#define	TX_BUF_SIZE	64
#endif
#ifndef	TX_RING_SIZE
#if MAXTXFRAGS == 1
#define	TX_RING_SIZE	TX_BUF_SIZE
#else
#define	TX_RING_SIZE	(TX_BUF_SIZE * 4)
#endif
#endif

#ifndef	RX_BUF_SIZE
#define	RX_BUF_SIZE	256
#endif
#ifndef	RX_RING_SIZE
#define	RX_RING_SIZE	RX_BUF_SIZE
#endif
#define	OUR_INTR_BITS \
	(ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT | ISR_RXSOVR | \
	ISR_TXURN | ISR_TXDESC | ISR_TXERR | \
	ISR_RXORN | ISR_RXIDLE | ISR_RXOK | ISR_RXERR)

#define	USE_MULTICAST_HASHTBL
static int	sfe_tx_copy_thresh = 256;
static int	sfe_rx_copy_thresh = 256;
/* special PHY registers for SIS900 */
#define	MII_CONFIG1	0x0010
#define	MII_CONFIG2	0x0011
#define	MII_MASK	0x0013
#define	MII_RESV	0x0014

#define	PHY_MASK		0xfffffff0
#define	PHY_SIS900_INTERNAL	0x001d8000
#define	PHY_ICS1893		0x0015f440
#define	SFE_DESC_SIZE	16	/* including pads rounding up to power of 2 */

/*
 * Descriptor implementation
 */
struct sfe_desc {
	volatile uint32_t	d_link;		/* link to the next */
	volatile uint32_t	d_cmdsts;	/* command/status field */
	volatile uint32_t	d_bufptr;	/* ptr to buffer */
};

struct chip_info {
	uint16_t	venid;
	uint16_t	devid;
	char		*chip_name;
	int		chip_type;
#define	CHIPTYPE_DP83815	0
#define	CHIPTYPE_SIS900		1
};
/*
 * Chip dependent MAC state
 */
struct sfe_dev {
	/* misc HW information */
	struct chip_info	*chip;
	uint32_t		our_intr_bits;
	uint32_t		isr_pended;
	uint32_t		cr;
	uint_t			tx_drain_threshold;
	uint_t			tx_fill_threshold;
	uint_t			rx_drain_threshold;
	uint_t			rx_fill_threshold;
	uint8_t			revid;	/* revision from PCI configuration */
	boolean_t		(*get_mac_addr)(struct gem_dev *);
	uint8_t			mac_addr[ETHERADDRL];
	uint8_t			bridge_revid;
};
/*
 * Hardware information
 */
struct chip_info sfe_chiptbl[] = {
	{ 0x1039, 0x0900, "SiS900", CHIPTYPE_SIS900, },
	{ 0x100b, 0x0020, "DP83815/83816", CHIPTYPE_DP83815, },
	{ 0x1039, 0x7016, "SiS7016", CHIPTYPE_SIS900, },
};
#define	CHIPTABLESIZE	(sizeof (sfe_chiptbl)/sizeof (struct chip_info))
/* ======================================================== */

static void sfe_mii_sync_dp83815(struct gem_dev *);
static void sfe_mii_sync_sis900(struct gem_dev *);
static uint16_t sfe_mii_read_dp83815(struct gem_dev *, uint_t);
static uint16_t sfe_mii_read_sis900(struct gem_dev *, uint_t);
static void sfe_mii_write_dp83815(struct gem_dev *, uint_t, uint16_t);
static void sfe_mii_write_sis900(struct gem_dev *, uint_t, uint16_t);
static void sfe_set_eq_sis630(struct gem_dev *dp);

static int sfe_reset_chip_sis900(struct gem_dev *);
static int sfe_reset_chip_dp83815(struct gem_dev *);
static int sfe_init_chip(struct gem_dev *);
static int sfe_start_chip(struct gem_dev *);
static int sfe_stop_chip(struct gem_dev *);
static int sfe_set_media(struct gem_dev *);
static int sfe_set_rx_filter_dp83815(struct gem_dev *);
static int sfe_set_rx_filter_sis900(struct gem_dev *);
static int sfe_get_stats(struct gem_dev *);
static int sfe_attach_chip(struct gem_dev *);

/* descriptor operations */
static int sfe_tx_desc_write(struct gem_dev *dp, int slot,
    ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags);
static void sfe_tx_start(struct gem_dev *dp, int startslot, int nslot);
static void sfe_rx_desc_write(struct gem_dev *dp, int slot,
    ddi_dma_cookie_t *dmacookie, int frags);
static uint_t sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
static uint64_t sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc);

static void sfe_tx_desc_init(struct gem_dev *dp, int slot);
static void sfe_rx_desc_init(struct gem_dev *dp, int slot);
static void sfe_tx_desc_clean(struct gem_dev *dp, int slot);
static void sfe_rx_desc_clean(struct gem_dev *dp, int slot);

/* interrupt handler */
static uint_t sfe_interrupt(struct gem_dev *dp);
/* ======================================================== */

/* mapping attributes */
/* Data access requirements. */
static struct ddi_device_acc_attr sfe_dev_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* On sparc, Buffers should be native endian for speed */
static struct ddi_device_acc_attr sfe_buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* native endianness */
	DDI_STRICTORDER_ACC
};
static ddi_dma_attr_t sfe_dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi */
	0x00000fffull,		/* dma_attr_count_max */
	0, /* patched later */	/* dma_attr_align */
	0x000003fc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x00000fffull,		/* dma_attr_maxxfer */
	0xffffffffull,		/* dma_attr_seg */
	0, /* patched later */	/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
static ddi_dma_attr_t sfe_dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr_version */
	16,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi */
	0xffffffffull,		/* dma_attr_count_max */
	16,			/* dma_attr_align */
	0x000003fc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer */
	0xffffffffull,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

uint32_t sfe_use_pcimemspace = 0;
/* ======================================================== */
/*
 * HW manipulation routines
 */
/* ======================================================== */

#define	SFE_EEPROM_DELAY(dp) \
	{ (void) INL(dp, EROMAR); (void) INL(dp, EROMAR); }
#define	EE_CMD_READ	6
#define	EE_CMD_SHIFT	6
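
/*
 * The serial EEPROM behind EROMAR appears to be a microwire-style
 * 93C46-class part: sfe_read_eeprom() below shifts out a 9-bit command
 * on EEDI, one bit per EESK clock (read opcode EE_CMD_READ == 6, i.e.
 * bits "110", followed by the 6-bit word offset), then clocks 16 data
 * bits back in on EEDO.  As a worked example, reading word 0x8 sends
 * the bit sequence 1 1 0 0 0 1 0 0 0.  This describes the observed
 * access pattern, not a documented contract for every board.
 */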
static uint16_t
sfe_read_eeprom(struct gem_dev *dp, uint_t offset)
{
	int		i;
	uint_t		eedi;
	uint16_t	ret;

	/* ensure de-assert chip select */
	OUTL(dp, EROMAR, 0);
	SFE_EEPROM_DELAY(dp);
	OUTL(dp, EROMAR, EROMAR_EESK);
	SFE_EEPROM_DELAY(dp);

	/* assert chip select */
	offset |= EE_CMD_READ << EE_CMD_SHIFT;

	for (i = 8; i >= 0; i--) {
		/* make command */
		eedi = ((offset >> i) & 1) << EROMAR_EEDI_SHIFT;

		/* send 1 bit */
		OUTL(dp, EROMAR, EROMAR_EECS | eedi);
		SFE_EEPROM_DELAY(dp);
		OUTL(dp, EROMAR, EROMAR_EECS | eedi | EROMAR_EESK);
		SFE_EEPROM_DELAY(dp);
	}

	OUTL(dp, EROMAR, EROMAR_EECS);

	ret = 0;
	for (i = 0; i < 16; i++) {
		/* get 1 bit */
		OUTL(dp, EROMAR, EROMAR_EECS);
		SFE_EEPROM_DELAY(dp);
		OUTL(dp, EROMAR, EROMAR_EECS | EROMAR_EESK);
		SFE_EEPROM_DELAY(dp);

		ret = (ret << 1) | ((INL(dp, EROMAR) >> EROMAR_EEDO_SHIFT) & 1);
	}

	OUTL(dp, EROMAR, 0);
	SFE_EEPROM_DELAY(dp);

	return (ret);
}
#undef SFE_EEPROM_DELAY
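
/*
 * On DP83815 the factory MAC address is stored bit-serially across
 * EEPROM words 0x6..0x9 rather than byte-packed: bit 0 of the address
 * comes from the LSB of word 0x6, bits 1-16 from word 0x7 taken
 * MSB-first, and so on.  The BITSET() macro below repacks those bits
 * into the 6-byte address buffer one bit at a time.
 */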
static boolean_t
sfe_get_mac_addr_dp83815(struct gem_dev *dp)
{
	uint8_t		*mac;
	uint_t		val;
	int		i;

#define	BITSET(p, ix, v)	(p)[(ix)/8] |= ((v) ? 1 : 0) << ((ix) & 0x7)

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	mac = dp->dev_addr.ether_addr_octet;

	/* first of all, clear MAC address buffer */
	bzero(mac, ETHERADDRL);

	/* get bit 0 */
	val = sfe_read_eeprom(dp, 0x6);
	BITSET(mac, 0, val & 1);

	/* get bit 1 - 16 */
	val = sfe_read_eeprom(dp, 0x7);
	for (i = 0; i < 16; i++) {
		BITSET(mac, 1 + i, val & (1 << (15 - i)));
	}

	/* get bit 17 - 32 */
	val = sfe_read_eeprom(dp, 0x8);
	for (i = 0; i < 16; i++) {
		BITSET(mac, 17 + i, val & (1 << (15 - i)));
	}

	/* get bit 33 - 47 */
	val = sfe_read_eeprom(dp, 0x9);
	for (i = 0; i < 15; i++) {
		BITSET(mac, 33 + i, val & (1 << (15 - i)));
	}
#undef BITSET

	return (B_TRUE);
}
static boolean_t
sfe_get_mac_addr_sis900(struct gem_dev *dp)
{
	uint_t		val;
	int		i;
	uint8_t		*mac;

	mac = dp->dev_addr.ether_addr_octet;

	for (i = 0; i < ETHERADDRL/2; i++) {
		val = sfe_read_eeprom(dp, 0x8 + i);
		*mac++ = (uint8_t)val;
		*mac++ = (uint8_t)(val >> 8);
	}

	return (B_TRUE);
}
static dev_info_t *
sfe_search_pci_dev_subr(dev_info_t *cur_node, int vendor_id, int device_id)
{
	dev_info_t	*child_id;
	dev_info_t	*ret;
	int		vid, did;

	if (cur_node == NULL) {
		return (NULL);
	}

	/* check brothers */
	do {
		vid = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
		    DDI_PROP_DONTPASS, "vendor-id", -1);
		did = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
		    DDI_PROP_DONTPASS, "device-id", -1);

		if (vid == vendor_id && did == device_id) {
			/* found */
			return (cur_node);
		}

		/* check children */
		if ((child_id = ddi_get_child(cur_node)) != NULL) {
			if ((ret = sfe_search_pci_dev_subr(child_id,
			    vendor_id, device_id)) != NULL) {
				return (ret);
			}
		}

	} while ((cur_node = ddi_get_next_sibling(cur_node)) != NULL);

	/* not found */
	return (NULL);
}

static dev_info_t *
sfe_search_pci_dev(int vendor_id, int device_id)
{
	return (sfe_search_pci_dev_subr(ddi_root_node(), vendor_id, device_id));
}
static boolean_t
sfe_get_mac_addr_sis630e(struct gem_dev *dp)
{
	int		i;
	dev_info_t	*isa_bridge;
	ddi_acc_handle_t isa_handle;
	int		reg;

	if (inb == NULL || outb == NULL) {
		/* this is not IA architecture */
		return (B_FALSE);
	}

	if ((isa_bridge = sfe_search_pci_dev(0x1039, 0x8)) == NULL) {
		cmn_err(CE_WARN, "%s: failed to find isa-bridge pci1039,8",
		    dp->name);
		return (B_FALSE);
	}

	if (pci_config_setup(isa_bridge, &isa_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: ddi_regs_map_setup failed",
		    dp->name);
		return (B_FALSE);
	}

	/* enable access to CMOS RAM */
	reg = pci_config_get8(isa_handle, 0x48);
	pci_config_put8(isa_handle, 0x48, reg | 0x40);

	for (i = 0; i < ETHERADDRL; i++) {
		outb(0x70, 0x09 + i);
		dp->dev_addr.ether_addr_octet[i] = inb(0x71);
	}

	/* disable access to CMOS RAM */
	pci_config_put8(isa_handle, 0x48, reg);
	pci_config_teardown(&isa_handle);

	return (B_TRUE);
}
static boolean_t
sfe_get_mac_addr_sis635(struct gem_dev *dp)
{
	int		i;
	uint32_t	rfcr;
	uint16_t	v;
	struct sfe_dev	*lp = dp->private;

	DPRINTF(2, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
	rfcr = INL(dp, RFCR);

	OUTL(dp, CR, lp->cr | CR_RELOAD);
	OUTL(dp, CR, lp->cr);

	/* disable packet filtering before reading filter */
	OUTL(dp, RFCR, rfcr & ~RFCR_RFEN);

	/* load MAC addr from filter data register */
	for (i = 0; i < ETHERADDRL; i += 2) {
		OUTL(dp, RFCR,
		    (RFADDR_MAC_SIS900 + (i/2)) << RFCR_RFADDR_SHIFT_SIS900);
		v = INL(dp, RFDR);
		dp->dev_addr.ether_addr_octet[i] = (uint8_t)v;
		dp->dev_addr.ether_addr_octet[i+1] = (uint8_t)(v >> 8);
	}

	/* re-enable packet filtering */
	OUTL(dp, RFCR, rfcr | RFCR_RFEN);

	return (B_TRUE);
}
static boolean_t
sfe_get_mac_addr_sis962(struct gem_dev *dp)
{
	boolean_t	ret;
	int		i;

	ret = B_FALSE;

	/* raise request signal to access EEPROM */
	OUTL(dp, MEAR, EROMAR_EEREQ);
	for (i = 0; (INL(dp, MEAR) & EROMAR_EEGNT) == 0; i++) {
		if (i > 200) {
			/* failed to acquire eeprom */
			cmn_err(CE_NOTE,
			    CONS "%s: failed to access eeprom", dp->name);
			goto x;
		}
		drv_usecwait(10);
	}
	ret = sfe_get_mac_addr_sis900(dp);
x:
	/* release EEPROM */
	OUTL(dp, MEAR, EROMAR_EEDONE);

	return (ret);
}
static int
sfe_reset_chip_sis900(struct gem_dev *dp)
{
	int		i;
	uint32_t	done;
	uint32_t	val;
	struct sfe_dev	*lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));

	/* invalidate mac addr cache */
	bzero(lp->mac_addr, sizeof (lp->mac_addr));

	lp->cr = 0;

	/* inhibit interrupt */
	OUTL(dp, IMR, 0);
	lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;

	OUTLINL(dp, RFCR, 0);

	OUTL(dp, CR, CR_RST | CR_TXR | CR_RXR);
	drv_usecwait(10);

	done = 0;
	for (i = 0; done != (ISR_TXRCMP | ISR_RXRCMP); i++) {
		if (i > 1000) {
			cmn_err(CE_WARN, "%s: chip reset timeout", dp->name);
			return (GEM_FAILURE);
		}
		done |= INL(dp, ISR) & (ISR_TXRCMP | ISR_RXRCMP);
		drv_usecwait(10);
	}

	if (lp->revid == SIS630ET_900_REV) {
		lp->cr |= CR_ACCESSMODE;
		OUTL(dp, CR, lp->cr | INL(dp, CR));
	}

	/* Configuration register: enable PCI parity */
	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
	    dp->name, INL(dp, CFG), CFG_BITS_SIS900));
	val = 0;
	if (lp->revid >= SIS635A_900_REV ||
	    lp->revid == SIS900B_900_REV) {
		val |= CFG_RND_CNT;
	}
	OUTL(dp, CFG, val);
	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
	    INL(dp, CFG), CFG_BITS_SIS900));

	return (GEM_SUCCESS);
}
static int
sfe_reset_chip_dp83815(struct gem_dev *dp)
{
	int		i;
	uint32_t	val;
	struct sfe_dev	*lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));

	/* invalidate mac addr cache */
	bzero(lp->mac_addr, sizeof (lp->mac_addr));

	lp->cr = 0;

	/* inhibit interrupts */
	OUTL(dp, IMR, 0);
	lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;

	OUTL(dp, RFCR, 0);

	OUTL(dp, CR, CR_RST);
	drv_usecwait(10);

	for (i = 0; INL(dp, CR) & CR_RST; i++) {
		if (i > 100) {
			cmn_err(CE_WARN, "!%s: chip reset timeout", dp->name);
			return (GEM_FAILURE);
		}
		drv_usecwait(10);
	}
	DPRINTF(0, (CE_CONT, "!%s: chip reset in %duS", dp->name, i*10));

	OUTL(dp, CCSR, CCSR_PMESTS);

	/* Configuration register: enable PCI parity */
	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
	    dp->name, INL(dp, CFG), CFG_BITS_DP83815));
	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
	OUTL(dp, CFG, val | CFG_PAUSE_ADV);
	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
	    INL(dp, CFG), CFG_BITS_DP83815));

	return (GEM_SUCCESS);
}
static int
sfe_init_chip(struct gem_dev *dp)
{
	/* Configuration register: have been set up in sfe_chip_reset */

	/* PCI test control register: do nothing */

	/* Interrupt status register : do nothing */

	/* Interrupt mask register: clear, but leave lp->our_intr_bits */
	OUTL(dp, IMR, 0);

	/* Enhanced PHY Access register (sis900): do nothing */

	/* Transmit Descriptor Pointer register: base addr of TX ring */
	OUTL(dp, TXDP, dp->tx_ring_dma);

	/* Receive descriptor pointer register: base addr of RX ring */
	OUTL(dp, RXDP, dp->rx_ring_dma);

	return (GEM_SUCCESS);
}
static uint_t
sfe_mcast_hash(struct gem_dev *dp, uint8_t *addr)
{
	return (gem_ether_crc_be(addr, ETHERADDRL));
}
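
/*
 * The hash index is derived from the big-endian CRC32 of the address.
 * Worked example for DP83815 (see sfe_set_rx_filter_dp83815 below):
 * the upper 9 CRC bits select one of 512 bins, so a hash value of
 * 0x80000000 yields bin j = 0x100, which is bit (j % 16) == 0 of
 * hash_tbl[j / 16] == hash_tbl[16].
 */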
#ifdef DEBUG_LEVEL
static void
sfe_rxfilter_dump(struct gem_dev *dp, int start, int end)
{
	int		i;
	int		j;
	uint16_t	ram[4];

	cmn_err(CE_CONT, "!%s: rx filter ram dump:", dp->name);
#define	WORDS_PER_LINE	4
	for (i = start; i < end; i += WORDS_PER_LINE*2) {
		for (j = 0; j < WORDS_PER_LINE; j++) {
			OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i + j*2);
			ram[j] = INL(dp, RFDR);
		}

		cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x",
		    i, ram[0], ram[1], ram[2], ram[3]);
	}
#undef WORDS_PER_LINE
}
#endif /* DEBUG_LEVEL */
static uint_t	sfe_rf_perfect_base_dp83815[] = {
	RFADDR_PMATCH0_DP83815,
	RFADDR_PMATCH1_DP83815,
	RFADDR_PMATCH2_DP83815,
	RFADDR_PMATCH3_DP83815,
};
static int
sfe_set_rx_filter_dp83815(struct gem_dev *dp)
{
	int		i;
	int		j;
	uint32_t	mode;
	uint8_t		*mac = dp->cur_addr.ether_addr_octet;
	uint16_t	hash_tbl[32];
	struct sfe_dev	*lp = dp->private;

	DPRINTF(1, (CE_CONT, CONS "%s: %s: called, mc_count:%d, mode:0x%b",
	    dp->name, __func__, dp->mc_count, dp->rxmode, RXMODE_BITS));

#if DEBUG_LEVEL > 0
	for (i = 0; i < dp->mc_count; i++) {
		cmn_err(CE_CONT,
		    "!%s: adding mcast(%d) %02x:%02x:%02x:%02x:%02x:%02x",
		    dp->name, i,
		    dp->mc_list[i].addr.ether_addr_octet[0],
		    dp->mc_list[i].addr.ether_addr_octet[1],
		    dp->mc_list[i].addr.ether_addr_octet[2],
		    dp->mc_list[i].addr.ether_addr_octet[3],
		    dp->mc_list[i].addr.ether_addr_octet[4],
		    dp->mc_list[i].addr.ether_addr_octet[5]);
	}
#endif
	if ((dp->rxmode & RXMODE_ENABLE) == 0) {
		/* disable rx filter */
		OUTL(dp, RFCR, 0);
		return (GEM_SUCCESS);
	}

	/*
	 * Set Receive filter control register
	 */
	if (dp->rxmode & RXMODE_PROMISC) {
		/* all broadcast, all multicast, all physical */
		mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
	} else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 16*32/2) {
		/* all broadcast, all multicast, physical for the chip */
		mode = RFCR_AAB | RFCR_AAM | RFCR_APM_DP83815;
	} else if (dp->mc_count > 4) {
		/*
		 * Use multicast hash table,
		 * accept all broadcast and physical for the chip.
		 */
		mode = RFCR_AAB | RFCR_MHEN_DP83815 | RFCR_APM_DP83815;

		bzero(hash_tbl, sizeof (hash_tbl));
		for (i = 0; i < dp->mc_count; i++) {
			j = dp->mc_list[i].hash >> (32 - 9);
			hash_tbl[j / 16] |= 1 << (j % 16);
		}
	} else {
		/*
		 * Use pattern match filter for multicast addresses,
		 * accept all broadcast and physical for the chip
		 */
		/* need to enable corresponding pattern registers */
		mode = RFCR_AAB | RFCR_APM_DP83815 |
		    (((1 << dp->mc_count) - 1) << RFCR_APAT_SHIFT);
	}

#if DEBUG_LEVEL > 1
	cmn_err(CE_CONT,
	    "!%s: mac %02x:%02x:%02x:%02x:%02x:%02x"
	    " cache %02x:%02x:%02x:%02x:%02x:%02x",
	    dp->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
	    lp->mac_addr[0], lp->mac_addr[1],
	    lp->mac_addr[2], lp->mac_addr[3],
	    lp->mac_addr[4], lp->mac_addr[5]);
#endif
	if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
		/*
		 * XXX - need to *disable* rx filter to load mac address for
		 * the chip. otherwise, we cannot setup rxfilter correctly.
		 */
		/* setup perfect match register for my station address */
		for (i = 0; i < ETHERADDRL; i += 2) {
			OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i);
			OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
		}

		bcopy(mac, lp->mac_addr, ETHERADDRL);
	}

#if DEBUG_LEVEL > 3
	/* clear pattern ram */
	for (j = 0x200; j < 0x380; j += 2) {
		OUTL(dp, RFCR, j);
		OUTL(dp, RFDR, 0);
	}
#endif
	if (mode & RFCR_APAT_DP83815) {
		/* setup multicast address into pattern match registers */
		for (j = 0; j < dp->mc_count; j++) {
			mac = &dp->mc_list[j].addr.ether_addr_octet[0];
			for (i = 0; i < ETHERADDRL; i += 2) {
				OUTL(dp, RFCR,
				    sfe_rf_perfect_base_dp83815[j] + i*2);
				OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
			}
		}

		/* setup pattern count registers */
		OUTL(dp, RFCR, RFADDR_PCOUNT01_DP83815);
		OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
		OUTL(dp, RFCR, RFADDR_PCOUNT23_DP83815);
		OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
	}

	if (mode & RFCR_MHEN_DP83815) {
		/* Load Multicast hash table */
		for (i = 0; i < 32; i++) {
			/* for DP83815, index is in byte */
			OUTL(dp, RFCR, RFADDR_MULTICAST_DP83815 + i*2);
			OUTL(dp, RFDR, hash_tbl[i]);
		}
	}
#if DEBUG_LEVEL > 2
	sfe_rxfilter_dump(dp, 0, 0x10);
	sfe_rxfilter_dump(dp, 0x200, 0x380);
#endif
	/* Set rx filter mode and enable rx filter */
	OUTL(dp, RFCR, RFCR_RFEN | mode);

	return (GEM_SUCCESS);
}
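
/*
 * Note on the three filter strategies above: with 4 or fewer multicast
 * addresses the pattern-match registers give exact filtering; from 5 up
 * to 256 addresses (half of the 512 hash bins) the hash table is used,
 * which may accept extra frames whose CRC collides; beyond that the
 * filter falls back to accepting all multicast traffic.
 */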
static int
sfe_set_rx_filter_sis900(struct gem_dev *dp)
{
	int		i;
	uint32_t	mode;
	uint16_t	hash_tbl[16];
	uint8_t		*mac = dp->cur_addr.ether_addr_octet;
	int		hash_size;
	int		hash_shift;
	struct sfe_dev	*lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	if ((dp->rxmode & RXMODE_ENABLE) == 0) {
		/* disable rx filter */
		OUTLINL(dp, RFCR, 0);
		return (GEM_SUCCESS);
	}

	/*
	 * determine hardware hash table size in words.
	 */
	hash_shift = 25;
	if (lp->revid >= SIS635A_900_REV || lp->revid == SIS900B_900_REV) {
		hash_shift = 24;
	}
	hash_size = (1 << (32 - hash_shift)) / 16;
	bzero(hash_tbl, sizeof (hash_tbl));

	/* Set Receive filter control register */

	if (dp->rxmode & RXMODE_PROMISC) {
		/* all broadcast, all multicast, all physical */
		mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
	} else if ((dp->rxmode & RXMODE_ALLMULTI) ||
	    dp->mc_count > hash_size*16/2) {
		/* all broadcast, all multicast, physical for the chip */
		mode = RFCR_AAB | RFCR_AAM;
	} else {
		/* all broadcast, physical for the chip */
		mode = RFCR_AAB;
	}

	/* make hash table */
	for (i = 0; i < dp->mc_count; i++) {
		uint_t	h;
		h = dp->mc_list[i].hash >> hash_shift;
		hash_tbl[h / 16] |= 1 << (h % 16);
	}

	if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
		/* Disable Rx filter and load mac address */
		for (i = 0; i < ETHERADDRL/2; i++) {
			/* For sis900, index is in word */
			OUTLINL(dp, RFCR,
			    (RFADDR_MAC_SIS900+i) << RFCR_RFADDR_SHIFT_SIS900);
			OUTLINL(dp, RFDR, (mac[i*2+1] << 8) | mac[i*2]);
		}

		bcopy(mac, lp->mac_addr, ETHERADDRL);
	}

	/* Load Multicast hash table */
	for (i = 0; i < hash_size; i++) {
		/* For sis900, index is in word */
		OUTLINL(dp, RFCR,
		    (RFADDR_MULTICAST_SIS900 + i) << RFCR_RFADDR_SHIFT_SIS900);
		OUTLINL(dp, RFDR, hash_tbl[i]);
	}

	/* Load rx filter mode and enable rx filter */
	OUTLINL(dp, RFCR, RFCR_RFEN | mode);

	return (GEM_SUCCESS);
}
static int
sfe_start_chip(struct gem_dev *dp)
{
	struct sfe_dev	*lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	/*
	 * setup interrupt mask, which shouldn't include ISR_TOK
	 * to improve performance.
	 */
	lp->our_intr_bits = OUR_INTR_BITS;

	/* enable interrupt */
	if ((dp->misc_flag & GEM_NOINTR) == 0) {
		OUTL(dp, IER, 1);
		OUTL(dp, IMR, lp->our_intr_bits);
	}

	/* start the receive engine */
	OUTL(dp, CR, lp->cr | CR_RXE);

	return (GEM_SUCCESS);
}
/*
 * Stop nic core gracefully.
 */
static int
sfe_stop_chip(struct gem_dev *dp)
{
	struct sfe_dev	*lp = dp->private;
	uint32_t	done;
	int		i;
	uint32_t	val;

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	/*
	 * Although we inhibit interrupt here, we don't clear soft copy of
	 * interrupt mask to avoid bogus interrupts.
	 */
	OUTL(dp, IMR, 0);

	/* stop TX and RX immediately */
	OUTL(dp, CR, lp->cr | CR_TXR | CR_RXR);

	done = 0;
	for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
		if (i > 1000) {
			/*
			 * As the gem layer will call sfe_reset_chip(),
			 * we don't need to reset further.
			 */
			cmn_err(CE_NOTE, "!%s: %s: Tx/Rx reset timeout",
			    dp->name, __func__);
			return (GEM_FAILURE);
		}
		val = INL(dp, ISR);
		done |= val & (ISR_RXRCMP | ISR_TXRCMP);
		lp->isr_pended |= val & lp->our_intr_bits;
		drv_usecwait(10);
	}

	return (GEM_SUCCESS);
}
/*
 * Stop nic core gracefully for quiesce
 */
static int
sfe_stop_chip_quiesce(struct gem_dev *dp)
{
	struct sfe_dev	*lp = dp->private;
	uint32_t	done;
	int		i;
	uint32_t	val;

	/*
	 * Although we inhibit interrupt here, we don't clear soft copy of
	 * interrupt mask to avoid bogus interrupts.
	 */
	OUTL(dp, IMR, 0);

	/* stop TX and RX immediately */
	OUTL(dp, CR, CR_TXR | CR_RXR);

	done = 0;
	for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
		if (i > 1000) {
			/*
			 * As the gem layer will call sfe_reset_chip(),
			 * we don't need to reset further.
			 */
			return (DDI_FAILURE);
		}
		val = INL(dp, ISR);
		done |= val & (ISR_RXRCMP | ISR_TXRCMP);
		lp->isr_pended |= val & lp->our_intr_bits;
		drv_usecwait(10);
	}

	return (DDI_SUCCESS);
}
static uint_t
sfe_mxdma_value[] = { 512, 4, 8, 16, 32, 64, 128, 256, };

static uint_t
sfe_encode_mxdma(uint_t burstsize)
{
	int	i;

	if (burstsize > 256) {
		/* choose 512 */
		return (0);
	}

	for (i = 1; i < 8; i++) {
		if (burstsize <= sfe_mxdma_value[i]) {
			break;
		}
	}
	return (i);
}
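
/*
 * Worked example: sfe_encode_mxdma(64) stops at index 5 because
 * sfe_mxdma_value[5] == 64, so the TXCFG/RXCFG MXDMA field gets code 5;
 * any request above 256 returns code 0, which the hardware interprets
 * as a 512 byte burst.
 */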
static int
sfe_set_media(struct gem_dev *dp)
{
	uint32_t	txcfg;
	uint32_t	rxcfg;
	uint32_t	pcr;
	uint32_t	val;
	uint_t		txmxdma;
	uint_t		rxmxdma;
	struct sfe_dev	*lp = dp->private;

	extern int	gem_speed_value[];

	DPRINTF(2, (CE_CONT, CONS "%s: %s: %s duplex, %d Mbps",
	    dp->name, __func__,
	    dp->full_duplex ? "full" : "half", gem_speed_value[dp->speed]));

	/* initialize txcfg and rxcfg */
	txcfg = TXCFG_ATP;
	if (dp->full_duplex) {
		txcfg |= (TXCFG_CSI | TXCFG_HBI);
	}
	rxcfg = RXCFG_AEP | RXCFG_ARP;
	if (dp->full_duplex) {
		rxcfg |= RXCFG_ATX;
	}

	/* select txmxdma and rxmxdma, maximum burst length */
	if (lp->chip->chip_type == CHIPTYPE_SIS900) {
#ifdef DEBUG_SIS900_EDB
		val = CFG_EDB_MASTER;
#else
		val = INL(dp, CFG) & CFG_EDB_MASTER;
#endif
		if (val) {
			/*
			 * sis900 built-in cores:
			 * max burst length must be fixed to 64
			 */
			txmxdma = 64;
			rxmxdma = 64;
		} else {
			/*
			 * sis900 pci chipset:
			 * the vendor recommended to fix max burst length
			 * to 512
			 */
			txmxdma = 512;
			rxmxdma = 512;
		}
	} else {
		/*
		 * use user defined or default for tx/rx max burst length
		 */
		txmxdma = max(dp->txmaxdma, 256);
		rxmxdma = max(dp->rxmaxdma, 256);
	}

	/* tx high water mark */
	lp->tx_drain_threshold = ROUNDUP2(dp->txthr, TXCFG_FIFO_UNIT);

	/* determine tx_fill_threshold according to the drain threshold */
	lp->tx_fill_threshold =
	    TXFIFOSIZE - lp->tx_drain_threshold - TXCFG_FIFO_UNIT;

	/* tune txmxdma not to exceed tx_fill_threshold */
	for (; ; ) {
		/* normalize txmxdma requested */
		val = sfe_encode_mxdma(txmxdma);
		txmxdma = sfe_mxdma_value[val];

		if (txmxdma <= lp->tx_fill_threshold) {
			break;
		}
		/* select new txmxdma */
		txmxdma = txmxdma / 2;
	}
	txcfg |= val << TXCFG_MXDMA_SHIFT;

	/* encode rxmxdma, maximum burst length for rx */
	val = sfe_encode_mxdma(rxmxdma);
	rxcfg |= val << RXCFG_MXDMA_SHIFT;
	rxmxdma = sfe_mxdma_value[val];

	/* receive starting threshold - it has only a 5-bit wide field */
	val = ROUNDUP2(max(dp->rxthr, ETHERMIN), RXCFG_FIFO_UNIT);
	lp->rx_drain_threshold =
	    min(val, (RXCFG_DRTH >> RXCFG_DRTH_SHIFT) * RXCFG_FIFO_UNIT);

	DPRINTF(0, (CE_CONT,
	    "%s: %s: tx: drain:%d(rest %d) fill:%d mxdma:%d,"
	    " rx: drain:%d mxdma:%d",
	    dp->name, __func__,
	    lp->tx_drain_threshold, TXFIFOSIZE - lp->tx_drain_threshold,
	    lp->tx_fill_threshold, txmxdma,
	    lp->rx_drain_threshold, rxmxdma));

	ASSERT(lp->tx_drain_threshold < 64*TXCFG_FIFO_UNIT);
	ASSERT(lp->tx_fill_threshold < 64*TXCFG_FIFO_UNIT);
	ASSERT(lp->rx_drain_threshold < 32*RXCFG_FIFO_UNIT);

	txcfg |= ((lp->tx_fill_threshold/TXCFG_FIFO_UNIT) << TXCFG_FLTH_SHIFT)
	    | (lp->tx_drain_threshold/TXCFG_FIFO_UNIT);
	OUTL(dp, TXCFG, txcfg);

	rxcfg |= ((lp->rx_drain_threshold/RXCFG_FIFO_UNIT) << RXCFG_DRTH_SHIFT);
	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
		rxcfg |= RXCFG_ALP_DP83815;
	}
	OUTL(dp, RXCFG, rxcfg);

	DPRINTF(0, (CE_CONT, CONS "%s: %s: txcfg:%b rxcfg:%b",
	    dp->name, __func__,
	    txcfg, TXCFG_BITS, rxcfg, RXCFG_BITS));

	/* flow control */
	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
		pcr = INL(dp, PCR);
		switch (dp->flow_control) {
		case FLOW_CONTROL_SYMMETRIC:
		case FLOW_CONTROL_RX_PAUSE:
			OUTL(dp, PCR, pcr | PCR_PSEN | PCR_PS_MCAST);
			break;

		default:
			OUTL(dp, PCR,
			    pcr & ~(PCR_PSEN | PCR_PS_MCAST | PCR_PS_DA));
			break;
		}
		DPRINTF(2, (CE_CONT, CONS "%s: PCR: %b", dp->name,
		    INL(dp, PCR), PCR_BITS));

	} else if (lp->chip->chip_type == CHIPTYPE_SIS900) {
		switch (dp->flow_control) {
		case FLOW_CONTROL_SYMMETRIC:
		case FLOW_CONTROL_RX_PAUSE:
			OUTL(dp, FLOWCTL, FLOWCTL_FLOWEN);
			break;
		default:
			OUTL(dp, FLOWCTL, 0);
			break;
		}
		DPRINTF(2, (CE_CONT, CONS "%s: FLOWCTL: %b",
		    dp->name, INL(dp, FLOWCTL), FLOWCTL_BITS));
	}
	return (GEM_SUCCESS);
}

static int
sfe_get_stats(struct gem_dev *dp)
{
	/* do nothing */
	return (GEM_SUCCESS);
}
/*
 * descriptor manipulations
 */
static int
sfe_tx_desc_write(struct gem_dev *dp, int slot,
    ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags)
{
	uint32_t		mark;
	struct sfe_desc		*tdp;
	ddi_dma_cookie_t	*dcp;
	uint32_t		tmp0;
#if DEBUG_LEVEL > 2
	int	i;

	cmn_err(CE_CONT,
	    CONS "%s: time:%d %s seqnum: %d, slot %d, frags: %d flags: %llx",
	    dp->name, ddi_get_lbolt(), __func__,
	    dp->tx_desc_tail, slot, frags, flags);

	for (i = 0; i < frags; i++) {
		cmn_err(CE_CONT, CONS "%d: addr: 0x%x, len: 0x%x",
		    i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
	}
#endif
	/*
	 * write tx descriptor in reversed order.
	 */
#if DEBUG_LEVEL > 3
	flags |= GEM_TXFLAG_INTR;
#endif
	mark = (flags & GEM_TXFLAG_INTR)
	    ? (CMDSTS_OWN | CMDSTS_INTR) : CMDSTS_OWN;

	dcp = &dmacookie[0];
	if (flags & GEM_TXFLAG_HEAD) {
		mark &= ~CMDSTS_OWN;
	}

	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
	tmp0 = (uint32_t)dcp->dmac_address;
	mark |= (uint32_t)dcp->dmac_size;
	tdp->d_bufptr = LE_32(tmp0);
	tdp->d_cmdsts = LE_32(mark);

	return (frags);
}
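
/*
 * Note on the OWN bit handoff: every descriptor of a packet is written
 * with CMDSTS_OWN set except the head, for which GEM_TXFLAG_HEAD clears
 * it, so the hardware cannot start on a partially built chain.
 * sfe_tx_start() below sets OWN on the head only after the rest of the
 * chain has been synced to the device.
 */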
static void
sfe_tx_start(struct gem_dev *dp, int start_slot, int nslot)
{
	uint_t		tx_ring_size = dp->gc.gc_tx_ring_size;
	struct sfe_desc	*tdp;
	struct sfe_dev	*lp = dp->private;

	if (nslot > 1) {
		gem_tx_desc_dma_sync(dp,
		    SLOT(start_slot + 1, tx_ring_size),
		    nslot - 1, DDI_DMA_SYNC_FORDEV);
	}

	/* hand the head descriptor of the packet over to the hardware */
	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * start_slot];
	tdp->d_cmdsts |= LE_32(CMDSTS_OWN);

	gem_tx_desc_dma_sync(dp, start_slot, 1, DDI_DMA_SYNC_FORDEV);

	/*
	 * Activate the Transmit Buffer Manager Fill state machine.
	 */
	if (dp->mac_active) {
		OUTL(dp, CR, lp->cr | CR_TXE);
	}
}
static void
sfe_rx_desc_write(struct gem_dev *dp, int slot,
    ddi_dma_cookie_t *dmacookie, int frags)
{
	struct sfe_desc	*rdp;
	uint32_t	tmp0;
	uint32_t	tmp1;
#if DEBUG_LEVEL > 2
	int		i;

	cmn_err(CE_CONT, CONS
	    "%s: %s seqnum: %d, slot %d, frags: %d",
	    dp->name, __func__, dp->rx_active_tail, slot, frags);
	for (i = 0; i < frags; i++) {
		cmn_err(CE_CONT, CONS " frag: %d addr: 0x%llx, len: 0x%lx",
		    i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
	}
#endif
	/* for the last slot of the packet */
	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];

	tmp0 = (uint32_t)dmacookie->dmac_address;
	tmp1 = CMDSTS_INTR | (uint32_t)dmacookie->dmac_size;
	rdp->d_bufptr = LE_32(tmp0);
	rdp->d_cmdsts = LE_32(tmp1);
}
static uint_t
sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
{
	uint_t		tx_ring_size = dp->gc.gc_tx_ring_size;
	struct sfe_desc	*tdp;
	uint32_t	status;
	int		cols;
	struct sfe_dev	*lp = dp->private;
#ifdef DEBUG_LEVEL
	clock_t		delay;
#endif
	/* check status of the last descriptor */
	tdp = (void *)
	    &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot + ndesc - 1, tx_ring_size)];

	/*
	 * Don't use LE_32() directly to read tdp->d_cmdsts.
	 * It is not atomic for big endian cpus.
	 */
	status = tdp->d_cmdsts;
	status = LE_32(status);

	DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
	    dp->name, ddi_get_lbolt(), __func__,
	    slot, status, TXSTAT_BITS));

	if (status & CMDSTS_OWN) {
		/*
		 * not yet transmitted
		 */
		/* workaround for tx hang */
		if (lp->chip->chip_type == CHIPTYPE_DP83815 &&
		    dp->mac_active) {
			OUTL(dp, CR, lp->cr | CR_TXE);
		}
		return (0);
	}

	if (status & CMDSTS_MORE) {
		/* XXX - the hardware problem but don't panic the system */
		/* avoid lint bug for %b format string including 32nd bit */
		cmn_err(CE_NOTE, CONS
		    "%s: tx status bits incorrect: slot:%d, status:0x%x",
		    dp->name, slot, status);
	}

#if DEBUG_LEVEL > 3
	delay = (ddi_get_lbolt() - dp->tx_buf_head->txb_stime) * 10;
	if (delay >= 50) {
		DPRINTF(0, (CE_NOTE, "%s: tx deferred %d mS: slot %d",
		    dp->name, delay, slot));
	}
#endif

#if DEBUG_LEVEL > 3
	for (i = 0; i < ndesc-1; i++) {
		uint32_t	s;
		int		n;

		n = SLOT(slot + i, tx_ring_size);
		s = LE_32(
		    ((struct sfe_desc *)((void *)
		    &dp->tx_ring[SFE_DESC_SIZE * n]))->d_cmdsts);

		ASSERT(s & CMDSTS_MORE);
		ASSERT((s & CMDSTS_OWN) == 0);
	}
#endif

	/*
	 * collect statistics
	 */
	if ((status & CMDSTS_OK) == 0) {

		/* failed to transmit the packet */

		DPRINTF(0, (CE_CONT, CONS "%s: Transmit error, Tx status %b",
		    dp->name, status, TXSTAT_BITS));

		dp->stats.errxmt++;

		if (status & CMDSTS_TFU) {
			dp->stats.underflow++;
		} else if (status & CMDSTS_CRS) {
			dp->stats.nocarrier++;
		} else if (status & CMDSTS_OWC) {
			dp->stats.xmtlatecoll++;
		} else if ((!dp->full_duplex) && (status & CMDSTS_EC)) {
			dp->stats.excoll++;
			dp->stats.collisions += 16;
		} else {
			dp->stats.xmit_internal_err++;
		}
	} else if (!dp->full_duplex) {
		cols = (status >> CMDSTS_CCNT_SHIFT) & CCNT_MASK;

		if (cols > 0) {
			if (cols == 1) {
				dp->stats.first_coll++;
			} else /* (cols > 1) */ {
				dp->stats.multi_coll++;
			}
			dp->stats.collisions += cols;
		} else if (status & CMDSTS_TD) {
			dp->stats.defer++;
		}
	}

	return (GEM_TX_DONE);
}
static uint64_t
sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
{
	struct sfe_desc	*rdp;
	uint_t		len;
	uint_t		flag;
	uint32_t	status;

	flag = GEM_RX_DONE;

	/* Don't read ISR because we cannot ack only the rx interrupt. */

	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];

	/*
	 * Don't use LE_32() directly to read rdp->d_cmdsts.
	 * It is not atomic for big endian cpus.
	 */
	status = rdp->d_cmdsts;
	status = LE_32(status);

	DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
	    dp->name, ddi_get_lbolt(), __func__,
	    slot, status, RXSTAT_BITS));

	if ((status & CMDSTS_OWN) == 0) {
		/*
		 * No more received packets because
		 * this buffer is owned by NIC.
		 */
		return (0);
	}

#define	RX_ERR_BITS \
	(CMDSTS_RXA | CMDSTS_RXO | CMDSTS_LONG | CMDSTS_RUNT | \
	CMDSTS_ISE | CMDSTS_CRCE | CMDSTS_FAE | CMDSTS_MORE)

	if (status & RX_ERR_BITS) {
		/*
		 * Packet with error received
		 */
		DPRINTF(0, (CE_CONT, CONS "%s: Corrupted packet "
		    "received, buffer status: %b",
		    dp->name, status, RXSTAT_BITS));

		/* collect statistics information */
		dp->stats.errrcv++;

		if (status & CMDSTS_RXO) {
			dp->stats.overflow++;
		} else if (status & (CMDSTS_LONG | CMDSTS_MORE)) {
			dp->stats.frame_too_long++;
		} else if (status & CMDSTS_RUNT) {
			dp->stats.runt++;
		} else if (status & (CMDSTS_ISE | CMDSTS_FAE)) {
			dp->stats.frame++;
		} else if (status & CMDSTS_CRCE) {
			dp->stats.crc++;
		} else {
			dp->stats.rcv_internal_err++;
		}

		return (flag | GEM_RX_ERR);
	}

	/*
	 * this packet was received without errors
	 */
	if ((len = (status & CMDSTS_SIZE)) >= ETHERFCSL) {
		len -= ETHERFCSL;
	}

#if DEBUG_LEVEL > 10
{
	int	i;
	uint8_t	*bp = dp->rx_buf_head->rxb_buf;

	cmn_err(CE_CONT, CONS "%s: len:%d", dp->name, len);

	for (i = 0; i < 60; i += 10) {
		cmn_err(CE_CONT, CONS
		    "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
		    bp[0], bp[1], bp[2], bp[3], bp[4],
		    bp[5], bp[6], bp[7], bp[8], bp[9]);
		bp += 10;
	}
}
#endif
	return (flag | (len & GEM_RX_LEN));
}
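
/*
 * The value returned above packs the received length into GEM_RX_LEN
 * and ORs in GEM_RX_DONE or GEM_RX_ERR, while returning 0 tells the
 * gem layer that the descriptor is still owned by the NIC and receive
 * processing should stop at this slot.
 */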
static void
sfe_tx_desc_init(struct gem_dev *dp, int slot)
{
	uint_t		tx_ring_size = dp->gc.gc_tx_ring_size;
	struct sfe_desc	*tdp;
	uint32_t	here;

	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];

	/* don't clear d_link field, which has a valid pointer */
	tdp->d_cmdsts = 0;

	/* make a link to this from the previous descriptor */
	here = ((uint32_t)dp->tx_ring_dma) + SFE_DESC_SIZE*slot;

	tdp = (void *)
	    &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot - 1, tx_ring_size)];
	tdp->d_link = LE_32(here);
}
static void
sfe_rx_desc_init(struct gem_dev *dp, int slot)
{
	uint_t		rx_ring_size = dp->gc.gc_rx_ring_size;
	struct sfe_desc	*rdp;
	uint32_t	here;

	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];

	/* don't clear d_link field, which has a valid pointer */
	rdp->d_cmdsts = LE_32(CMDSTS_OWN);

	/* make a link to this from the previous descriptor */
	here = ((uint32_t)dp->rx_ring_dma) + SFE_DESC_SIZE*slot;

	rdp = (void *)
	    &dp->rx_ring[SFE_DESC_SIZE * SLOT(slot - 1, rx_ring_size)];
	rdp->d_link = LE_32(here);
}
static void
sfe_tx_desc_clean(struct gem_dev *dp, int slot)
{
	struct sfe_desc	*tdp;

	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
	tdp->d_cmdsts = 0;
}

static void
sfe_rx_desc_clean(struct gem_dev *dp, int slot)
{
	struct sfe_desc	*rdp;

	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
	rdp->d_cmdsts = LE_32(CMDSTS_OWN);
}
/*
 * Device depend interrupt handler
 */
static uint_t
sfe_interrupt(struct gem_dev *dp)
{
	uint_t		rx_ring_size = dp->gc.gc_rx_ring_size;
	uint32_t	isr;
	uint32_t	isr_bogus;
	uint_t		flags = 0;
	boolean_t	need_to_reset = B_FALSE;
	struct sfe_dev	*lp = dp->private;

	/* read reason and clear interrupt */
	isr = INL(dp, ISR);

	isr_bogus = lp->isr_pended;
	lp->isr_pended = 0;

	if (((isr | isr_bogus) & lp->our_intr_bits) == 0) {
		/* we are not the interrupt source */
		return (DDI_INTR_UNCLAIMED);
	}

	DPRINTF(3, (CE_CONT,
	    CONS "%s: time:%ld %s:called: isr:0x%b rx_active_head: %d",
	    dp->name, ddi_get_lbolt(), __func__,
	    isr, INTR_BITS, dp->rx_active_head));

	if (!dp->mac_active) {
		/* the device is going to stop */
		lp->our_intr_bits = 0;
		return (DDI_INTR_CLAIMED);
	}

	isr &= lp->our_intr_bits;

	if (isr & (ISR_RXSOVR | ISR_RXORN | ISR_RXIDLE | ISR_RXERR |
	    ISR_RXDESC | ISR_RXOK)) {
		(void) gem_receive(dp);

		if (isr & (ISR_RXSOVR | ISR_RXORN)) {
			DPRINTF(0, (CE_CONT,
			    CONS "%s: rx fifo overrun: isr %b",
			    dp->name, isr, INTR_BITS));
			/* no need to restart rx */
			dp->stats.overflow++;
		}

		if (isr & ISR_RXIDLE) {
			DPRINTF(0, (CE_CONT,
			    CONS "%s: rx buffer ran out: isr %b",
			    dp->name, isr, INTR_BITS));

			dp->stats.norcvbuf++;

			/*
			 * Make RXDP point to the head of the receive
			 * buffer list.
			 */
			OUTL(dp, RXDP, dp->rx_ring_dma +
			    SFE_DESC_SIZE *
			    SLOT(dp->rx_active_head, rx_ring_size));

			/* Restart the receive engine */
			OUTL(dp, CR, lp->cr | CR_RXE);
		}
	}

	if (isr & (ISR_TXURN | ISR_TXERR | ISR_TXDESC |
	    ISR_TXIDLE | ISR_TXOK)) {
		/* need to reclaim tx buffers */
		if (gem_tx_done(dp)) {
			flags |= INTR_RESTART_TX;
		}
		/*
		 * XXX - tx error statistics will be counted in
		 * sfe_tx_desc_stat() and no need to restart tx on errors.
		 */
	}

	if (isr & (ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT)) {
		cmn_err(CE_WARN, "%s: ERROR interrupt: isr %b.",
		    dp->name, isr, INTR_BITS);
		need_to_reset = B_TRUE;
	}

	if (need_to_reset) {
		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
		flags |= INTR_RESTART_TX;
	}

	DPRINTF(5, (CE_CONT, CONS "%s: %s: return: isr: %b",
	    dp->name, __func__, isr, INTR_BITS));

	return (DDI_INTR_CLAIMED | flags);
}
/* ======================================================== */
/*
 * HW depend MII routine
 */
/* ======================================================== */

/*
 * MII routines for NS DP83815
 */
static void
sfe_mii_sync_dp83815(struct gem_dev *dp)
{
	/* do nothing */
}

static uint16_t
sfe_mii_read_dp83815(struct gem_dev *dp, uint_t offset)
{
	DPRINTF(4, (CE_CONT, CONS "%s: %s: offset 0x%x",
	    dp->name, __func__, offset));
	return ((uint16_t)INL(dp, MII_REGS_BASE + offset*4));
}

static void
sfe_mii_write_dp83815(struct gem_dev *dp, uint_t offset, uint16_t val)
{
	DPRINTF(4, (CE_CONT, CONS "%s: %s: offset 0x%x 0x%x",
	    dp->name, __func__, offset, val));
	OUTL(dp, MII_REGS_BASE + offset*4, val);
}
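
/*
 * The DP83815 internal PHY is visible as ordinary memory-mapped
 * registers at MII_REGS_BASE + reg*4, so no MDIO framing is needed
 * above.  The SiS900 routines further below instead bit-bang the MDIO
 * pins through the MEAR register, one clock edge per OUTL().
 */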
static int
sfe_mii_config_dp83815(struct gem_dev *dp)
{
	uint32_t	srr;

	srr = INL(dp, SRR) & SRR_REV;

	DPRINTF(0, (CE_CONT, CONS "%s: srr:0x%04x %04x %04x %04x %04x %04x",
	    dp->name, srr,
	    INW(dp, 0x00cc),	/* PGSEL */
	    INW(dp, 0x00e4),	/* PMDCSR */
	    INW(dp, 0x00fc),	/* TSTDAT */
	    INW(dp, 0x00f4),	/* DSPCFG */
	    INW(dp, 0x00f8)));	/* SDCFG */

	if (srr == SRR_REV_DP83815CVNG) {
		/*
		 * NS datasheet says that DP83815CVNG needs the following
		 * registers to be patched for optimizing its performance.
		 * A report said that CRC errors on RX disappeared
		 * after applying the patch.
		 */
		OUTW(dp, 0x00cc, 0x0001);	/* PGSEL */
		OUTW(dp, 0x00e4, 0x189c);	/* PMDCSR */
		OUTW(dp, 0x00fc, 0x0000);	/* TSTDAT */
		OUTW(dp, 0x00f4, 0x5040);	/* DSPCFG */
		OUTW(dp, 0x00f8, 0x008c);	/* SDCFG */
		OUTW(dp, 0x00cc, 0x0000);	/* PGSEL */

		DPRINTF(0, (CE_CONT,
		    CONS "%s: PHY patched %04x %04x %04x %04x %04x",
		    dp->name,
		    INW(dp, 0x00cc),	/* PGSEL */
		    INW(dp, 0x00e4),	/* PMDCSR */
		    INW(dp, 0x00fc),	/* TSTDAT */
		    INW(dp, 0x00f4),	/* DSPCFG */
		    INW(dp, 0x00f8)));	/* SDCFG */
	} else if (((srr ^ SRR_REV_DP83815DVNG) & 0xff00) == 0 ||
	    ((srr ^ SRR_REV_DP83816AVNG) & 0xff00) == 0) {
		/*
		 * Additional patches for later chipsets
		 */
		OUTW(dp, 0x00cc, 0x0001);	/* PGSEL */
		OUTW(dp, 0x00e4, 0x189c);	/* PMDCSR */
		OUTW(dp, 0x00cc, 0x0000);	/* PGSEL */

		DPRINTF(0, (CE_CONT,
		    CONS "%s: PHY patched %04x %04x",
		    dp->name,
		    INW(dp, 0x00cc),	/* PGSEL */
		    INW(dp, 0x00e4)));	/* PMDCSR */
	}

	return (gem_mii_config_default(dp));
}
static int
sfe_mii_probe_dp83815(struct gem_dev *dp)
{
	uint32_t	val;

	/* try external phy first */
	DPRINTF(0, (CE_CONT, CONS "%s: %s: trying external phy",
	    dp->name, __func__));
	dp->mii_phy_addr = 0;
	dp->gc.gc_mii_sync = &sfe_mii_sync_sis900;
	dp->gc.gc_mii_read = &sfe_mii_read_sis900;
	dp->gc.gc_mii_write = &sfe_mii_write_sis900;

	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
	OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);

	if (gem_mii_probe_default(dp) == GEM_SUCCESS) {
		return (GEM_SUCCESS);
	}

	/* switch to internal phy */
	DPRINTF(0, (CE_CONT, CONS "%s: %s: switching to internal phy",
	    dp->name, __func__));
	dp->mii_phy_addr = -1;
	dp->gc.gc_mii_sync = &sfe_mii_sync_dp83815;
	dp->gc.gc_mii_read = &sfe_mii_read_dp83815;
	dp->gc.gc_mii_write = &sfe_mii_write_dp83815;

	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
	OUTL(dp, CFG, val | CFG_PAUSE_ADV | CFG_PHY_RST);
	drv_usecwait(100);	/* keep the RST bit asserted for a while */
	OUTL(dp, CFG, val | CFG_PAUSE_ADV);

	/* wait for PHY reset */
	delay(drv_usectohz(10000));

	return (gem_mii_probe_default(dp));
}
static int
sfe_mii_init_dp83815(struct gem_dev *dp)
{
	uint32_t	val;

	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);

	if (dp->mii_phy_addr == -1) {
		/* select internal phy */
		OUTL(dp, CFG, val | CFG_PAUSE_ADV);
	} else {
		/* select external phy */
		OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);
	}

	return (GEM_SUCCESS);
}
/*
 * MII routines for SiS900
 */
#define	MDIO_DELAY(dp)	{(void) INL(dp, MEAR); (void) INL(dp, MEAR); }

static void
sfe_mii_sync_sis900(struct gem_dev *dp)
{
	int	i;

	/* send 32 ONE's to make MII line idle */
	for (i = 0; i < 32; i++) {
		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO | MEAR_MDC);
		MDIO_DELAY(dp);
	}
}
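
/*
 * For reference, the clause-22 MDIO frame produced by the routines
 * below looks like (MSB first on the MDIO pin):
 *
 *	<32x1 preamble> <01 start> <10 read / 01 write>
 *	<5-bit phy addr> <5-bit reg addr> <turnaround> <16-bit data>
 *
 * sfe_mii_read_sis900() shifts out only bits 31..18 of the command
 * word (start, opcode, phy and reg fields), releases the bus for the
 * turnaround, then samples 16 data bits.  MII_READ_CMD()/MII_WRITE_CMD()
 * are assumed to build these frames; they are defined outside this
 * file (presumably in sfe_mii.h).
 */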
static int
sfe_mii_config_sis900(struct gem_dev *dp)
{
	struct sfe_dev	*lp = dp->private;

	/* Do chip depend setup */
	if ((dp->mii_phy_id & PHY_MASK) == PHY_ICS1893) {
		/* workaround for ICS1893 PHY */
		gem_mii_write(dp, 0x0018, 0xD200);
	}

	if (lp->revid == SIS630E_900_REV) {
		/*
		 * SiS 630E has bugs on default values
		 * of PHY registers
		 */
		gem_mii_write(dp, MII_AN_ADVERT, 0x05e1);
		gem_mii_write(dp, MII_CONFIG1, 0x0022);
		gem_mii_write(dp, MII_CONFIG2, 0xff00);
		gem_mii_write(dp, MII_MASK, 0xffc0);
	}
	sfe_set_eq_sis630(dp);

	return (gem_mii_config_default(dp));
}
static uint16_t
sfe_mii_read_sis900(struct gem_dev *dp, uint_t reg)
{
	uint32_t	cmd;
	uint16_t	ret;
	int		i;
	uint32_t	data;

	cmd = MII_READ_CMD(dp->mii_phy_addr, reg);

	for (i = 31; i >= 18; i--) {
		data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT;
		OUTL(dp, MEAR, data | MEAR_MDDIR);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
		MDIO_DELAY(dp);
	}

	/* turn around cycle */
	OUTL(dp, MEAR, 0);
	MDIO_DELAY(dp);

	/* get response from PHY */
	OUTL(dp, MEAR, MEAR_MDC);
	MDIO_DELAY(dp);
	OUTL(dp, MEAR, 0);

#if DEBUG_LEVEL > 0
	(void) INL(dp, MEAR);	/* delay */
	if (INL(dp, MEAR) & MEAR_MDIO) {
		cmn_err(CE_WARN, "%s: PHY@%d not responded",
		    dp->name, dp->mii_phy_addr);
	}
#else
	MDIO_DELAY(dp);
#endif
	/* terminate response cycle */
	OUTL(dp, MEAR, MEAR_MDC);
	MDIO_DELAY(dp);

	ret = 0;	/* to avoid lint errors */
	for (i = 16; i > 0; i--) {
		/* get 1 bit */
		OUTL(dp, MEAR, 0);
		(void) INL(dp, MEAR);	/* delay */
		ret = (ret << 1) | ((INL(dp, MEAR) >> MEAR_MDIO_SHIFT) & 1);
		OUTL(dp, MEAR, MEAR_MDC);
		MDIO_DELAY(dp);
	}

	/* send two idle(Z) bits to terminate the read cycle */
	for (i = 0; i < 2; i++) {
		OUTL(dp, MEAR, 0);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, MEAR_MDC);
		MDIO_DELAY(dp);
	}

	return (ret);
}
static void
sfe_mii_write_sis900(struct gem_dev *dp, uint_t reg, uint16_t val)
{
	uint32_t	cmd;
	int		i;
	uint32_t	data;

	cmd = MII_WRITE_CMD(dp->mii_phy_addr, reg, val);

	for (i = 31; i >= 0; i--) {
		data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT;
		OUTL(dp, MEAR, data | MEAR_MDDIR);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
		MDIO_DELAY(dp);
	}

	/* send two idle(Z) bits to terminate the write cycle. */
	for (i = 0; i < 2; i++) {
		OUTL(dp, MEAR, 0);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, MEAR_MDC);
		MDIO_DELAY(dp);
	}
}
#undef MDIO_DELAY
static void
sfe_set_eq_sis630(struct gem_dev *dp)
{
	uint16_t	reg14h;
	uint16_t	eq_value;
	uint16_t	max_value;
	uint16_t	min_value;
	int		i;
	uint8_t		rev;
	struct sfe_dev	*lp = dp->private;

	rev = lp->revid;

	if (!(rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
	    rev == SIS630A_900_REV || rev == SIS630ET_900_REV)) {
		/* it doesn't have an internal PHY */
		return;
	}

	if (dp->mii_state == MII_STATE_LINKUP) {
		reg14h = gem_mii_read(dp, MII_RESV);
		gem_mii_write(dp, MII_RESV, (0x2200 | reg14h) & 0xBFFF);

		eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
		max_value = min_value = eq_value;
		for (i = 1; i < 10; i++) {
			eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
			max_value = max(eq_value, max_value);
			min_value = min(eq_value, min_value);
		}

		/* for 630E, rule to determine the equalizer value */
		if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
		    rev == SIS630ET_900_REV) {
			if (max_value < 5) {
				eq_value = max_value;
			} else if (5 <= max_value && max_value < 15) {
				eq_value =
				    max(max_value + 1, min_value + 2);
			} else if (15 <= max_value) {
				eq_value =
				    max(max_value + 5, min_value + 6);
			}
		}
		/* for 630B0&B1, rule to determine the equalizer value */
		if (rev == SIS630A_900_REV &&
		    (lp->bridge_revid == SIS630B0 ||
		    lp->bridge_revid == SIS630B1)) {
			if (max_value == 0) {
				eq_value = 3;
			} else {
				eq_value = (max_value + min_value + 1)/2;
			}
		}
		/* write equalizer value and setting */
		reg14h = gem_mii_read(dp, MII_RESV) & ~0x02f8;
		reg14h |= 0x6000 | (eq_value << 3);
		gem_mii_write(dp, MII_RESV, reg14h);
	} else {
		reg14h = (gem_mii_read(dp, MII_RESV) & ~0x4000) | 0x2000;
		if (rev == SIS630A_900_REV &&
		    (lp->bridge_revid == SIS630B0 ||
		    lp->bridge_revid == SIS630B1)) {
			reg14h |= 0x0200;
		}
		gem_mii_write(dp, MII_RESV, reg14h);
	}
}
/* ======================================================== */
/*
 * OS depend (device driver) routine
 */
/* ======================================================== */
static void
sfe_chipinfo_init_sis900(struct gem_dev *dp)
{
	int		rev;
	struct sfe_dev	*lp = (struct sfe_dev *)dp->private;

	rev = lp->revid;

	if (rev == SIS630E_900_REV /* 0x81 */) {
		/* sis630E */
		lp->get_mac_addr = &sfe_get_mac_addr_sis630e;
	} else if (rev > 0x81 && rev <= 0x90) {
		/* 630S, 630EA1, 630ET, 635A */
		lp->get_mac_addr = &sfe_get_mac_addr_sis635;
	} else if (rev == SIS962_900_REV /* 0x91 */) {
		/* sis962 or later */
		lp->get_mac_addr = &sfe_get_mac_addr_sis962;
	} else {
		/* sis900 */
		lp->get_mac_addr = &sfe_get_mac_addr_sis900;
	}

	lp->bridge_revid = 0;

	if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
	    rev == SIS630A_900_REV || rev == SIS630ET_900_REV) {
		/*
		 * read host bridge revision
		 */
		dev_info_t	*bridge;
		ddi_acc_handle_t bridge_handle;

		if ((bridge = sfe_search_pci_dev(0x1039, 0x630)) == NULL) {
			cmn_err(CE_WARN,
			    "%s: cannot find host bridge (pci1039,630)",
			    dp->name);
			return;
		}

		if (pci_config_setup(bridge, &bridge_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: pci_config_setup failed",
			    dp->name);
			return;
		}

		lp->bridge_revid =
		    pci_config_get8(bridge_handle, PCI_CONF_REVID);
		pci_config_teardown(&bridge_handle);
	}
}
static int
sfe_attach_chip(struct gem_dev *dp)
{
	struct sfe_dev	*lp = (struct sfe_dev *)dp->private;

	DPRINTF(4, (CE_CONT, CONS "!%s: %s called", dp->name, __func__));

	/* setup chip-depend get_mac_address function */
	if (lp->chip->chip_type == CHIPTYPE_SIS900) {
		sfe_chipinfo_init_sis900(dp);
	} else {
		lp->get_mac_addr = &sfe_get_mac_addr_dp83815;
	}

	/* read MAC address */
	if (!(lp->get_mac_addr)(dp)) {
		cmn_err(CE_WARN,
		    "!%s: %s: failed to get factory mac address"
		    " please specify a mac address in sfe.conf",
		    dp->name, __func__);
		return (GEM_FAILURE);
	}

	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
		dp->mii_phy_addr = -1;	/* no need to scan PHY */
		dp->misc_flag |= GEM_VLAN_SOFT;
		dp->txthr += 4;	/* VTAG_SIZE */
	}
	dp->txthr = min(dp->txthr, TXFIFOSIZE - 2);

	return (GEM_SUCCESS);
}
static int
sfeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			unit;
	const char		*drv_name;
	int			i;
	ddi_acc_handle_t	conf_handle;
	int			vid;
	int			did;
	uint8_t			rev;
	uint32_t		iline;
	uint8_t			latim;
	struct chip_info	*p;
	struct gem_dev		*dp;
	struct sfe_dev		*lp;
	void			*base;
	ddi_acc_handle_t	regs_ha;
	struct gem_conf		*gcp;

	unit = ddi_get_instance(dip);
	drv_name = ddi_driver_name(dip);

	DPRINTF(3, (CE_CONT, CONS "%s%d: sfeattach: called", drv_name, unit));

	/*
	 * Common code after power-up
	 */
	if (pci_config_setup(dip, &conf_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: ddi_regs_map_setup failed",
		    drv_name, unit);
		goto err;
	}

	vid = pci_config_get16(conf_handle, PCI_CONF_VENID);
	did = pci_config_get16(conf_handle, PCI_CONF_DEVID);
	rev = pci_config_get16(conf_handle, PCI_CONF_REVID);
	iline = pci_config_get32(conf_handle, PCI_CONF_ILINE);
	latim = pci_config_get8(conf_handle, PCI_CONF_LATENCY_TIMER);

#ifdef DEBUG_BUILT_IN_SIS900
	rev = SIS630E_900_REV;
#endif
	for (i = 0, p = sfe_chiptbl; i < CHIPTABLESIZE; i++, p++) {
		if (p->venid == vid && p->devid == did) {
			/* found */
			goto chip_found;
		}
	}

	/* Not found */
	cmn_err(CE_WARN,
	    "%s%d: sfe_attach: wrong PCI venid/devid (0x%x, 0x%x)",
	    drv_name, unit, vid, did);
	pci_config_teardown(&conf_handle);
	goto err;

chip_found:
	pci_config_put16(conf_handle, PCI_CONF_COMM,
	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME |
	    pci_config_get16(conf_handle, PCI_CONF_COMM));

	/* ensure D0 mode */
	(void) gem_pci_set_power_state(dip, conf_handle, PCI_PMCSR_D0);

	pci_config_teardown(&conf_handle);

	switch (cmd) {
	case DDI_RESUME:
		return (gem_resume(dip));

	case DDI_ATTACH:

		DPRINTF(0, (CE_CONT,
		    CONS "%s%d: ilr 0x%08x, latency_timer:0x%02x",
		    drv_name, unit, iline, latim));
		/*
		 * Map in the device registers.
		 */
		if (gem_pci_regs_map_setup(dip,
		    (sfe_use_pcimemspace && p->chip_type == CHIPTYPE_DP83815)
		    ? PCI_ADDR_MEM32 : PCI_ADDR_IO, PCI_ADDR_MASK,
		    &sfe_dev_attr, &base, &regs_ha) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "%s%d: ddi_regs_map_setup failed",
			    drv_name, unit);
			goto err;
		}

		/*
		 * construct gem configuration
		 */
		gcp = kmem_zalloc(sizeof (*gcp), KM_SLEEP);

		(void) sprintf(gcp->gc_name, "%s%d", drv_name, unit);
		/* consistency on tx and rx */
		gcp->gc_tx_buf_align = sizeof (uint8_t) - 1;
		gcp->gc_tx_max_frags = MAXTXFRAGS;
		gcp->gc_tx_max_descs_per_pkt = gcp->gc_tx_max_frags;
		gcp->gc_tx_desc_unit_shift = 4;	/* 16 byte */
		gcp->gc_tx_buf_size = TX_BUF_SIZE;
		gcp->gc_tx_buf_limit = gcp->gc_tx_buf_size;
		gcp->gc_tx_ring_size = TX_RING_SIZE;
		gcp->gc_tx_ring_limit = gcp->gc_tx_ring_size;
		gcp->gc_tx_auto_pad = B_TRUE;
		gcp->gc_tx_copy_thresh = sfe_tx_copy_thresh;
		gcp->gc_tx_desc_write_oo = B_TRUE;

		gcp->gc_rx_buf_align = sizeof (uint8_t) - 1;
		gcp->gc_rx_max_frags = MAXRXFRAGS;
		gcp->gc_rx_desc_unit_shift = 4;
		gcp->gc_rx_ring_size = RX_RING_SIZE;
		gcp->gc_rx_buf_max = RX_BUF_SIZE;
		gcp->gc_rx_copy_thresh = sfe_rx_copy_thresh;

		/* map attributes */
		gcp->gc_dev_attr = sfe_dev_attr;
		gcp->gc_buf_attr = sfe_buf_attr;
		gcp->gc_desc_attr = sfe_buf_attr;

		/* dma attributes */
		gcp->gc_dma_attr_desc = sfe_dma_attr_desc;

		gcp->gc_dma_attr_txbuf = sfe_dma_attr_buf;
		gcp->gc_dma_attr_txbuf.dma_attr_align = gcp->gc_tx_buf_align+1;
		gcp->gc_dma_attr_txbuf.dma_attr_sgllen = gcp->gc_tx_max_frags;

		gcp->gc_dma_attr_rxbuf = sfe_dma_attr_buf;
		gcp->gc_dma_attr_rxbuf.dma_attr_align = gcp->gc_rx_buf_align+1;
		gcp->gc_dma_attr_rxbuf.dma_attr_sgllen = gcp->gc_rx_max_frags;

		/* time out parameters */
		gcp->gc_tx_timeout = 3*ONESEC;
		gcp->gc_tx_timeout_interval = ONESEC;
		if (p->chip_type == CHIPTYPE_DP83815) {
			/* workaround for tx hang */
			gcp->gc_tx_timeout_interval = ONESEC/20; /* 50mS */
		}

		/* MII timeout parameters */
		gcp->gc_mii_link_watch_interval = ONESEC;
		gcp->gc_mii_an_watch_interval = ONESEC/5;
		gcp->gc_mii_reset_timeout = MII_RESET_TIMEOUT;	/* 1 sec */
		gcp->gc_mii_an_timeout = MII_AN_TIMEOUT;	/* 5 sec */
		gcp->gc_mii_an_wait = 0;
		gcp->gc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT;

		/* setting for general PHY */
		gcp->gc_mii_an_delay = 0;
		gcp->gc_mii_linkdown_action = MII_ACTION_RSA;
		gcp->gc_mii_linkdown_timeout_action = MII_ACTION_RESET;
		gcp->gc_mii_dont_reset = B_FALSE;
		gcp->gc_attach_chip = &sfe_attach_chip;
		if (p->chip_type == CHIPTYPE_DP83815) {
			gcp->gc_reset_chip = &sfe_reset_chip_dp83815;
		} else {
			gcp->gc_reset_chip = &sfe_reset_chip_sis900;
		}
		gcp->gc_init_chip = &sfe_init_chip;
		gcp->gc_start_chip = &sfe_start_chip;
		gcp->gc_stop_chip = &sfe_stop_chip;
#ifdef USE_MULTICAST_HASHTBL
		gcp->gc_multicast_hash = &sfe_mcast_hash;
#endif
		if (p->chip_type == CHIPTYPE_DP83815) {
			gcp->gc_set_rx_filter = &sfe_set_rx_filter_dp83815;
		} else {
			gcp->gc_set_rx_filter = &sfe_set_rx_filter_sis900;
		}
		gcp->gc_set_media = &sfe_set_media;
		gcp->gc_get_stats = &sfe_get_stats;
		gcp->gc_interrupt = &sfe_interrupt;

		/* descriptor operation */
		gcp->gc_tx_desc_write = &sfe_tx_desc_write;
		gcp->gc_tx_start = &sfe_tx_start;
		gcp->gc_rx_desc_write = &sfe_rx_desc_write;
		gcp->gc_rx_start = NULL;

		gcp->gc_tx_desc_stat = &sfe_tx_desc_stat;
		gcp->gc_rx_desc_stat = &sfe_rx_desc_stat;
		gcp->gc_tx_desc_init = &sfe_tx_desc_init;
		gcp->gc_rx_desc_init = &sfe_rx_desc_init;
		gcp->gc_tx_desc_clean = &sfe_tx_desc_clean;
		gcp->gc_rx_desc_clean = &sfe_rx_desc_clean;

		/* mii operations */
		if (p->chip_type == CHIPTYPE_DP83815) {
			gcp->gc_mii_probe = &sfe_mii_probe_dp83815;
			gcp->gc_mii_init = &sfe_mii_init_dp83815;
			gcp->gc_mii_config = &sfe_mii_config_dp83815;
			gcp->gc_mii_sync = &sfe_mii_sync_dp83815;
			gcp->gc_mii_read = &sfe_mii_read_dp83815;
			gcp->gc_mii_write = &sfe_mii_write_dp83815;
			gcp->gc_mii_tune_phy = NULL;
			gcp->gc_flow_control = FLOW_CONTROL_NONE;
		} else {
			gcp->gc_mii_probe = &gem_mii_probe_default;
			gcp->gc_mii_init = NULL;
			gcp->gc_mii_config = &sfe_mii_config_sis900;
			gcp->gc_mii_sync = &sfe_mii_sync_sis900;
			gcp->gc_mii_read = &sfe_mii_read_sis900;
			gcp->gc_mii_write = &sfe_mii_write_sis900;
			gcp->gc_mii_tune_phy = &sfe_set_eq_sis630;
			gcp->gc_flow_control = FLOW_CONTROL_RX_PAUSE;
		}
		lp = kmem_zalloc(sizeof (*lp), KM_SLEEP);
		lp->chip = p;
		lp->revid = rev;
		lp->our_intr_bits = 0;

		cmn_err(CE_CONT, CONS "%s%d: chip:%s rev:0x%02x",
		    drv_name, unit, p->chip_name, rev);

		dp = gem_do_attach(dip, 0, gcp, base, &regs_ha,
		    lp, sizeof (*lp));
		kmem_free(gcp, sizeof (*gcp));

		if (dp == NULL) {
			goto err_freelp;
		}

		return (DDI_SUCCESS);

err_freelp:
		kmem_free(lp, sizeof (struct sfe_dev));
err:
		return (DDI_FAILURE);
	}
	return (DDI_FAILURE);
}
static int
sfedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_SUSPEND:
		return (gem_suspend(dip));

	case DDI_DETACH:
		return (gem_do_detach(dip));
	}
	return (DDI_FAILURE);
}
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
#ifdef	__sparc
#define	sfe_quiesce	ddi_quiesce_not_supported
#else
static int
sfe_quiesce(dev_info_t *dip)
{
	struct gem_dev	*dp;
	int	ret = 0;

	dp = GEM_GET_DEV(dip);

	if (dp == NULL) {
		return (DDI_FAILURE);
	}

	ret = sfe_stop_chip_quiesce(dp);

	return (ret);
}
#endif
/* ======================================================== */
/*
 * OS depend (loadable streams driver) routine
 */
/* ======================================================== */
DDI_DEFINE_STREAM_OPS(sfe_ops, nulldev, nulldev, sfeattach, sfedetach,
    nodev, NULL, D_MP, NULL, sfe_quiesce);

static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. This one is a driver */
	ident,
	&sfe_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
/* ======================================================== */
/*
 * Loadable module support
 */
/* ======================================================== */
int
_init(void)
{
	int	status;

	DPRINTF(2, (CE_CONT, CONS "sfe: _init: called"));
	gem_mod_init(&sfe_ops, "sfe");
	status = mod_install(&modlinkage);
	if (status != DDI_SUCCESS) {
		gem_mod_fini(&sfe_ops);
	}

	return (status);
}

/*
 * _fini : done
 */
int
_fini(void)
{
	int	status;

	DPRINTF(2, (CE_CONT, CONS "sfe: _fini: called"));
	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		gem_mod_fini(&sfe_ops);
	}

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}