kernel/drivers/net/vr/vr.c
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #include <sys/types.h>
28 #include <sys/stream.h>
29 #include <sys/strsun.h>
30 #include <sys/stat.h>
31 #include <sys/pci.h>
32 #include <sys/modctl.h>
33 #include <sys/kstat.h>
34 #include <sys/ethernet.h>
35 #include <sys/devops.h>
36 #include <sys/debug.h>
37 #include <sys/conf.h>
38 #include <sys/mac.h>
39 #include <sys/mac_provider.h>
40 #include <sys/mac_ether.h>
41 #include <sys/sysmacros.h>
42 #include <sys/dditypes.h>
43 #include <sys/ddi.h>
44 #include <sys/sunddi.h>
45 #include <sys/miiregs.h>
46 #include <sys/byteorder.h>
47 #include <sys/note.h>
48 #include <sys/vlan.h>
50 #include "vr.h"
51 #include "vr_impl.h"
54 * VR in a nutshell
55 * The card uses two rings of data structures to communicate with the host.
56 * These are referred to as "descriptor rings" and there is one for transmit
57 * (TX) and one for receive (RX).
59 * The driver uses a "DMA buffer" data type for mapping to those descriptor
60 * rings. This is a structure with handles and a DMA'able buffer attached to it.
62 * Receive
 * The receive ring is filled with DMA buffers. Received packets are copied
 * into newly allocated mblks and passed upstream.
66 * Transmit
67 * Each transmit descriptor has a DMA buffer attached to it. The data of TX
68 * packets is copied into the DMA buffer which is then enqueued for
69 * transmission.
71 * Reclaim of transmitted packets is done as a result of a transmit completion
72 * interrupt which is generated 3 times per ring at minimum.
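 *
 * Presumably this is what the TX interrupt distance logic in
 * vr_tx_enqueue_msg() implements: the interrupt-suppress bit is set on every
 * descriptor except each VR_TX_MAX_INTR_DISTANCE'th one, so a completion
 * interrupt is requested only once every so many packets.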
75 #if defined(DEBUG)
76 uint32_t vrdebug = 1;
77 #define VR_DEBUG(args) do { \
78 if (vrdebug > 0) \
79 (*vr_debug()) args; \
80 _NOTE(CONSTANTCONDITION) \
81 } while (0)
82 static void vr_prt(const char *fmt, ...);
83 void (*vr_debug())(const char *fmt, ...);
84 #else
85 #define VR_DEBUG(args) do ; _NOTE(CONSTANTCONDITION) while (0)
86 #endif
88 static char vr_ident[] = "VIA Rhine Ethernet";
91 * Attributes for accessing registers and memory descriptors for this device.
93 static ddi_device_acc_attr_t vr_dev_dma_accattr = {
94 DDI_DEVICE_ATTR_V0,
95 DDI_STRUCTURE_LE_ACC,
96 DDI_STRICTORDER_ACC
100 * Attributes for accessing data.
102 static ddi_device_acc_attr_t vr_data_dma_accattr = {
103 DDI_DEVICE_ATTR_V0,
104 DDI_NEVERSWAP_ACC,
105 DDI_STRICTORDER_ACC
109 * DMA attributes for descriptors for communication with the device
 * This driver assumes that all descriptors of one ring fit in one consecutive
 * memory area of max 4K (256 descriptors) that does not cross a page boundary.
 * Therefore, we request 4K alignment.
114 static ddi_dma_attr_t vr_dev_dma_attr = {
115 DMA_ATTR_V0, /* version number */
116 0, /* low DMA address range */
117 0xFFFFFFFF, /* high DMA address range */
118 0x7FFFFFFF, /* DMA counter register */
119 0x1000, /* DMA address alignment */
120 0x7F, /* DMA burstsizes */
121 1, /* min effective DMA size */
122 0xFFFFFFFF, /* max DMA xfer size */
123 0xFFFFFFFF, /* segment boundary */
124 1, /* s/g list length */
125 1, /* granularity of device */
126 0 /* DMA transfer flags */
130 * DMA attributes for the data moved to/from the device
 * Note that the alignment is set to 2K so that a 1500 byte packet never
 * crosses a page boundary and thus that a DMA transfer is not split up into
 * multiple cookies with a 4K/8K page size.
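 *
 * vr_alloc_dmabuf() relies on this: with 2K alignment and a buffer of at most
 * one page, ddi_dma_addr_bind_handle() is expected to return a single cookie,
 * and the allocation below is failed when cookiecnt > 1.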
135 static ddi_dma_attr_t vr_data_dma_attr = {
136 DMA_ATTR_V0, /* version number */
137 0, /* low DMA address range */
138 0xFFFFFFFF, /* high DMA address range */
139 0x7FFFFFFF, /* DMA counter register */
140 0x800, /* DMA address alignment */
141 0xfff, /* DMA burstsizes */
142 1, /* min effective DMA size */
143 0xFFFFFFFF, /* max DMA xfer size */
144 0xFFFFFFFF, /* segment boundary */
145 1, /* s/g list length */
146 1, /* granularity of device */
147 0 /* DMA transfer flags */
150 static mac_callbacks_t vr_mac_callbacks = {
151 MC_SETPROP|MC_GETPROP|MC_PROPINFO, /* Which callbacks are set */
152 vr_mac_getstat, /* Get the value of a statistic */
153 vr_mac_start, /* Start the device */
154 vr_mac_stop, /* Stop the device */
155 vr_mac_set_promisc, /* Enable or disable promiscuous mode */
156 vr_mac_set_multicast, /* Enable or disable a multicast addr */
157 vr_mac_set_ether_addr, /* Set the unicast MAC address */
158 vr_mac_tx_enqueue_list, /* Transmit a packet */
159 NULL,
160 NULL, /* Process an unknown ioctl */
161 NULL, /* Get capability information */
162 NULL, /* Open the device */
163 NULL, /* Close the device */
164 vr_mac_setprop, /* Set properties of the device */
165 vr_mac_getprop, /* Get properties of the device */
166 vr_mac_propinfo /* Get properties attributes */
170 * Table with bugs and features for each incarnation of the card.
172 static const chip_info_t vr_chip_info [] = {
174 0x0, 0x0,
175 "VIA Rhine Fast Ethernet",
176 (VR_BUG_NO_MEMIO),
177 (VR_FEATURE_NONE)
180 0x04, 0x21,
181 "VIA VT86C100A Fast Ethernet",
182 (VR_BUG_NEEDMODE2PCEROPT | VR_BUG_NO_TXQUEUEING |
183 VR_BUG_NEEDMODE10T | VR_BUG_TXALIGN | VR_BUG_NO_MEMIO |
184 VR_BUG_MIIPOLLSTOP),
185 (VR_FEATURE_NONE)
188 0x40, 0x41,
189 "VIA VT6102-A Rhine II Fast Ethernet",
190 (VR_BUG_NEEDMODE2PCEROPT),
191 (VR_FEATURE_RX_PAUSE_CAP)
194 0x42, 0x7f,
195 "VIA VT6102-C Rhine II Fast Ethernet",
196 (VR_BUG_NEEDMODE2PCEROPT),
197 (VR_FEATURE_RX_PAUSE_CAP)
200 0x80, 0x82,
201 "VIA VT6105-A Rhine III Fast Ethernet",
202 (VR_BUG_NONE),
203 (VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
206 0x83, 0x89,
207 "VIA VT6105-B Rhine III Fast Ethernet",
208 (VR_BUG_NONE),
209 (VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
212 0x8a, 0x8b,
213 "VIA VT6105-LOM Rhine III Fast Ethernet",
214 (VR_BUG_NONE),
215 (VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
218 0x8c, 0x8c,
219 "VIA VT6107-A0 Rhine III Fast Ethernet",
220 (VR_BUG_NONE),
221 (VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
224 0x8d, 0x8f,
225 "VIA VT6107-A1 Rhine III Fast Ethernet",
226 (VR_BUG_NONE),
227 (VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
228 VR_FEATURE_MRDLNMULTIPLE)
231 0x90, 0x93,
232 "VIA VT6105M-A0 Rhine III Fast Ethernet Management Adapter",
233 (VR_BUG_NONE),
234 (VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
235 VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
236 VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
237 VR_FEATURE_MIBCOUNTER)
240 0x94, 0xff,
241 "VIA VT6105M-B1 Rhine III Fast Ethernet Management Adapter",
242 (VR_BUG_NONE),
243 (VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
244 VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
245 VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
246 VR_FEATURE_MIBCOUNTER)
251 * Function prototypes
253 static vr_result_t vr_add_intr(vr_t *vrp);
254 static void vr_remove_intr(vr_t *vrp);
255 static int32_t vr_cam_index(vr_t *vrp, const uint8_t *maddr);
256 static uint32_t ether_crc_be(const uint8_t *address);
257 static void vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp);
258 static void vr_log(vr_t *vrp, int level, const char *fmt, ...);
259 static int vr_resume(dev_info_t *devinfo);
260 static int vr_suspend(dev_info_t *devinfo);
261 static vr_result_t vr_bus_config(vr_t *vrp);
262 static void vr_bus_unconfig(vr_t *vrp);
263 static void vr_reset(vr_t *vrp);
264 static int vr_start(vr_t *vrp);
265 static int vr_stop(vr_t *vrp);
266 static vr_result_t vr_rings_init(vr_t *vrp);
267 static void vr_rings_fini(vr_t *vrp);
268 static vr_result_t vr_alloc_ring(vr_t *vrp, vr_ring_t *r, size_t n);
269 static void vr_free_ring(vr_ring_t *r, size_t n);
270 static vr_result_t vr_rxring_init(vr_t *vrp);
271 static void vr_rxring_fini(vr_t *vrp);
272 static vr_result_t vr_txring_init(vr_t *vrp);
273 static void vr_txring_fini(vr_t *vrp);
274 static vr_result_t vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap,
275 uint_t flags);
276 static void vr_free_dmabuf(vr_data_dma_t *dmap);
277 static void vr_param_init(vr_t *vrp);
278 static mblk_t *vr_receive(vr_t *vrp);
279 static void vr_tx_reclaim(vr_t *vrp);
280 static void vr_periodic(void *p);
281 static void vr_error(vr_t *vrp);
282 static void vr_phy_read(vr_t *vrp, int offset, uint16_t *value);
283 static void vr_phy_write(vr_t *vrp, int offset, uint16_t value);
284 static void vr_phy_autopoll_disable(vr_t *vrp);
285 static void vr_phy_autopoll_enable(vr_t *vrp);
286 static void vr_link_init(vr_t *vrp);
287 static void vr_link_state(vr_t *vrp);
288 static void vr_kstats_init(vr_t *vrp);
289 static int vr_update_kstats(kstat_t *ksp, int access);
290 static void vr_remove_kstats(vr_t *vrp);
292 static int
293 vr_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
295 vr_t *vrp;
296 mac_register_t *macreg;
298 if (cmd == DDI_RESUME)
299 return (vr_resume(devinfo));
300 else if (cmd != DDI_ATTACH)
301 return (DDI_FAILURE);
304 * Attach.
306 vrp = kmem_zalloc(sizeof (vr_t), KM_SLEEP);
307 ddi_set_driver_private(devinfo, vrp);
308 vrp->devinfo = devinfo;
311 * Store the name+instance of the module.
313 (void) snprintf(vrp->ifname, sizeof (vrp->ifname), "%s%d",
314 MODULENAME, ddi_get_instance(devinfo));
317 * Bus initialization.
319 if (vr_bus_config(vrp) != VR_SUCCESS) {
320 vr_log(vrp, CE_WARN, "vr_bus_config failed");
321 goto fail0;
325 * Initialize default parameters.
327 vr_param_init(vrp);
330 * Setup the descriptor rings.
332 if (vr_rings_init(vrp) != VR_SUCCESS) {
333 vr_log(vrp, CE_WARN, "vr_rings_init failed");
334 goto fail1;
338 * Initialize kstats.
340 vr_kstats_init(vrp);
343 * Add interrupt to the OS.
345 if (vr_add_intr(vrp) != VR_SUCCESS) {
346 vr_log(vrp, CE_WARN, "vr_add_intr failed in attach");
347 goto fail3;
351 * Add mutexes.
353 mutex_init(&vrp->intrlock, NULL, MUTEX_DRIVER,
354 DDI_INTR_PRI(vrp->intr_pri));
355 mutex_init(&vrp->oplock, NULL, MUTEX_DRIVER, NULL);
356 mutex_init(&vrp->tx.lock, NULL, MUTEX_DRIVER, NULL);
359 * Enable interrupt.
361 if (ddi_intr_enable(vrp->intr_hdl) != DDI_SUCCESS) {
362 vr_log(vrp, CE_NOTE, "ddi_intr_enable failed");
363 goto fail5;
367 * Register with parent, mac.
369 if ((macreg = mac_alloc(MAC_VERSION)) == NULL) {
370 vr_log(vrp, CE_WARN, "mac_alloc failed in attach");
371 goto fail6;
374 macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
375 macreg->m_driver = vrp;
376 macreg->m_dip = devinfo;
377 macreg->m_src_addr = vrp->vendor_ether_addr;
378 macreg->m_callbacks = &vr_mac_callbacks;
379 macreg->m_min_sdu = 0;
380 macreg->m_max_sdu = ETHERMTU;
381 macreg->m_margin = VLAN_TAGSZ;
383 if (mac_register(macreg, &vrp->machdl) != 0) {
384 vr_log(vrp, CE_WARN, "mac_register failed in attach");
385 goto fail7;
387 mac_free(macreg);
388 return (DDI_SUCCESS);
390 fail7:
391 mac_free(macreg);
392 fail6:
393 (void) ddi_intr_disable(vrp->intr_hdl);
394 fail5:
395 mutex_destroy(&vrp->tx.lock);
396 mutex_destroy(&vrp->oplock);
397 mutex_destroy(&vrp->intrlock);
398 vr_remove_intr(vrp);
399 fail3:
400 vr_remove_kstats(vrp);
401 fail2:
402 vr_rings_fini(vrp);
403 fail1:
404 vr_bus_unconfig(vrp);
405 fail0:
406 kmem_free(vrp, sizeof (vr_t));
407 return (DDI_FAILURE);
410 static int
411 vr_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
413 vr_t *vrp;
415 vrp = ddi_get_driver_private(devinfo);
417 if (cmd == DDI_SUSPEND)
418 return (vr_suspend(devinfo));
419 else if (cmd != DDI_DETACH)
420 return (DDI_FAILURE);
422 if (vrp->chip.state == CHIPSTATE_RUNNING)
423 return (DDI_FAILURE);
426 * Try to un-register from the MAC layer.
428 if (mac_unregister(vrp->machdl) != 0)
429 return (DDI_FAILURE);
431 (void) ddi_intr_disable(vrp->intr_hdl);
432 vr_remove_intr(vrp);
433 mutex_destroy(&vrp->tx.lock);
434 mutex_destroy(&vrp->oplock);
435 mutex_destroy(&vrp->intrlock);
436 vr_remove_kstats(vrp);
437 vr_rings_fini(vrp);
438 vr_bus_unconfig(vrp);
439 kmem_free(vrp, sizeof (vr_t));
440 return (DDI_SUCCESS);
444 * quiesce the card for fast reboot.
447 vr_quiesce(dev_info_t *dev_info)
449 vr_t *vrp;
451 vrp = (vr_t *)ddi_get_driver_private(dev_info);
454 * Stop interrupts.
456 VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
457 VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
460 * Stop DMA.
462 VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);
463 return (DDI_SUCCESS);
467 * Add an interrupt for our device to the OS.
469 static vr_result_t
470 vr_add_intr(vr_t *vrp)
472 int nintrs;
473 int rc;
475 rc = ddi_intr_alloc(vrp->devinfo, &vrp->intr_hdl,
476 DDI_INTR_TYPE_FIXED, /* type */
477 0, /* number */
478 1, /* count */
479 &nintrs, /* actualp */
480 DDI_INTR_ALLOC_STRICT);
482 if (rc != DDI_SUCCESS) {
483 vr_log(vrp, CE_NOTE, "ddi_intr_alloc failed: %d", rc);
484 return (VR_FAILURE);
487 rc = ddi_intr_add_handler(vrp->intr_hdl, vr_intr, vrp, NULL);
488 if (rc != DDI_SUCCESS) {
489 vr_log(vrp, CE_NOTE, "ddi_intr_add_handler failed");
490 if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
491 vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
492 return (VR_FAILURE);
495 rc = ddi_intr_get_pri(vrp->intr_hdl, &vrp->intr_pri);
496 if (rc != DDI_SUCCESS) {
497 vr_log(vrp, CE_NOTE, "ddi_intr_get_pri failed");
498 if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
499 vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");
501 if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
502 vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
504 return (VR_FAILURE);
506 return (VR_SUCCESS);
510 * Remove our interrupt from the OS.
512 static void
513 vr_remove_intr(vr_t *vrp)
515 if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
516 vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");
518 if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
519 vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
523 * Resume operation after suspend.
525 static int
526 vr_resume(dev_info_t *devinfo)
528 vr_t *vrp;
530 vrp = (vr_t *)ddi_get_driver_private(devinfo);
531 mutex_enter(&vrp->oplock);
532 if (vrp->chip.state == CHIPSTATE_SUSPENDED_RUNNING)
533 (void) vr_start(vrp);
534 mutex_exit(&vrp->oplock);
535 return (DDI_SUCCESS);
539 * Suspend operation.
541 static int
542 vr_suspend(dev_info_t *devinfo)
544 vr_t *vrp;
546 vrp = (vr_t *)ddi_get_driver_private(devinfo);
547 mutex_enter(&vrp->oplock);
548 if (vrp->chip.state == CHIPSTATE_RUNNING) {
549 (void) vr_stop(vrp);
550 vrp->chip.state = CHIPSTATE_SUSPENDED_RUNNING;
552 mutex_exit(&vrp->oplock);
553 return (DDI_SUCCESS);
557 * Initial bus- and device configuration during attach(9E).
559 static vr_result_t
560 vr_bus_config(vr_t *vrp)
562 uint32_t addr;
563 int n, nsets, rc;
564 uint_t elem;
565 pci_regspec_t *regs;
568 * Get the reg property which describes the various access methods.
570 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, vrp->devinfo,
571 0, "reg", (int **)&regs, &elem) != DDI_PROP_SUCCESS) {
572 vr_log(vrp, CE_WARN, "Can't get reg property");
573 return (VR_FAILURE);
575 nsets = (elem * sizeof (uint_t)) / sizeof (pci_regspec_t);
578 * Setup access to all available sets.
580 vrp->nsets = nsets;
581 vrp->regset = kmem_zalloc(nsets * sizeof (vr_acc_t), KM_SLEEP);
582 for (n = 0; n < nsets; n++) {
583 rc = ddi_regs_map_setup(vrp->devinfo, n,
584 &vrp->regset[n].addr, 0, 0,
585 &vr_dev_dma_accattr,
586 &vrp->regset[n].hdl);
587 if (rc != DDI_SUCCESS) {
588 vr_log(vrp, CE_NOTE,
589 "Setup of register set %d failed", n);
590 while (--n >= 0)
591 ddi_regs_map_free(&vrp->regset[n].hdl);
592 kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
593 ddi_prop_free(regs);
594 return (VR_FAILURE);
596 bcopy(&regs[n], &vrp->regset[n].reg, sizeof (pci_regspec_t));
598 ddi_prop_free(regs);
601 * Assign type-named pointers to the register sets.
603 for (n = 0; n < nsets; n++) {
604 addr = vrp->regset[n].reg.pci_phys_hi & PCI_REG_ADDR_M;
605 if (addr == PCI_ADDR_CONFIG && vrp->acc_cfg == NULL)
606 vrp->acc_cfg = &vrp->regset[n];
607 else if (addr == PCI_ADDR_IO && vrp->acc_io == NULL)
608 vrp->acc_io = &vrp->regset[n];
609 else if (addr == PCI_ADDR_MEM32 && vrp->acc_mem == NULL)
610 vrp->acc_mem = &vrp->regset[n];
614 * Assure there is one of each type.
616 if (vrp->acc_cfg == NULL ||
617 vrp->acc_io == NULL ||
618 vrp->acc_mem == NULL) {
619 for (n = 0; n < nsets; n++)
620 ddi_regs_map_free(&vrp->regset[n].hdl);
621 kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
622 vr_log(vrp, CE_WARN,
623 "Config-, I/O- and memory sets not available");
624 return (VR_FAILURE);
628 * Store vendor/device/revision.
630 vrp->chip.vendor = VR_GET16(vrp->acc_cfg, PCI_CONF_VENID);
631 vrp->chip.device = VR_GET16(vrp->acc_cfg, PCI_CONF_DEVID);
632 vrp->chip.revision = VR_GET16(vrp->acc_cfg, PCI_CONF_REVID);
635 * Copy the matching chip_info_t structure.
637 elem = sizeof (vr_chip_info) / sizeof (chip_info_t);
638 for (n = 0; n < elem; n++) {
639 if (vrp->chip.revision >= vr_chip_info[n].revmin &&
640 vrp->chip.revision <= vr_chip_info[n].revmax) {
641 bcopy((void*)&vr_chip_info[n],
642 (void*)&vrp->chip.info,
643 sizeof (chip_info_t));
644 break;
 * If we didn't find a chip_info_t for this card, copy the first
 * entry of the info structures. This is a generic Rhine with no
 * bugs and no features.
653 if (vrp->chip.info.name == NULL) {
654 bcopy((void*)&vr_chip_info[0],
655 (void*) &vrp->chip.info,
656 sizeof (chip_info_t));
 * Report what was found.
662 vr_log(vrp, CE_NOTE, "pci%d,%d,%d: %s, revision 0x%0x",
663 PCI_REG_BUS_G(vrp->acc_cfg->reg.pci_phys_hi),
664 PCI_REG_DEV_G(vrp->acc_cfg->reg.pci_phys_hi),
665 PCI_REG_FUNC_G(vrp->acc_cfg->reg.pci_phys_hi),
666 vrp->chip.info.name,
667 vrp->chip.revision);
 * Assure that the device is prepared for memory space accesses.
 * This should be the default as the device advertises memory
 * access in its BARs. However, my VT6102 on an EPIA CL board doesn't,
 * and thus we explicitly enable it.
675 VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);
678 * Setup a handle for regular usage, prefer memory space accesses.
680 if (vrp->acc_mem != NULL &&
681 (vrp->chip.info.bugs & VR_BUG_NO_MEMIO) == 0)
682 vrp->acc_reg = vrp->acc_mem;
683 else
684 vrp->acc_reg = vrp->acc_io;
687 * Store the vendor's MAC address.
689 for (n = 0; n < ETHERADDRL; n++) {
690 vrp->vendor_ether_addr[n] = VR_GET8(vrp->acc_reg,
691 VR_ETHERADDR + n);
693 return (VR_SUCCESS);
696 static void
697 vr_bus_unconfig(vr_t *vrp)
699 uint_t n;
702 * Free the register access handles.
704 for (n = 0; n < vrp->nsets; n++)
705 ddi_regs_map_free(&vrp->regset[n].hdl);
706 kmem_free(vrp->regset, vrp->nsets * sizeof (vr_acc_t));
710 * Initialize parameter structures.
712 static void
713 vr_param_init(vr_t *vrp)
716 * Initialize default link configuration parameters.
718 vrp->param.an_en = VR_LINK_AUTONEG_ON;
719 vrp->param.anadv_en = 1; /* Select 802.3 autonegotiation */
720 vrp->param.anadv_en |= MII_ABILITY_100BASE_T4;
721 vrp->param.anadv_en |= MII_ABILITY_100BASE_TX_FD;
722 vrp->param.anadv_en |= MII_ABILITY_100BASE_TX;
723 vrp->param.anadv_en |= MII_ABILITY_10BASE_T_FD;
724 vrp->param.anadv_en |= MII_ABILITY_10BASE_T;
725 /* Not a PHY ability, but advertised on behalf of MAC */
726 vrp->param.anadv_en |= MII_ABILITY_PAUSE;
727 vrp->param.mtu = ETHERMTU;
730 * Store the PHY identity.
732 vr_phy_read(vrp, MII_PHYIDH, &vrp->chip.mii.identh);
733 vr_phy_read(vrp, MII_PHYIDL, &vrp->chip.mii.identl);
736 * Clear incapabilities imposed by PHY in phymask.
738 vrp->param.an_phymask = vrp->param.anadv_en;
739 vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
740 if ((vrp->chip.mii.status & MII_STATUS_10) == 0)
741 vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T;
743 if ((vrp->chip.mii.status & MII_STATUS_10_FD) == 0)
744 vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T_FD;
746 if ((vrp->chip.mii.status & MII_STATUS_100_BASEX) == 0)
747 vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX;
749 if ((vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) == 0)
750 vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX_FD;
752 if ((vrp->chip.mii.status & MII_STATUS_100_BASE_T4) == 0)
753 vrp->param.an_phymask &= ~MII_ABILITY_100BASE_T4;
756 * Clear incapabilities imposed by MAC in macmask
757 * Note that flowcontrol (FCS?) is never masked. All of our adapters
758 * have the ability to honor incoming pause frames. Only the newer can
759 * transmit pause frames. Since there's no asym flowcontrol in 100Mbit
760 * Ethernet, we always advertise (symmetric) pause.
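 *
 * vr_receive() later depends on the outcome of this: the RXBUFCOUNT pause
 * bookkeeping is only updated when the flow control that was presumably
 * negotiated from this advertisement ended up being VR_PAUSE_BIDIRECTIONAL.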
762 vrp->param.an_macmask = vrp->param.anadv_en;
 * The advertised capabilities are the enabled ones minus those the PHY
 * or MAC cannot do.
767 vrp->chip.mii.anadv = vrp->param.anadv_en &
768 (vrp->param.an_phymask & vrp->param.an_macmask);
771 * Ensure that autoneg of the PHY matches our default.
773 if (vrp->param.an_en == VR_LINK_AUTONEG_ON)
774 vrp->chip.mii.control = MII_CONTROL_ANE;
775 else
776 vrp->chip.mii.control =
777 (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
781 * Setup the descriptor rings.
783 static vr_result_t
784 vr_rings_init(vr_t *vrp)
787 vrp->rx.ndesc = VR_RX_N_DESC;
788 vrp->tx.ndesc = VR_TX_N_DESC;
791 * Create a ring for receive.
793 if (vr_alloc_ring(vrp, &vrp->rxring, vrp->rx.ndesc) != VR_SUCCESS)
794 return (VR_FAILURE);
797 * Create a ring for transmit.
799 if (vr_alloc_ring(vrp, &vrp->txring, vrp->tx.ndesc) != VR_SUCCESS) {
800 vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
801 return (VR_FAILURE);
804 vrp->rx.ring = vrp->rxring.desc;
805 vrp->tx.ring = vrp->txring.desc;
806 return (VR_SUCCESS);
809 static void
810 vr_rings_fini(vr_t *vrp)
812 vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
813 vr_free_ring(&vrp->txring, vrp->tx.ndesc);
817 * Allocate a descriptor ring
 * The number of descriptor entries must fit in a single page so that the
 * whole ring fits in one consecutive space.
820 * i386: 4K page / 16 byte descriptor = 256 entries
821 * sparc: 8K page / 16 byte descriptor = 512 entries
823 static vr_result_t
824 vr_alloc_ring(vr_t *vrp, vr_ring_t *ring, size_t n)
826 ddi_dma_cookie_t desc_dma_cookie;
827 uint_t desc_cookiecnt;
828 int i, rc;
829 size_t rbytes;
832 * Allocate a DMA handle for the chip descriptors.
834 rc = ddi_dma_alloc_handle(vrp->devinfo,
835 &vr_dev_dma_attr,
836 DDI_DMA_SLEEP,
837 NULL,
838 &ring->handle);
840 if (rc != DDI_SUCCESS) {
841 vr_log(vrp, CE_WARN,
842 "ddi_dma_alloc_handle in vr_alloc_ring failed.");
843 return (VR_FAILURE);
847 * Allocate memory for the chip descriptors.
849 rc = ddi_dma_mem_alloc(ring->handle,
850 n * sizeof (vr_chip_desc_t),
851 &vr_dev_dma_accattr,
852 DDI_DMA_CONSISTENT,
853 DDI_DMA_SLEEP,
854 NULL,
855 (caddr_t *)&ring->cdesc,
856 &rbytes,
857 &ring->acchdl);
859 if (rc != DDI_SUCCESS) {
860 vr_log(vrp, CE_WARN,
861 "ddi_dma_mem_alloc in vr_alloc_ring failed.");
862 ddi_dma_free_handle(&ring->handle);
863 return (VR_FAILURE);
867 * Map the descriptor memory.
869 rc = ddi_dma_addr_bind_handle(ring->handle,
870 NULL,
871 (caddr_t)ring->cdesc,
872 rbytes,
873 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
874 DDI_DMA_SLEEP,
875 NULL,
876 &desc_dma_cookie,
877 &desc_cookiecnt);
879 if (rc != DDI_DMA_MAPPED || desc_cookiecnt > 1) {
880 vr_log(vrp, CE_WARN,
881 "ddi_dma_addr_bind_handle in vr_alloc_ring failed: "
882 "rc = %d, cookiecnt = %d", rc, desc_cookiecnt);
883 ddi_dma_mem_free(&ring->acchdl);
884 ddi_dma_free_handle(&ring->handle);
885 return (VR_FAILURE);
887 ring->cdesc_paddr = desc_dma_cookie.dmac_address;
890 * Allocate memory for the host descriptor ring.
892 ring->desc =
893 (vr_desc_t *)kmem_zalloc(n * sizeof (vr_desc_t), KM_SLEEP);
896 * Interlink the descriptors and connect host- to chip descriptors.
898 for (i = 0; i < n; i++) {
900 * Connect the host descriptor to a chip descriptor.
902 ring->desc[i].cdesc = &ring->cdesc[i];
 * Store the DMA address and offset in the descriptor.
 * Offset is for ddi_dma_sync() and paddr is for ddi_get()/ddi_put().
908 ring->desc[i].offset = i * sizeof (vr_chip_desc_t);
909 ring->desc[i].paddr = ring->cdesc_paddr + ring->desc[i].offset;
912 * Link the previous descriptor to this one.
914 if (i > 0) {
915 /* Host */
916 ring->desc[i-1].next = &ring->desc[i];
918 /* Chip */
919 ddi_put32(ring->acchdl,
920 &ring->cdesc[i-1].next,
921 ring->desc[i].paddr);
926 * Make rings out of this list by pointing last to first.
928 i = n - 1;
929 ring->desc[i].next = &ring->desc[0];
930 ddi_put32(ring->acchdl, &ring->cdesc[i].next, ring->desc[0].paddr);
931 return (VR_SUCCESS);
935 * Free the memory allocated for a ring.
937 static void
938 vr_free_ring(vr_ring_t *r, size_t n)
941 * Unmap and free the chip descriptors.
943 (void) ddi_dma_unbind_handle(r->handle);
944 ddi_dma_mem_free(&r->acchdl);
945 ddi_dma_free_handle(&r->handle);
948 * Free the memory for storing host descriptors
950 kmem_free(r->desc, n * sizeof (vr_desc_t));
954 * Initialize the receive ring.
956 static vr_result_t
957 vr_rxring_init(vr_t *vrp)
959 int i, rc;
960 vr_desc_t *rp;
963 * Set the read pointer at the start of the ring.
965 vrp->rx.rp = &vrp->rx.ring[0];
968 * Assign a DMA buffer to each receive descriptor.
970 for (i = 0; i < vrp->rx.ndesc; i++) {
971 rp = &vrp->rx.ring[i];
972 rc = vr_alloc_dmabuf(vrp,
973 &vrp->rx.ring[i].dmabuf,
974 DDI_DMA_STREAMING | DDI_DMA_READ);
976 if (rc != VR_SUCCESS) {
977 while (--i >= 0)
978 vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
979 return (VR_FAILURE);
983 * Store the address of the dma buffer in the chip descriptor
985 ddi_put32(vrp->rxring.acchdl,
986 &rp->cdesc->data,
987 rp->dmabuf.paddr);
990 * Put the buffer length in the chip descriptor. Ensure that
991 * length fits in the 11 bits of stat1 (2047/0x7FF)
993 ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat1,
994 MIN(VR_MAX_PKTSZ, rp->dmabuf.bufsz));
997 * Set descriptor ownership to the card
999 ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat0, VR_RDES0_OWN);
1002 * Sync the descriptor with main memory
1004 (void) ddi_dma_sync(vrp->rxring.handle, rp->offset,
1005 sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
1007 return (VR_SUCCESS);
1011 * Free the DMA buffers assigned to the receive ring.
1013 static void
1014 vr_rxring_fini(vr_t *vrp)
1016 int i;
1018 for (i = 0; i < vrp->rx.ndesc; i++)
1019 vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
1022 static vr_result_t
1023 vr_txring_init(vr_t *vrp)
1025 vr_desc_t *wp;
1026 int i, rc;
1029 * Set the write- and claim pointer.
1031 vrp->tx.wp = &vrp->tx.ring[0];
1032 vrp->tx.cp = &vrp->tx.ring[0];
1035 * (Re)set the TX bookkeeping.
1037 vrp->tx.stallticks = 0;
1038 vrp->tx.resched = 0;
1041 * Every transmit decreases nfree. Every reclaim increases nfree.
1043 vrp->tx.nfree = vrp->tx.ndesc;
1046 * Attach a DMA buffer to each transmit descriptor.
1048 for (i = 0; i < vrp->tx.ndesc; i++) {
1049 rc = vr_alloc_dmabuf(vrp,
1050 &vrp->tx.ring[i].dmabuf,
1051 DDI_DMA_STREAMING | DDI_DMA_WRITE);
1053 if (rc != VR_SUCCESS) {
1054 while (--i >= 0)
1055 vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
1056 return (VR_FAILURE);
1061 * Init & sync the TX descriptors so the device sees a valid ring.
1063 for (i = 0; i < vrp->tx.ndesc; i++) {
1064 wp = &vrp->tx.ring[i];
1065 ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, 0);
1066 ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1, 0);
1067 ddi_put32(vrp->txring.acchdl, &wp->cdesc->data,
1068 wp->dmabuf.paddr);
1069 (void) ddi_dma_sync(vrp->txring.handle, wp->offset,
1070 sizeof (vr_chip_desc_t),
1071 DDI_DMA_SYNC_FORDEV);
1073 return (VR_SUCCESS);
1077 * Free the DMA buffers attached to the TX ring.
1079 static void
1080 vr_txring_fini(vr_t *vrp)
1082 int i;
1085 * Free the DMA buffers attached to the TX ring
1087 for (i = 0; i < vrp->tx.ndesc; i++)
1088 vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
1092 * Allocate a DMA buffer.
1094 static vr_result_t
1095 vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap, uint_t dmaflags)
1097 ddi_dma_cookie_t dma_cookie;
1098 uint_t cookiecnt;
1099 int rc;
1102 * Allocate a DMA handle for the buffer
1104 rc = ddi_dma_alloc_handle(vrp->devinfo,
1105 &vr_data_dma_attr,
1106 DDI_DMA_DONTWAIT, NULL,
1107 &dmap->handle);
1109 if (rc != DDI_SUCCESS) {
1110 vr_log(vrp, CE_WARN,
1111 "ddi_dma_alloc_handle failed in vr_alloc_dmabuf");
1112 return (VR_FAILURE);
1116 * Allocate the buffer
 * The allocated buffer is aligned on a 2K boundary. This ensures that
 * a 1500 byte frame never crosses a page boundary and thus that the DMA
 * mapping can be established in 1 fragment.
1121 rc = ddi_dma_mem_alloc(dmap->handle,
1122 VR_DMABUFSZ,
1123 &vr_data_dma_accattr,
1124 DDI_DMA_RDWR | DDI_DMA_STREAMING,
1125 DDI_DMA_DONTWAIT, NULL,
1126 &dmap->buf,
1127 &dmap->bufsz,
1128 &dmap->acchdl);
1130 if (rc != DDI_SUCCESS) {
1131 vr_log(vrp, CE_WARN,
1132 "ddi_dma_mem_alloc failed in vr_alloc_dmabuf");
1133 ddi_dma_free_handle(&dmap->handle);
1134 return (VR_FAILURE);
1138 * Map the memory
1140 rc = ddi_dma_addr_bind_handle(dmap->handle,
1141 NULL,
1142 (caddr_t)dmap->buf,
1143 dmap->bufsz,
1144 dmaflags,
1145 DDI_DMA_DONTWAIT,
1146 NULL,
1147 &dma_cookie,
1148 &cookiecnt);
 * The cookie count should never be > 1 because we requested 2K alignment.
1153 if (rc != DDI_DMA_MAPPED || cookiecnt > 1) {
1154 vr_log(vrp, CE_WARN,
1155 "dma_addr_bind_handle failed in vr_alloc_dmabuf: "
1156 "rc = %d, cookiecnt = %d", rc, cookiecnt);
1157 ddi_dma_mem_free(&dmap->acchdl);
1158 ddi_dma_free_handle(&dmap->handle);
1159 return (VR_FAILURE);
1161 dmap->paddr = dma_cookie.dmac_address;
1162 return (VR_SUCCESS);
1166 * Destroy a DMA buffer.
1168 static void
1169 vr_free_dmabuf(vr_data_dma_t *dmap)
1171 (void) ddi_dma_unbind_handle(dmap->handle);
1172 ddi_dma_mem_free(&dmap->acchdl);
1173 ddi_dma_free_handle(&dmap->handle);
1177 * Interrupt service routine
 * When our vector is shared with another device, av_dispatch_autovect calls
 * all service routines for the vector until *none* of them return claimed.
 * That means that, when sharing vectors, this routine is called at least
 * twice for each interrupt.
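 *
 * This is also why the handler below returns DDI_INTR_UNCLAIMED both when
 * the chip is not in the running state and when ISR0 shows none of the
 * events configured in VR_ICR0_CFG; only otherwise is the interrupt
 * claimed and acknowledged.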
1183 uint_t
1184 vr_intr(caddr_t arg1, caddr_t arg2)
1186 vr_t *vrp;
1187 uint16_t status;
1188 mblk_t *lp = NULL;
1189 uint32_t tx_resched;
1190 uint32_t link_change;
1192 tx_resched = 0;
1193 link_change = 0;
1194 vrp = (void *)arg1;
1195 _NOTE(ARGUNUSED(arg2))
1197 mutex_enter(&vrp->intrlock);
1199 * If the driver is not in running state it is not our interrupt.
1200 * Shared interrupts can end up here without us being started.
1202 if (vrp->chip.state != CHIPSTATE_RUNNING) {
1203 mutex_exit(&vrp->intrlock);
1204 return (DDI_INTR_UNCLAIMED);
 * Read the status register to see if the interrupt is from our device.
 * This read also ensures that posted writes are brought to main memory.
1211 status = VR_GET16(vrp->acc_reg, VR_ISR0) & VR_ICR0_CFG;
1212 if (status == 0) {
 * Status contains no configured interrupts.
 * The interrupt was not generated by our device.
1217 vrp->stats.intr_unclaimed++;
1218 mutex_exit(&vrp->intrlock);
1219 return (DDI_INTR_UNCLAIMED);
1221 vrp->stats.intr_claimed++;
1224 * Acknowledge the event(s) that caused interruption.
1226 VR_PUT16(vrp->acc_reg, VR_ISR0, status);
1229 * Receive completion.
1231 if ((status & (VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS)) != 0) {
1233 * Received some packets.
1235 lp = vr_receive(vrp);
1238 * DMA stops after a conflict in the FIFO.
1240 if ((status & VR_ISR_RX_ERR_BITS) != 0)
1241 VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1242 status &= ~(VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS);
1246 * Transmit completion.
1248 if ((status & (VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS)) != 0) {
 * The card is done transmitting some packets.
 * TX_DONE is generated 3 times per ring but it appears
 * more often because it is also set when an RX_DONE
 * interrupt is generated.
1255 mutex_enter(&vrp->tx.lock);
1256 vr_tx_reclaim(vrp);
1257 tx_resched = vrp->tx.resched;
1258 vrp->tx.resched = 0;
1259 mutex_exit(&vrp->tx.lock);
1260 status &= ~(VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS);
1264 * Link status change.
1266 if ((status & VR_ICR0_LINKSTATUS) != 0) {
1268 * Get new link state and inform the mac layer.
1270 mutex_enter(&vrp->oplock);
1271 mutex_enter(&vrp->tx.lock);
1272 vr_link_state(vrp);
1273 mutex_exit(&vrp->tx.lock);
1274 mutex_exit(&vrp->oplock);
1275 status &= ~VR_ICR0_LINKSTATUS;
1276 vrp->stats.linkchanges++;
1277 link_change = 1;
1281 * Bus error.
1283 if ((status & VR_ISR0_BUSERR) != 0) {
vr_log(vrp, CE_WARN, "bus error occurred");
1285 vrp->reset = 1;
1286 status &= ~VR_ISR0_BUSERR;
1290 * We must have handled all things here.
1292 ASSERT(status == 0);
1293 mutex_exit(&vrp->intrlock);
 * Reset the device if requested.
 * The request can come from the periodic TX check or from the interrupt
 * status.
1300 if (vrp->reset != 0) {
1301 vr_error(vrp);
1302 vrp->reset = 0;
1306 * Pass up the list with received packets.
1308 if (lp != NULL)
1309 mac_rx(vrp->machdl, 0, lp);
1312 * Inform the upper layer on the linkstatus if there was a change.
1314 if (link_change != 0)
1315 mac_link_update(vrp->machdl,
1316 (link_state_t)vrp->chip.link.state);
1318 * Restart transmissions if we were waiting for tx descriptors.
1320 if (tx_resched == 1)
1321 mac_tx_update(vrp->machdl);
1324 * Read something from the card to ensure that all of our configuration
1325 * writes are delivered to the device before the interrupt is ended.
1327 (void) VR_GET8(vrp->acc_reg, VR_ETHERADDR);
1328 return (DDI_INTR_CLAIMED);
 * Respond to an unforeseen situation by resetting the card and our bookkeeping.
1334 static void
1335 vr_error(vr_t *vrp)
1337 vr_log(vrp, CE_WARN, "resetting MAC.");
1338 mutex_enter(&vrp->intrlock);
1339 mutex_enter(&vrp->oplock);
1340 mutex_enter(&vrp->tx.lock);
1341 (void) vr_stop(vrp);
1342 vr_reset(vrp);
1343 (void) vr_start(vrp);
1344 mutex_exit(&vrp->tx.lock);
1345 mutex_exit(&vrp->oplock);
1346 mutex_exit(&vrp->intrlock);
1347 vrp->stats.resets++;
1351 * Collect received packets in a list.
1353 static mblk_t *
1354 vr_receive(vr_t *vrp)
1356 mblk_t *lp, *mp, *np;
1357 vr_desc_t *rxp;
1358 vr_data_dma_t *dmap;
1359 uint32_t pklen;
1360 uint32_t rxstat0;
1361 uint32_t n;
1363 lp = NULL;
1364 n = 0;
1365 for (rxp = vrp->rx.rp; ; rxp = rxp->next, n++) {
1367 * Sync the descriptor before looking at it.
1369 (void) ddi_dma_sync(vrp->rxring.handle, rxp->offset,
1370 sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORKERNEL);
1373 * Get the status from the descriptor.
1375 rxstat0 = ddi_get32(vrp->rxring.acchdl, &rxp->cdesc->stat0);
1378 * We're done if the descriptor is owned by the card.
1380 if ((rxstat0 & VR_RDES0_OWN) != 0)
1381 break;
1382 else if ((rxstat0 & VR_RDES0_RXOK) != 0) {
1384 * Received a good packet
1386 dmap = &rxp->dmabuf;
1387 pklen = (rxstat0 >> 16) - ETHERFCSL;
1390 * Sync the data.
1392 (void) ddi_dma_sync(dmap->handle, 0,
1393 pklen, DDI_DMA_SYNC_FORKERNEL);
1396 * Send a new copied message upstream.
1398 np = allocb(pklen, 0);
1399 if (np != NULL) {
1400 bcopy(dmap->buf, np->b_rptr, pklen);
1401 np->b_wptr = np->b_rptr + pklen;
1403 vrp->stats.mac_stat_ipackets++;
1404 vrp->stats.mac_stat_rbytes += pklen;
1406 if ((rxstat0 & VR_RDES0_BAR) != 0)
1407 vrp->stats.mac_stat_brdcstrcv++;
1408 else if ((rxstat0 & VR_RDES0_MAR) != 0)
1409 vrp->stats.mac_stat_multircv++;
1412 * Link this packet in the list.
1414 np->b_next = NULL;
1415 if (lp == NULL)
1416 lp = mp = np;
1417 else {
1418 mp->b_next = np;
1419 mp = np;
1421 } else {
1422 vrp->stats.allocbfail++;
1423 vrp->stats.mac_stat_norcvbuf++;
1426 } else {
1428 * Received with errors.
1430 vrp->stats.mac_stat_ierrors++;
1431 if ((rxstat0 & VR_RDES0_FAE) != 0)
1432 vrp->stats.ether_stat_align_errors++;
1433 if ((rxstat0 & VR_RDES0_CRCERR) != 0)
1434 vrp->stats.ether_stat_fcs_errors++;
1435 if ((rxstat0 & VR_RDES0_LONG) != 0)
1436 vrp->stats.ether_stat_toolong_errors++;
1437 if ((rxstat0 & VR_RDES0_RUNT) != 0)
1438 vrp->stats.ether_stat_tooshort_errors++;
1439 if ((rxstat0 & VR_RDES0_FOV) != 0)
1440 vrp->stats.mac_stat_overflows++;
1444 * Reset descriptor ownership to the MAC.
1446 ddi_put32(vrp->rxring.acchdl,
1447 &rxp->cdesc->stat0,
1448 VR_RDES0_OWN);
1449 (void) ddi_dma_sync(vrp->rxring.handle,
1450 rxp->offset,
1451 sizeof (vr_chip_desc_t),
1452 DDI_DMA_SYNC_FORDEV);
1454 vrp->rx.rp = rxp;
1457 * If we do flowcontrol and if the card can transmit pause frames,
1458 * increment the "available receive descriptors" register.
1460 if (n > 0 && vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
1462 * Whenever the card moves a fragment to host memory it
1463 * decrements the RXBUFCOUNT register. If the value in the
1464 * register reaches a low watermark, the card transmits a pause
1465 * frame. If the value in this register reaches a high
1466 * watermark, the card sends a "cancel pause" frame
1468 * Non-zero values written to this byte register are added
1469 * by the chip to the register's contents, so we must write
1470 * the number of descriptors free'd.
1472 VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT, MIN(n, 0xFF));
1474 return (lp);
 * Enqueue a list of packets for transmission.
 * Return the packets not transmitted.
1481 mblk_t *
1482 vr_mac_tx_enqueue_list(void *p, mblk_t *mp)
1484 vr_t *vrp;
1485 mblk_t *nextp;
1487 vrp = (vr_t *)p;
1488 mutex_enter(&vrp->tx.lock);
1489 do {
1490 if (vrp->tx.nfree == 0) {
1491 vrp->stats.ether_stat_defer_xmts++;
1492 vrp->tx.resched = 1;
1493 break;
1495 nextp = mp->b_next;
1496 mp->b_next = mp->b_prev = NULL;
1497 vr_tx_enqueue_msg(vrp, mp);
1498 mp = nextp;
1499 vrp->tx.nfree--;
1500 } while (mp != NULL);
1501 mutex_exit(&vrp->tx.lock);
1504 * Tell the chip to poll the TX ring.
1506 VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1507 return (mp);
1511 * Enqueue a message for transmission.
1513 static void
1514 vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp)
1516 vr_desc_t *wp;
1517 vr_data_dma_t *dmap;
1518 uint32_t pklen;
1519 uint32_t nextp;
1520 int padlen;
1522 if ((uchar_t)mp->b_rptr[0] == 0xff &&
1523 (uchar_t)mp->b_rptr[1] == 0xff &&
1524 (uchar_t)mp->b_rptr[2] == 0xff &&
1525 (uchar_t)mp->b_rptr[3] == 0xff &&
1526 (uchar_t)mp->b_rptr[4] == 0xff &&
1527 (uchar_t)mp->b_rptr[5] == 0xff)
1528 vrp->stats.mac_stat_brdcstxmt++;
1529 else if ((uchar_t)mp->b_rptr[0] == 1)
1530 vrp->stats.mac_stat_multixmt++;
1532 pklen = msgsize(mp);
1533 wp = vrp->tx.wp;
1534 dmap = &wp->dmabuf;
1537 * Copy the message into the pre-mapped buffer and free mp
1539 mcopymsg(mp, dmap->buf);
 * Zero-pad short packets up to ETHERMIN.
1544 padlen = ETHERMIN - pklen;
1545 if (padlen > 0) {
1546 bzero(dmap->buf + pklen, padlen);
1547 pklen += padlen;
1551 * Most of the statistics are updated on reclaim, after the actual
1552 * transmit. obytes is maintained here because the length is cleared
1553 * after transmission
1555 vrp->stats.mac_stat_obytes += pklen;
1558 * Sync the data so the device sees the new content too.
1560 (void) ddi_dma_sync(dmap->handle, 0, pklen, DDI_DMA_SYNC_FORDEV);
1563 * If we have reached the TX interrupt distance, enable a TX interrupt
1564 * for this packet. The Interrupt Control (IC) bit in the transmit
1565 * descriptor doesn't have any effect on the interrupt generation
1566 * despite the vague statements in the datasheet. Thus, we use the
1567 * more obscure interrupt suppress bit which is probably part of the
1568 * MAC's bookkeeping for TX interrupts and fragmented packets.
1570 vrp->tx.intr_distance++;
1571 nextp = ddi_get32(vrp->txring.acchdl, &wp->cdesc->next);
1572 if (vrp->tx.intr_distance >= VR_TX_MAX_INTR_DISTANCE) {
1574 * Don't suppress the interrupt for this packet.
1576 vrp->tx.intr_distance = 0;
1577 nextp &= (~VR_TDES3_SUPPRESS_INTR);
1578 } else {
1580 * Suppress the interrupt for this packet.
1582 nextp |= VR_TDES3_SUPPRESS_INTR;
1586 * Write and sync the chip's descriptor
1588 ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1,
1589 pklen | (VR_TDES1_STP | VR_TDES1_EDP | VR_TDES1_CHN));
1590 ddi_put32(vrp->txring.acchdl, &wp->cdesc->next, nextp);
1591 ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, VR_TDES0_OWN);
1592 (void) ddi_dma_sync(vrp->txring.handle, wp->offset,
1593 sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
1596 * The ticks counter is cleared by reclaim when it reclaimed some
1597 * descriptors and incremented by the periodic TX stall check.
1599 vrp->tx.stallticks = 1;
1600 vrp->tx.wp = wp->next;
1604 * Free transmitted descriptors.
1606 static void
1607 vr_tx_reclaim(vr_t *vrp)
1609 vr_desc_t *cp;
1610 uint32_t stat0, stat1, freed, dirty;
1612 ASSERT(mutex_owned(&vrp->tx.lock));
1614 freed = 0;
1615 dirty = vrp->tx.ndesc - vrp->tx.nfree;
1616 for (cp = vrp->tx.cp; dirty > 0; cp = cp->next) {
1618 * Sync & get descriptor status.
1620 (void) ddi_dma_sync(vrp->txring.handle, cp->offset,
1621 sizeof (vr_chip_desc_t),
1622 DDI_DMA_SYNC_FORKERNEL);
1623 stat0 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat0);
1625 if ((stat0 & VR_TDES0_OWN) != 0)
1626 break;
1629 * Do stats for the first descriptor in a chain.
1631 stat1 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat1);
1632 if ((stat1 & VR_TDES1_STP) != 0) {
1633 if ((stat0 & VR_TDES0_TERR) != 0) {
1634 vrp->stats.ether_stat_macxmt_errors++;
1635 if ((stat0 & VR_TDES0_UDF) != 0)
1636 vrp->stats.mac_stat_underflows++;
1637 if ((stat0 & VR_TDES0_ABT) != 0)
vrp->stats.ether_stat_ex_collisions++;
1640 * Abort and FIFO underflow stop the MAC.
1641 * Packet queueing must be disabled with HD
1642 * links because otherwise the MAC is also lost
1643 * after a few of these events.
1645 VR_PUT8(vrp->acc_reg, VR_CTRL0,
1646 VR_CTRL0_DMA_GO);
1647 } else
1648 vrp->stats.mac_stat_opackets++;
1650 if ((stat0 & VR_TDES0_COL) != 0) {
1651 if ((stat0 & VR_TDES0_NCR) == 1) {
1652 vrp->stats.
1653 ether_stat_first_collisions++;
1654 } else {
1655 vrp->stats.
1656 ether_stat_multi_collisions++;
1658 vrp->stats.mac_stat_collisions +=
1659 (stat0 & VR_TDES0_NCR);
1662 if ((stat0 & VR_TDES0_CRS) != 0)
1663 vrp->stats.ether_stat_carrier_errors++;
1665 if ((stat0 & VR_TDES0_OWC) != 0)
1666 vrp->stats.ether_stat_tx_late_collisions++;
1668 freed += 1;
1669 dirty -= 1;
1671 vrp->tx.cp = cp;
1673 if (freed > 0) {
1674 vrp->tx.nfree += freed;
1675 vrp->tx.stallticks = 0;
1676 vrp->stats.txreclaims += 1;
1677 } else
1678 vrp->stats.txreclaim0 += 1;
1682 * Check TX health every 2 seconds.
1684 static void
1685 vr_periodic(void *p)
1687 vr_t *vrp;
1689 vrp = (vr_t *)p;
1690 if (vrp->chip.state == CHIPSTATE_RUNNING &&
1691 vrp->chip.link.state == VR_LINK_STATE_UP && vrp->reset == 0) {
1692 if (mutex_tryenter(&vrp->intrlock) != 0) {
1693 mutex_enter(&vrp->tx.lock);
1694 if (vrp->tx.resched == 1) {
1695 if (vrp->tx.stallticks >= VR_MAXTXCHECKS) {
 * No successful reclaim in the last n
 * intervals. Reset the MAC.
1700 vrp->reset = 1;
1701 vr_log(vrp, CE_WARN,
1702 "TX stalled, resetting MAC");
1703 vrp->stats.txstalls++;
1704 } else {
1706 * Increase until we find that we've
1707 * waited long enough.
1709 vrp->tx.stallticks += 1;
1712 mutex_exit(&vrp->tx.lock);
1713 mutex_exit(&vrp->intrlock);
1714 vrp->stats.txchecks++;
1717 vrp->stats.cyclics++;
1721 * Bring the device to our desired initial state.
1723 static void
1724 vr_reset(vr_t *vrp)
1726 uint32_t time;
 * Reset the MAC.
 * If we don't wait long enough for the forced reset to complete, the MAC
 * loses sync with the PHY. The result: link up, no link change interrupt
 * and no data transfer.
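 *
 * The loop below polls VR_CTRL1_RESET roughly every 100 microseconds and,
 * if the bit has not cleared after about 100 ms, falls back to the forced
 * reset bit in VR_MISC1 before waiting once more.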
1734 time = 0;
1735 VR_PUT8(vrp->acc_io, VR_CTRL1, VR_CTRL1_RESET);
1736 do {
1737 drv_usecwait(100);
1738 time += 100;
1739 if (time >= 100000) {
1740 VR_PUT8(vrp->acc_io, VR_MISC1, VR_MISC1_RESET);
1741 delay(drv_usectohz(200000));
1743 } while ((VR_GET8(vrp->acc_io, VR_CTRL1) & VR_CTRL1_RESET) != 0);
1744 delay(drv_usectohz(10000));
1747 * Load the PROM contents into the MAC again.
1749 VR_SETBIT8(vrp->acc_io, VR_PROMCTL, VR_PROMCTL_RELOAD);
1750 delay(drv_usectohz(100000));
 * Tell the MAC via I/O space that we want to use memory space for
 * accessing registers.
1756 VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);
1760 * Prepare and enable the card (MAC + PHY + PCI).
1762 static int
1763 vr_start(vr_t *vrp)
1765 uint8_t pci_latency, pci_mode;
1767 ASSERT(mutex_owned(&vrp->oplock));
1770 * Allocate DMA buffers for RX.
1772 if (vr_rxring_init(vrp) != VR_SUCCESS) {
1773 vr_log(vrp, CE_NOTE, "vr_rxring_init() failed");
1774 return (ENOMEM);
1778 * Allocate DMA buffers for TX.
1780 if (vr_txring_init(vrp) != VR_SUCCESS) {
1781 vr_log(vrp, CE_NOTE, "vr_txring_init() failed");
1782 vr_rxring_fini(vrp);
1783 return (ENOMEM);
 * Changes of the chip-specific registers as done in VIA's fet driver.
 * These bits are not in the datasheet and are controlled by vr_chip_info.
1790 pci_mode = VR_GET8(vrp->acc_reg, VR_MODE2);
1791 if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE10T) != 0)
1792 pci_mode |= VR_MODE2_MODE10T;
1794 if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE2PCEROPT) != 0)
1795 pci_mode |= VR_MODE2_PCEROPT;
1797 if ((vrp->chip.info.features & VR_FEATURE_MRDLNMULTIPLE) != 0)
1798 pci_mode |= VR_MODE2_MRDPL;
1799 VR_PUT8(vrp->acc_reg, VR_MODE2, pci_mode);
1801 pci_mode = VR_GET8(vrp->acc_reg, VR_MODE3);
1802 if ((vrp->chip.info.bugs & VR_BUG_NEEDMIION) != 0)
1803 pci_mode |= VR_MODE3_MIION;
1804 VR_PUT8(vrp->acc_reg, VR_MODE3, pci_mode);
1807 * RX: Accept broadcast packets.
1809 VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTBROAD);
1812 * RX: Start DMA when there are 256 bytes in the FIFO.
1814 VR_SETBITS8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_FIFO_THRESHOLD_BITS,
1815 VR_RXCFG_FIFO_THRESHOLD_256);
1816 VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_RX_FIFO_THRESHOLD_BITS,
1817 VR_BCR0_RX_FIFO_THRESHOLD_256);
1820 * TX: Start transmit when there are 256 bytes in the FIFO.
1822 VR_SETBITS8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_FIFO_THRESHOLD_BITS,
1823 VR_TXCFG_FIFO_THRESHOLD_256);
1824 VR_SETBITS8(vrp->acc_reg, VR_BCR1, VR_BCR1_TX_FIFO_THRESHOLD_BITS,
1825 VR_BCR1_TX_FIFO_THRESHOLD_256);
1828 * Burst transfers up to 256 bytes.
1830 VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_DMABITS, VR_BCR0_DMA256);
 * Disable TX autopolling as it is bad for RX performance.
 * I assume this is because the RX process often finds the bus occupied
 * by the polling process.
1837 VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_NOAUTOPOLL);
1840 * Honor the PCI latency timer if it is reasonable.
1842 pci_latency = VR_GET8(vrp->acc_cfg, PCI_CONF_LATENCY_TIMER);
1843 if (pci_latency != 0 && pci_latency != 0xFF)
1844 VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);
1845 else
1846 VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);
1849 * Ensure that VLAN filtering is off, because this strips the tag.
1851 if ((vrp->chip.info.features & VR_FEATURE_VLANTAGGING) != 0) {
1852 VR_CLRBIT8(vrp->acc_reg, VR_BCR1, VR_BCR1_VLANFILTER);
1853 VR_CLRBIT8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_8021PQ_EN);
1857 * Clear the CAM filter.
1859 if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
1860 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
1861 VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 0);
1862 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
1864 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
1865 VR_CAM_CTRL_ENABLE|VR_CAM_CTRL_SELECT_VLAN);
1866 VR_PUT8(vrp->acc_reg, VR_VCAM0, 0);
1867 VR_PUT8(vrp->acc_reg, VR_VCAM1, 0);
1868 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_WRITE);
1869 VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 1);
1870 drv_usecwait(2);
1871 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
1875 * Give the start addresses of the descriptor rings to the DMA
1876 * controller on the MAC.
1878 VR_PUT32(vrp->acc_reg, VR_RXADDR, vrp->rx.rp->paddr);
1879 VR_PUT32(vrp->acc_reg, VR_TXADDR, vrp->tx.wp->paddr);
1882 * We don't use the additionally invented interrupt ICR1 register,
1883 * so make sure these are disabled.
1885 VR_PUT8(vrp->acc_reg, VR_ISR1, 0xFF);
1886 VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
1889 * Enable interrupts.
1891 VR_PUT16(vrp->acc_reg, VR_ISR0, 0xFFFF);
1892 VR_PUT16(vrp->acc_reg, VR_ICR0, VR_ICR0_CFG);
1895 * Enable the DMA controller.
1897 VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1900 * Configure the link. Rely on the link change interrupt for getting
1901 * the link state into the driver.
1903 vr_link_init(vrp);
1906 * Set the software view on the state to 'running'.
1908 vrp->chip.state = CHIPSTATE_RUNNING;
1909 return (0);
1913 * Stop DMA and interrupts.
1915 static int
1916 vr_stop(vr_t *vrp)
1918 ASSERT(mutex_owned(&vrp->oplock));
1921 * Stop interrupts.
1923 VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
1924 VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
1927 * Stop DMA.
1929 VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);
1932 * Set the software view on the state to stopped.
1934 vrp->chip.state = CHIPSTATE_STOPPED;
1937 * Remove DMA buffers from the rings.
1939 vr_rxring_fini(vrp);
1940 vr_txring_fini(vrp);
1941 return (0);
1945 vr_mac_start(void *p)
1947 vr_t *vrp;
1948 int rc;
1950 vrp = (vr_t *)p;
1951 mutex_enter(&vrp->oplock);
1954 * Reset the card.
1956 vr_reset(vrp);
1959 * Prepare and enable the card.
1961 rc = vr_start(vrp);
 * Configure a cyclic function to keep the card & driver from diverging.
1966 vrp->periodic_id =
1967 ddi_periodic_add(vr_periodic, vrp, VR_CHECK_INTERVAL, DDI_IPL_0);
1969 mutex_exit(&vrp->oplock);
1970 return (rc);
1973 void
1974 vr_mac_stop(void *p)
1976 vr_t *vrp = p;
1978 mutex_enter(&vrp->oplock);
1979 mutex_enter(&vrp->tx.lock);
1982 * Stop the device.
1984 (void) vr_stop(vrp);
1985 mutex_exit(&vrp->tx.lock);
1988 * Remove the cyclic from the system.
1990 ddi_periodic_delete(vrp->periodic_id);
1991 mutex_exit(&vrp->oplock);
 * Add or remove a multicast address to/from the filter.
 *
 * From the 21143 manual:
1998 * The 21143 can store 512 bits serving as hash bucket heads, and one physical
1999 * 48-bit Ethernet address. Incoming frames with multicast destination
2000 * addresses are subjected to imperfect filtering. Frames with physical
2001 * destination addresses are checked against the single physical address.
2002 * For any incoming frame with a multicast destination address, the 21143
2003 * applies the standard Ethernet cyclic redundancy check (CRC) function to the
2004 * first 6 bytes containing the destination address, then it uses the most
2005 * significant 9 bits of the result as a bit index into the table. If the
2006 * indexed bit is set, the frame is accepted. If the bit is cleared, the frame
2007 * is rejected. This filtering mode is called imperfect because multicast
2008 * frames not addressed to this station may slip through, but it still
2009 * decreases the number of frames that the host can receive.
2010 * I assume the above is also the way the VIA chips work. There's not a single
2011 * word about the multicast filter in the datasheet.
2013 * Another word on the CAM filter on VT6105M controllers:
2014 * The VT6105M has content addressable memory which can be used for perfect
2015 * filtering of 32 multicast addresses and a few VLAN id's
2017 * I think it works like this: When the controller receives a multicast
2018 * address, it looks up the address using CAM. When it is found, it takes the
2019 * matching cell address (index) and compares this to the bit position in the
 * CAM mask. If the bit is set, the packet is passed up. If CAM lookup does not
 * result in a match, the packet is filtered using the hash-based filter;
 * if that matches, the packet is passed up, and it is dropped otherwise.
2023 * Also, there's not a single word in the datasheet on how this cam is supposed
2024 * to work ...
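 *
 * The code below follows that model: on controllers with VR_FEATURE_CAMSUPPORT
 * it first tries to add or remove the address in the CAM and only falls back
 * to the 64-bit hash in MAR0/MAR1 when the CAM is full or when the address
 * was never stored in the CAM to begin with.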
2027 vr_mac_set_multicast(void *p, boolean_t add, const uint8_t *mca)
2029 vr_t *vrp;
2030 uint32_t crc_index;
2031 int32_t cam_index;
2032 uint32_t cam_mask;
2033 boolean_t use_hash_filter;
2034 ether_addr_t taddr;
2035 uint32_t a;
2037 vrp = (vr_t *)p;
2038 mutex_enter(&vrp->oplock);
2039 mutex_enter(&vrp->intrlock);
2040 use_hash_filter = B_FALSE;
2042 if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
2044 * Program the perfect filter.
2046 cam_mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2047 if (add == B_TRUE) {
2049 * Get index of first empty slot.
2051 bzero(&taddr, sizeof (taddr));
2052 cam_index = vr_cam_index(vrp, taddr);
2053 if (cam_index != -1) {
2055 * Add address at cam_index.
2057 cam_mask |= (1 << cam_index);
2058 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2059 VR_CAM_CTRL_ENABLE);
2060 VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, cam_index);
2061 VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2062 for (a = 0; a < ETHERADDRL; a++) {
2063 VR_PUT8(vrp->acc_reg,
2064 VR_MCAM0 + a, mca[a]);
2066 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2067 VR_CAM_CTRL_WRITE);
2068 drv_usecwait(2);
2069 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2070 VR_CAM_CTRL_DONE);
2071 } else {
2073 * No free CAM slots available
2074 * Add mca to the imperfect filter.
2076 use_hash_filter = B_TRUE;
2078 } else {
2080 * Find the index of the entry to remove
2081 * If the entry was not found (-1), the addition was
2082 * probably done when the table was full.
2084 cam_index = vr_cam_index(vrp, mca);
2085 if (cam_index != -1) {
2087 * Disable the corresponding mask bit.
2089 cam_mask &= ~(1 << cam_index);
2090 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2091 VR_CAM_CTRL_ENABLE);
2092 VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2093 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2094 VR_CAM_CTRL_DONE);
2095 } else {
2097 * The entry to be removed was not found
2098 * The likely cause is that the CAM was full
2099 * during addition. The entry is added to the
2100 * hash filter in that case and needs to be
2101 * removed there too.
2103 use_hash_filter = B_TRUE;
2106 } else {
2108 * No CAM in the MAC, thus we need the hash filter.
2110 use_hash_filter = B_TRUE;
2113 if (use_hash_filter == B_TRUE) {
 * Get the CRC-32 of the multicast address.
 * The card uses the "MSB first" direction when calculating the CRC.
 * This is odd because Ethernet is "LSB first".
 * We have to use that "big endian" approach as well.
2120 crc_index = ether_crc_be(mca) >> (32 - 6);
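/*
 * crc_index now holds the six most significant bits of the CRC (0..63);
 * bits 0-31 select a bit in MAR0 (mhash0) and bits 32-63 a bit in MAR1
 * (mhash1), which is what the if/else below switches on.
 */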
2121 if (add == B_TRUE) {
2123 * Turn bit[crc_index] on.
2125 if (crc_index < 32)
2126 vrp->mhash0 |= (1 << crc_index);
2127 else
2128 vrp->mhash1 |= (1 << (crc_index - 32));
2129 } else {
2131 * Turn bit[crc_index] off.
if (crc_index < 32)
	vrp->mhash0 &= ~(1 << crc_index);
else
	vrp->mhash1 &= ~(1 << (crc_index - 32));
2140 * When not promiscuous write the filter now. When promiscuous,
2141 * the filter is open and will be written when promiscuous ends.
2143 if (vrp->promisc == B_FALSE) {
2144 VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2145 VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
 * Enable/disable multicast reception based on mcount.
2152 if (add == B_TRUE)
2153 vrp->mcount++;
2154 else if (vrp->mcount != 0)
vrp->mcount--;
2156 if (vrp->mcount != 0)
2157 VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2158 else
2159 VR_CLRBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2161 mutex_exit(&vrp->intrlock);
2162 mutex_exit(&vrp->oplock);
2163 return (0);
 * Calculate the CRC-32 for the 6 bytes of a multicast address in MSB(it) first order.
 * The MSB first order is a bit odd because the Ethernet standard is LSB first.
2170 static uint32_t
2171 ether_crc_be(const uint8_t *data)
2173 uint32_t crc = (uint32_t)0xFFFFFFFFU;
2174 uint32_t carry;
2175 uint32_t bit;
2176 uint32_t length;
2177 uint8_t c;
2179 for (length = 0; length < ETHERADDRL; length++) {
2180 c = data[length];
2181 for (bit = 0; bit < 8; bit++) {
2182 carry = ((crc & 0x80000000U) ? 1 : 0) ^ (c & 0x01);
2183 crc <<= 1;
2184 c >>= 1;
2185 if (carry)
2186 crc = (crc ^ 0x04C11DB6) | carry;
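/*
 * Because crc was just shifted left its LSB is 0, so
 * (crc ^ 0x04C11DB6) | carry equals an XOR with the standard CRC-32
 * polynomial 0x04C11DB7 whenever a carry occurred.
 */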
2189 return (crc);
 * Return the CAM index (base 0) of maddr, or -1 if maddr is not found.
 * If maddr is 0, return the index of an empty slot in the CAM, or -1 when no
 * free slots are available.
2198 static int32_t
2199 vr_cam_index(vr_t *vrp, const uint8_t *maddr)
2201 ether_addr_t taddr;
2202 int32_t index;
2203 uint32_t mask;
2204 uint32_t a;
2206 bzero(&taddr, sizeof (taddr));
2209 * Read the CAM mask from the controller.
2211 mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2214 * If maddr is 0, return the first unused slot or -1 for no unused.
2216 if (bcmp(maddr, taddr, ETHERADDRL) == 0) {
2218 * Look for the first unused position in mask.
2220 for (index = 0; index < VR_CAM_SZ; index++) {
2221 if (((mask >> index) & 1) == 0)
2222 return (index);
2224 return (-1);
2225 } else {
2227 * Look for maddr in CAM.
2229 for (index = 0; index < VR_CAM_SZ; index++) {
2230 /* Look at enabled entries only */
2231 if (((mask >> index) & 1) == 0)
2232 continue;
2234 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
2235 VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, index);
2236 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_READ);
2237 drv_usecwait(2);
2238 for (a = 0; a < ETHERADDRL; a++)
2239 taddr[a] = VR_GET8(vrp->acc_reg, VR_MCAM0 + a);
2240 VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
2241 if (bcmp(maddr, taddr, ETHERADDRL) == 0)
2242 return (index);
2245 return (-1);
2249 * Set promiscuous mode on or off.
2252 vr_mac_set_promisc(void *p, boolean_t promiscflag)
2254 vr_t *vrp;
2255 uint8_t rxcfg;
2257 vrp = (vr_t *)p;
2259 mutex_enter(&vrp->intrlock);
2260 mutex_enter(&vrp->oplock);
2261 mutex_enter(&vrp->tx.lock);
2264 * Get current receive configuration.
2266 rxcfg = VR_GET8(vrp->acc_reg, VR_RXCFG);
2267 vrp->promisc = promiscflag;
2269 if (promiscflag == B_TRUE) {
2271 * Enable promiscuous mode and open the multicast filter.
2273 rxcfg |= (VR_RXCFG_PROMISC | VR_RXCFG_ACCEPTMULTI);
2274 VR_PUT32(vrp->acc_reg, VR_MAR0, 0xffffffff);
2275 VR_PUT32(vrp->acc_reg, VR_MAR1, 0xffffffff);
2276 } else {
2278 * Restore the multicast filter and disable promiscuous mode.
2280 VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2281 VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2282 rxcfg &= ~VR_RXCFG_PROMISC;
2283 if (vrp->mcount != 0)
2284 rxcfg |= VR_RXCFG_ACCEPTMULTI;
2286 VR_PUT8(vrp->acc_reg, VR_RXCFG, rxcfg);
2287 mutex_exit(&vrp->tx.lock);
2288 mutex_exit(&vrp->oplock);
2289 mutex_exit(&vrp->intrlock);
2290 return (0);
2294 vr_mac_getstat(void *arg, uint_t stat, uint64_t *val)
2296 vr_t *vrp;
2297 uint64_t v;
2299 vrp = (void *) arg;
2301 switch (stat) {
2302 default:
2303 return (ENOTSUP);
2305 case ETHER_STAT_ADV_CAP_100T4:
2306 v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_T4) != 0;
2307 break;
2309 case ETHER_STAT_ADV_CAP_100FDX:
2310 v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX_FD) != 0;
2311 break;
2313 case ETHER_STAT_ADV_CAP_100HDX:
2314 v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX) != 0;
2315 break;
2317 case ETHER_STAT_ADV_CAP_10FDX:
2318 v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T_FD) != 0;
2319 break;
2321 case ETHER_STAT_ADV_CAP_10HDX:
2322 v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T) != 0;
2323 break;
2325 case ETHER_STAT_ADV_CAP_ASMPAUSE:
2326 v = 0;
2327 break;
2329 case ETHER_STAT_ADV_CAP_AUTONEG:
2330 v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0;
2331 break;
2333 case ETHER_STAT_ADV_CAP_PAUSE:
2334 v = (vrp->chip.mii.anadv & MII_ABILITY_PAUSE) != 0;
2335 break;
2337 case ETHER_STAT_ADV_REMFAULT:
2338 v = (vrp->chip.mii.anadv & MII_AN_ADVERT_REMFAULT) != 0;
2339 break;
2341 case ETHER_STAT_ALIGN_ERRORS:
2342 v = vrp->stats.ether_stat_align_errors;
2343 break;
2345 case ETHER_STAT_CAP_100T4:
2346 v = (vrp->chip.mii.status & MII_STATUS_100_BASE_T4) != 0;
2347 break;
2349 case ETHER_STAT_CAP_100FDX:
2350 v = (vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) != 0;
2351 break;
2353 case ETHER_STAT_CAP_100HDX:
2354 v = (vrp->chip.mii.status & MII_STATUS_100_BASEX) != 0;
2355 break;
2357 case ETHER_STAT_CAP_10FDX:
2358 v = (vrp->chip.mii.status & MII_STATUS_10_FD) != 0;
2359 break;
2361 case ETHER_STAT_CAP_10HDX:
2362 v = (vrp->chip.mii.status & MII_STATUS_10) != 0;
2363 break;
2365 case ETHER_STAT_CAP_ASMPAUSE:
2366 v = 0;
2367 break;
2369 case ETHER_STAT_CAP_AUTONEG:
2370 v = (vrp->chip.mii.status & MII_STATUS_CANAUTONEG) != 0;
2371 break;
2373 case ETHER_STAT_CAP_PAUSE:
2374 v = 1;
2375 break;
2377 case ETHER_STAT_CAP_REMFAULT:
2378 v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
2379 break;
2381 case ETHER_STAT_CARRIER_ERRORS:
2383 * Number of times carrier was lost or never detected on a
2384 * transmission attempt.
2386 v = vrp->stats.ether_stat_carrier_errors;
2387 break;
2389 case ETHER_STAT_JABBER_ERRORS:
2390 return (ENOTSUP);
2392 case ETHER_STAT_DEFER_XMTS:
2394 * Packets without collisions where first transmit attempt was
2395 * delayed because the medium was busy.
2397 v = vrp->stats.ether_stat_defer_xmts;
2398 break;
2400 case ETHER_STAT_EX_COLLISIONS:
2402 * Frames where excess collisions occurred on transmit, causing
2403 * transmit failure.
2405 v = vrp->stats.ether_stat_ex_collisions;
2406 break;
2408 case ETHER_STAT_FCS_ERRORS:
2410 * Packets received with CRC errors.
2412 v = vrp->stats.ether_stat_fcs_errors;
2413 break;
2415 case ETHER_STAT_FIRST_COLLISIONS:
2417 * Packets successfully transmitted with exactly one collision.
2419 v = vrp->stats.ether_stat_first_collisions;
2420 break;
2422 case ETHER_STAT_LINK_ASMPAUSE:
2423 v = 0;
2424 break;
2426 case ETHER_STAT_LINK_AUTONEG:
2427 v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0 &&
2428 (vrp->chip.mii.status & MII_STATUS_ANDONE) != 0;
2429 break;
2431 case ETHER_STAT_LINK_DUPLEX:
2432 v = vrp->chip.link.duplex;
2433 break;
2435 case ETHER_STAT_LINK_PAUSE:
2436 v = vrp->chip.link.flowctrl;
2437 break;
2439 case ETHER_STAT_LP_CAP_100T4:
2440 v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_T4) != 0;
2441 break;
2443 case ETHER_STAT_LP_CAP_1000FDX:
2444 v = 0;
2445 break;
2447 case ETHER_STAT_LP_CAP_1000HDX:
2448 v = 0;
2449 break;
2451 case ETHER_STAT_LP_CAP_100FDX:
2452 v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX_FD) != 0;
2453 break;
2455 case ETHER_STAT_LP_CAP_100HDX:
2456 v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX) != 0;
2457 break;
2459 case ETHER_STAT_LP_CAP_10FDX:
2460 v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T_FD) != 0;
2461 break;
2463 case ETHER_STAT_LP_CAP_10HDX:
2464 v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T) != 0;
2465 break;
2467 case ETHER_STAT_LP_CAP_ASMPAUSE:
2468 v = 0;
2469 break;
2471 case ETHER_STAT_LP_CAP_AUTONEG:
2472 v = (vrp->chip.mii.anexp & MII_AN_EXP_LPCANAN) != 0;
2473 break;
2475 case ETHER_STAT_LP_CAP_PAUSE:
2476 v = (vrp->chip.mii.lpable & MII_ABILITY_PAUSE) != 0;
2477 break;
2479 case ETHER_STAT_LP_REMFAULT:
2480 v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
2481 break;
2483 case ETHER_STAT_MACRCV_ERRORS:
2485 * Packets received with MAC errors, except align_errors,
2486 * fcs_errors, and toolong_errors.
2488 v = vrp->stats.ether_stat_macrcv_errors;
2489 break;
2491 case ETHER_STAT_MACXMT_ERRORS:
2493 * Packets encountering transmit MAC failures, except carrier
2494 * and collision failures.
2496 v = vrp->stats.ether_stat_macxmt_errors;
2497 break;
2499 case ETHER_STAT_MULTI_COLLISIONS:
2501 * Packets successfully transmitted with multiple collisions.
2503 v = vrp->stats.ether_stat_multi_collisions;
2504 break;
2506 case ETHER_STAT_SQE_ERRORS:
2508 * Number of times signal quality error was reported
2509 * This one is reported by the PHY.
2511 return (ENOTSUP);
2513 case ETHER_STAT_TOOLONG_ERRORS:
2515 * Packets received larger than the maximum permitted length.
2517 v = vrp->stats.ether_stat_toolong_errors;
2518 break;
2520 case ETHER_STAT_TOOSHORT_ERRORS:
2521 v = vrp->stats.ether_stat_tooshort_errors;
2522 break;
2524 case ETHER_STAT_TX_LATE_COLLISIONS:
2526 * Number of times a transmit collision occurred late
2527 * (after 512 bit times).
2529 v = vrp->stats.ether_stat_tx_late_collisions;
2530 break;
2532 case ETHER_STAT_XCVR_ADDR:
2534 * MII address in the 0 to 31 range of the physical layer
2535 * device in use for a given Ethernet device.
2537 v = vrp->chip.phyaddr;
2538 break;
2540 case ETHER_STAT_XCVR_ID:
2542 * MII transceiver manufacturer and device ID.
2544 v = (vrp->chip.mii.identh << 16) | vrp->chip.mii.identl;
2545 break;
2547 case ETHER_STAT_XCVR_INUSE:
2548 v = vrp->chip.link.mau;
2549 break;
2551 case MAC_STAT_BRDCSTRCV:
2552 v = vrp->stats.mac_stat_brdcstrcv;
2553 break;
2555 case MAC_STAT_BRDCSTXMT:
2556 v = vrp->stats.mac_stat_brdcstxmt;
2557 break;
2559 case MAC_STAT_MULTIXMT:
2560 v = vrp->stats.mac_stat_multixmt;
2561 break;
2563 case MAC_STAT_COLLISIONS:
2564 v = vrp->stats.mac_stat_collisions;
2565 break;
2567 case MAC_STAT_IERRORS:
2568 v = vrp->stats.mac_stat_ierrors;
2569 break;
2571 case MAC_STAT_IFSPEED:
2572 if (vrp->chip.link.speed == VR_LINK_SPEED_100MBS)
2573 v = 100 * 1000 * 1000;
2574 else if (vrp->chip.link.speed == VR_LINK_SPEED_10MBS)
2575 v = 10 * 1000 * 1000;
2576 else
2577 v = 0;
2578 break;
2580 case MAC_STAT_IPACKETS:
2581 v = vrp->stats.mac_stat_ipackets;
2582 break;
2584 case MAC_STAT_MULTIRCV:
2585 v = vrp->stats.mac_stat_multircv;
2586 break;
2588 case MAC_STAT_NORCVBUF:
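/*
 * Fold the chip's missed-packet tally into the soft counter and
 * reset the hardware tally so the same packets are not counted
 * again on the next read.
 */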
2589 vrp->stats.mac_stat_norcvbuf +=
2590 VR_GET16(vrp->acc_reg, VR_TALLY_MPA);
2591 VR_PUT16(vrp->acc_reg, VR_TALLY_MPA, 0);
2592 v = vrp->stats.mac_stat_norcvbuf;
2593 break;
2595 case MAC_STAT_NOXMTBUF:
2596 v = vrp->stats.mac_stat_noxmtbuf;
2597 break;
2599 case MAC_STAT_OBYTES:
2600 v = vrp->stats.mac_stat_obytes;
2601 break;
2603 case MAC_STAT_OERRORS:
2604 v = vrp->stats.ether_stat_macxmt_errors +
2605 vrp->stats.mac_stat_underflows +
2606 vrp->stats.ether_stat_align_errors +
2607 vrp->stats.ether_stat_carrier_errors +
2608 vrp->stats.ether_stat_fcs_errors;
2609 break;
2611 case MAC_STAT_OPACKETS:
2612 v = vrp->stats.mac_stat_opackets;
2613 break;
2615 case MAC_STAT_RBYTES:
2616 v = vrp->stats.mac_stat_rbytes;
2617 break;
2619 case MAC_STAT_UNKNOWNS:
2621 * Isn't this something for the MAC layer to maintain?
2623 return (ENOTSUP);
2625 case MAC_STAT_UNDERFLOWS:
2626 v = vrp->stats.mac_stat_underflows;
2627 break;
2629 case MAC_STAT_OVERFLOWS:
2630 v = vrp->stats.mac_stat_overflows;
2631 break;
2633 *val = v;
2634 return (0);
2638 vr_mac_set_ether_addr(void *p, const uint8_t *ea)
2640 vr_t *vrp;
2641 int i;
2643 vrp = (vr_t *)p;
2644 mutex_enter(&vrp->oplock);
2645 mutex_enter(&vrp->intrlock);
2648 * Set a new station address.
2650 for (i = 0; i < ETHERADDRL; i++)
2651 VR_PUT8(vrp->acc_reg, VR_ETHERADDR + i, ea[i]);
2653 mutex_exit(&vrp->intrlock);
2654 mutex_exit(&vrp->oplock);
2655 return (0);
2659 * Configure the ethernet link according to param and chip.mii.
2661 static void
2662 vr_link_init(vr_t *vrp)
2664 ASSERT(mutex_owned(&vrp->oplock));
2665 if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
2667 * If we do autoneg, ensure restart autoneg is ON.
2669 vrp->chip.mii.control |= MII_CONTROL_RSAN;
2672 * The advertisements are prepared by param_init.
2674 vr_phy_write(vrp, MII_AN_ADVERT, vrp->chip.mii.anadv);
2675 } else {
2677 * If we don't autoneg, we need speed, duplex and flowcontrol
2678 * to configure the link. However, dladm doesn't allow changes
2679 * to speed and duplex (readonly). The way this is solved
2680 * (ahem) is to select the highest enabled combination.
2681 * Speed and duplex should be r/w when autoneg is off.
2683 if ((vrp->param.anadv_en &
2684 MII_ABILITY_100BASE_TX_FD) != 0) {
2685 vrp->chip.mii.control |= MII_CONTROL_100MB;
2686 vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
2687 } else if ((vrp->param.anadv_en &
2688 MII_ABILITY_100BASE_TX) != 0) {
2689 vrp->chip.mii.control |= MII_CONTROL_100MB;
2690 vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
2691 } else if ((vrp->param.anadv_en &
2692 MII_ABILITY_10BASE_T_FD) != 0) {
2693 vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
2694 vrp->chip.mii.control &= ~MII_CONTROL_100MB;
2695 } else {
2696 vrp->chip.mii.control &= ~MII_CONTROL_100MB;
2697 vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
2701 * Write the control register.
2703 vr_phy_write(vrp, MII_CONTROL, vrp->chip.mii.control);
2706 * With autoneg off we cannot rely on the link_change interrupt
2707 * for getting the status into the driver.
2709 if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
2710 vr_link_state(vrp);
2711 mac_link_update(vrp->machdl,
2712 (link_state_t)vrp->chip.link.state);
2717 * Get link state in the driver and configure the MAC accordingly.
2719 static void
2720 vr_link_state(vr_t *vrp)
2722 uint16_t mask;
2724 ASSERT(mutex_owned(&vrp->oplock));
2726 vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
2727 vr_phy_read(vrp, MII_CONTROL, &vrp->chip.mii.control);
2728 vr_phy_read(vrp, MII_AN_ADVERT, &vrp->chip.mii.anadv);
2729 vr_phy_read(vrp, MII_AN_LPABLE, &vrp->chip.mii.lpable);
2730 vr_phy_read(vrp, MII_AN_EXPANSION, &vrp->chip.mii.anexp);
2733 * If we did autoneg, deduce the link type/speed by selecting the
2734 * highest common denominator.
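/*
 * The resolution order below, from highest to lowest, is:
 * 100FDX, 100T4, 100HDX, 10FDX, 10HDX; when none match, the link
 * parameters are left unknown.
 */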
2736 if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
2737 mask = vrp->chip.mii.anadv & vrp->chip.mii.lpable;
2738 if ((mask & MII_ABILITY_100BASE_TX_FD) != 0) {
2739 vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2740 vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
2741 vrp->chip.link.mau = VR_MAU_100X;
2742 } else if ((mask & MII_ABILITY_100BASE_T4) != 0) {
2743 vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2744 vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2745 vrp->chip.link.mau = VR_MAU_100T4;
2746 } else if ((mask & MII_ABILITY_100BASE_TX) != 0) {
2747 vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2748 vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2749 vrp->chip.link.mau = VR_MAU_100X;
2750 } else if ((mask & MII_ABILITY_10BASE_T_FD) != 0) {
2751 vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
2752 vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
2753 vrp->chip.link.mau = VR_MAU_10;
2754 } else if ((mask & MII_ABILITY_10BASE_T) != 0) {
2755 vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
2756 vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2757 vrp->chip.link.mau = VR_MAU_10;
2758 } else {
2759 vrp->chip.link.speed = VR_LINK_SPEED_UNKNOWN;
2760 vrp->chip.link.duplex = VR_LINK_DUPLEX_UNKNOWN;
2761 vrp->chip.link.mau = VR_MAU_UNKNOWN;
2765 * Did we negotiate pause?
2767 if ((mask & MII_ABILITY_PAUSE) != 0 &&
2768 vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL)
2769 vrp->chip.link.flowctrl = VR_PAUSE_BIDIRECTIONAL;
2770 else
2771 vrp->chip.link.flowctrl = VR_PAUSE_NONE;
2774 * Did either one detect an AN fault?
2776 if ((vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0)
2777 vr_log(vrp, CE_WARN,
2778 "AN remote fault reported by LP.");
2780 if ((vrp->chip.mii.lpable & MII_AN_ADVERT_REMFAULT) != 0)
2781 vr_log(vrp, CE_WARN, "AN remote fault caused for LP.");
2782 } else {
2784 * We didn't autoneg.
2785 * The link type is defined by the control register.
2787 if ((vrp->chip.mii.control & MII_CONTROL_100MB) != 0) {
2788 vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2789 vrp->chip.link.mau = VR_MAU_100X;
2790 } else {
2791 vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
2792 vrp->chip.link.mau = VR_MAU_10;
2795 if ((vrp->chip.mii.control & MII_CONTROL_FDUPLEX) != 0)
2796 vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
2797 else {
2798 vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2800 * No pause on HDX links.
2802 vrp->chip.link.flowctrl = VR_PAUSE_NONE;
2807 * Set the duplex mode on the MAC according to that of the PHY.
2809 if (vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL) {
2810 VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
2812 * Enable packet queueing on FDX links.
2814 if ((vrp->chip.info.bugs & VR_BUG_NO_TXQUEUEING) == 0)
2815 VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
2816 } else {
2817 VR_CLRBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
2819 * Disable packet queueing on HDX links. With queueing enabled,
2820 * this MAC gets lost after a TX abort (too many collisions).
2822 VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
2826 * Set pause options on the MAC.
2828 if (vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
2830 * All of our MACs can receive pause frames.
2832 VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXRFEN);
2835 * VT6105 and above can transmit pause frames.
2837 if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
2839 * Set the number of available receive descriptors.
2840 * Non-zero values written to this register are added
2841 * to the register's contents. Careful: Writing zero
2842 * clears the register and thus causes a (long) pause
2843 * request.
2845 VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT,
2846 MIN(vrp->rx.ndesc, 0xFF) -
2847 VR_GET8(vrp->acc_reg,
2848 VR_FCR0_RXBUFCOUNT));
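/*
 * Because writes are added to the current contents (see above),
 * writing (target - current) leaves the register at the target
 * value: the number of RX descriptors, capped at 255.
 */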
2851 * Request pause when we have 4 descs left.
2853 VR_SETBITS8(vrp->acc_reg, VR_FCR1,
2854 VR_FCR1_PAUSEONBITS, VR_FCR1_PAUSEON_04);
2857 * Cancel the pause when there are 24 descriptors again.
2859 VR_SETBITS8(vrp->acc_reg, VR_FCR1,
2860 VR_FCR1_PAUSEOFFBITS, VR_FCR1_PAUSEOFF_24);
2863 * Request a pause of FFFF bit-times. This long pause
2864 * is cancelled when the high watermark is reached.
2866 VR_PUT16(vrp->acc_reg, VR_FCR2_PAUSE, 0xFFFF);
2869 * Enable flow control on the MAC.
2871 VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXTFEN);
2872 VR_SETBIT8(vrp->acc_reg, VR_FCR1, VR_FCR1_FD_RX_EN |
2873 VR_FCR1_FD_TX_EN | VR_FCR1_XONXOFF_EN);
2875 } else {
2877 * Turn flow control OFF.
2879 VR_CLRBIT8(vrp->acc_reg,
2880 VR_MISC0, VR_MISC0_FDXRFEN | VR_MISC0_FDXTFEN);
2881 if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
2882 VR_CLRBIT8(vrp->acc_reg, VR_FCR1,
2883 VR_FCR1_FD_RX_EN | VR_FCR1_FD_TX_EN |
2884 VR_FCR1_XONXOFF_EN);
2889 * Set link state.
2891 if ((vrp->chip.mii.status & MII_STATUS_LINKUP) != 0)
2892 vrp->chip.link.state = VR_LINK_STATE_UP;
2893 else
2894 vrp->chip.link.state = VR_LINK_STATE_DOWN;
2898 * The PHY is automatically polled by the MAC once per 1024 MD clock cycles.
2899 * MD is clocked once per 960 ns, so a poll happens about every
2900 * 1024 x 960 ns ~= 0.98 ms, roughly 1000 times per second.
2901 * This polling process is required for the functionality of the link change
2902 * interrupt. The polling process must be disabled in order to access PHY
2903 * registers using MDIO.
2905 * Turn off PHY polling so that the PHY registers can be accessed.
2907 static void
2908 vr_phy_autopoll_disable(vr_t *vrp)
2910 uint32_t time;
2911 uint8_t miicmd, miiaddr;
2914 * Special procedure to stop the autopolling.
2916 if ((vrp->chip.info.bugs & VR_BUG_MIIPOLLSTOP) != 0) {
2918 * If polling is enabled.
2920 miicmd = VR_GET8(vrp->acc_reg, VR_MIICMD);
2921 if ((miicmd & VR_MIICMD_MD_AUTO) != 0) {
2923 * Wait for the end of a cycle (mdone set).
2925 time = 0;
2926 do {
2927 drv_usecwait(10);
2928 if (time >= VR_MMI_WAITMAX) {
2929 vr_log(vrp, CE_WARN,
2930 "Timeout in "
2931 "disable MII polling");
2932 break;
2934 time += VR_MMI_WAITINCR;
2935 miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
2936 } while ((miiaddr & VR_MIIADDR_MDONE) == 0);
2939 * Once paused, we can disable autopolling.
2941 VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2942 } else {
2944 * Turn off MII polling.
2946 VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2949 * Wait for MIDLE in MII address register.
2951 time = 0;
2952 do {
2953 drv_usecwait(VR_MMI_WAITINCR);
2954 if (time >= VR_MMI_WAITMAX) {
2955 vr_log(vrp, CE_WARN,
2956 "Timeout in disable MII polling");
2957 break;
2959 time += VR_MMI_WAITINCR;
2960 miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
2961 } while ((miiaddr & VR_MIIADDR_MIDLE) == 0);
2966 * Turn on PHY polling. While autopolling is active, PHY registers cannot be accessed via MDIO.
2968 static void
2969 vr_phy_autopoll_enable(vr_t *vrp)
2971 uint32_t time;
2973 VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2974 VR_PUT8(vrp->acc_reg, VR_MIIADDR, MII_STATUS|VR_MIIADDR_MAUTO);
2975 VR_PUT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_AUTO);
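/*
 * The autopoll machine is pointed at the PHY status register
 * (MII_STATUS) so that link changes are picked up continuously;
 * this is what makes the link change interrupt work (see the comment
 * above vr_phy_autopoll_disable()).
 */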
2978 * Wait for the polling process to finish.
2980 time = 0;
2981 do {
2982 drv_usecwait(VR_MMI_WAITINCR);
2983 if (time >= VR_MMI_WAITMAX) {
2984 vr_log(vrp, CE_NOTE, "Timeout in enable MII polling");
2985 break;
2987 time += VR_MMI_WAITINCR;
2988 } while ((VR_GET8(vrp->acc_reg, VR_MIIADDR) & VR_MIIADDR_MDONE) == 0);
2991 * Initiate polling.
2993 VR_SETBIT8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_MAUTO);
2997 * Read a register from the PHY using MDIO.
2999 static void
3000 vr_phy_read(vr_t *vrp, int offset, uint16_t *value)
3002 uint32_t time;
3004 vr_phy_autopoll_disable(vrp);
3007 * Write the register number to the lower 5 bits of the MII address
3008 * register.
3010 VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);
3013 * Write a READ command to the MII control register.
3014 * This bit will be cleared when the read is finished.
3016 VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_READ);
3019 * Wait until the read is done.
3021 time = 0;
3022 do {
3023 drv_usecwait(VR_MMI_WAITINCR);
3024 if (time >= VR_MMI_WAITMAX) {
3025 vr_log(vrp, CE_NOTE, "Timeout in MII read command");
3026 break;
3028 time += VR_MMI_WAITINCR;
3029 } while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_READ) != 0);
3031 *value = VR_GET16(vrp->acc_reg, VR_MIIDATA);
3032 vr_phy_autopoll_enable(vrp);
3036 * Write to a PHY's register.
3038 static void
3039 vr_phy_write(vr_t *vrp, int offset, uint16_t value)
3041 uint32_t time;
3043 vr_phy_autopoll_disable(vrp);
3046 * Write the register number to the MII address register.
3048 VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);
3051 * Write the value to the data register.
3053 VR_PUT16(vrp->acc_reg, VR_MIIDATA, value);
3056 * Issue the WRITE command to the command register.
3057 * This bit will be cleared when the write is finished.
3059 VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_WRITE);
3061 time = 0;
3062 do {
3063 drv_usecwait(VR_MMI_WAITINCR);
3064 if (time >= VR_MMI_WAITMAX) {
3065 vr_log(vrp, CE_NOTE, "Timeout in MII write command");
3066 break;
3068 time += VR_MMI_WAITINCR;
3069 } while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_WRITE) != 0);
3070 vr_phy_autopoll_enable(vrp);
3074 * Initialize and install some private kstats.
3076 typedef struct {
3077 char *name;
3078 uchar_t type;
3079 } vr_kstat_t;
3081 static const vr_kstat_t vr_driver_stats [] = {
3082 {"allocbfail", KSTAT_DATA_INT32},
3083 {"intr_claimed", KSTAT_DATA_INT64},
3084 {"intr_unclaimed", KSTAT_DATA_INT64},
3085 {"linkchanges", KSTAT_DATA_INT64},
3086 {"txnfree", KSTAT_DATA_INT32},
3087 {"txstalls", KSTAT_DATA_INT32},
3088 {"resets", KSTAT_DATA_INT32},
3089 {"txreclaims", KSTAT_DATA_INT64},
3090 {"txreclaim0", KSTAT_DATA_INT64},
3091 {"cyclics", KSTAT_DATA_INT64},
3092 {"txchecks", KSTAT_DATA_INT64},
3095 static void
3096 vr_kstats_init(vr_t *vrp)
3098 kstat_t *ksp;
3099 struct kstat_named *knp;
3100 int i;
3101 int nstats;
3103 nstats = sizeof (vr_driver_stats) / sizeof (vr_kstat_t);
3105 ksp = kstat_create(MODULENAME, ddi_get_instance(vrp->devinfo),
3106 "driver", "net", KSTAT_TYPE_NAMED, nstats, 0);
3108 if (ksp == NULL) {
3109 vr_log(vrp, CE_WARN, "kstat_create failed");
return;
}
3111 ksp->ks_update = vr_update_kstats;
3112 ksp->ks_private = (void*) vrp;
3113 knp = ksp->ks_data;
3115 for (i = 0; i < nstats; i++, knp++) {
3116 kstat_named_init(knp, vr_driver_stats[i].name,
3117 vr_driver_stats[i].type);
3119 kstat_install(ksp);
3120 vrp->ksp = ksp;
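/*
 * Once installed, these counters can be read from userland with
 * kstat(1M), e.g. (assuming MODULENAME is "vr"):
 *
 *	kstat -m vr -n driver
 */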
3123 static int
3124 vr_update_kstats(kstat_t *ksp, int access)
3126 vr_t *vrp;
3127 struct kstat_named *knp;
3129 vrp = (vr_t *)ksp->ks_private;
3130 knp = ksp->ks_data;
3132 if (access != KSTAT_READ)
3133 return (EACCES);
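/*
 * The assignments below must stay in the same order as the entries
 * in the vr_driver_stats table; ks_data was laid out in that order.
 */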
3135 (knp++)->value.ui32 = vrp->stats.allocbfail;
3136 (knp++)->value.ui64 = vrp->stats.intr_claimed;
3137 (knp++)->value.ui64 = vrp->stats.intr_unclaimed;
3138 (knp++)->value.ui64 = vrp->stats.linkchanges;
3139 (knp++)->value.ui32 = vrp->tx.nfree;
3140 (knp++)->value.ui32 = vrp->stats.txstalls;
3141 (knp++)->value.ui32 = vrp->stats.resets;
3142 (knp++)->value.ui64 = vrp->stats.txreclaims;
3143 (knp++)->value.ui64 = vrp->stats.txreclaim0;
3144 (knp++)->value.ui64 = vrp->stats.cyclics;
3145 (knp++)->value.ui64 = vrp->stats.txchecks;
3146 return (0);
3150 * Remove 'private' kstats.
3152 static void
3153 vr_remove_kstats(vr_t *vrp)
3155 if (vrp->ksp != NULL)
3156 kstat_delete(vrp->ksp);
3160 * Get a property of the device/driver
3161 * Remarks:
3162 * - pr_val is always an integer of size pr_valsize
3163 * - ENABLED (EN) is what is configured via dladm
3164 * - ADVERTISED (ADV) is ENABLED minus constraints, like PHY/MAC capabilities
3165 * - DEFAULT are driver- and hardware defaults (DEFAULT is implemented as a
3166 * flag in pr_flags instead of MAC_PROP_DEFAULT_)
3167 * - perm is the permission printed on ndd -get /.. \?
3170 vr_mac_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3171 uint_t pr_valsize, void *pr_val)
3173 vr_t *vrp;
3174 uint32_t err;
3175 uint64_t val;
3177 /* Since we have no private properties */
3178 _NOTE(ARGUNUSED(pr_name))
3180 err = 0;
3181 vrp = (vr_t *)arg;
3182 switch (pr_num) {
3183 case MAC_PROP_ADV_1000FDX_CAP:
3184 case MAC_PROP_ADV_1000HDX_CAP:
3185 case MAC_PROP_EN_1000FDX_CAP:
3186 case MAC_PROP_EN_1000HDX_CAP:
3187 val = 0;
3188 break;
3190 case MAC_PROP_ADV_100FDX_CAP:
3191 val = (vrp->chip.mii.anadv &
3192 MII_ABILITY_100BASE_TX_FD) != 0;
3193 break;
3195 case MAC_PROP_ADV_100HDX_CAP:
3196 val = (vrp->chip.mii.anadv &
3197 MII_ABILITY_100BASE_TX) != 0;
3198 break;
3200 case MAC_PROP_ADV_100T4_CAP:
3201 val = (vrp->chip.mii.anadv &
3202 MII_ABILITY_100BASE_T4) != 0;
3203 break;
3205 case MAC_PROP_ADV_10FDX_CAP:
3206 val = (vrp->chip.mii.anadv &
3207 MII_ABILITY_10BASE_T_FD) != 0;
3208 break;
3210 case MAC_PROP_ADV_10HDX_CAP:
3211 val = (vrp->chip.mii.anadv &
3212 MII_ABILITY_10BASE_T) != 0;
3213 break;
3215 case MAC_PROP_AUTONEG:
3216 val = (vrp->chip.mii.control &
3217 MII_CONTROL_ANE) != 0;
3218 break;
3220 case MAC_PROP_DUPLEX:
3221 val = vrp->chip.link.duplex;
3222 break;
3224 case MAC_PROP_EN_100FDX_CAP:
3225 val = (vrp->param.anadv_en &
3226 MII_ABILITY_100BASE_TX_FD) != 0;
3227 break;
3229 case MAC_PROP_EN_100HDX_CAP:
3230 val = (vrp->param.anadv_en &
3231 MII_ABILITY_100BASE_TX) != 0;
3232 break;
3234 case MAC_PROP_EN_100T4_CAP:
3235 val = (vrp->param.anadv_en &
3236 MII_ABILITY_100BASE_T4) != 0;
3237 break;
3239 case MAC_PROP_EN_10FDX_CAP:
3240 val = (vrp->param.anadv_en &
3241 MII_ABILITY_10BASE_T_FD) != 0;
3242 break;
3244 case MAC_PROP_EN_10HDX_CAP:
3245 val = (vrp->param.anadv_en &
3246 MII_ABILITY_10BASE_T) != 0;
3247 break;
3249 case MAC_PROP_EN_AUTONEG:
3250 val = vrp->param.an_en == VR_LINK_AUTONEG_ON;
3251 break;
3253 case MAC_PROP_FLOWCTRL:
3254 val = vrp->chip.link.flowctrl;
3255 break;
3257 case MAC_PROP_MTU:
3258 val = vrp->param.mtu;
3259 break;
3261 case MAC_PROP_SPEED:
3262 if (vrp->chip.link.speed ==
3263 VR_LINK_SPEED_100MBS)
3264 val = 100 * 1000 * 1000;
3265 else if (vrp->chip.link.speed ==
3266 VR_LINK_SPEED_10MBS)
3267 val = 10 * 1000 * 1000;
3268 else
3269 val = 0;
3270 break;
3272 case MAC_PROP_STATUS:
3273 val = vrp->chip.link.state;
3274 break;
3276 default:
3277 err = ENOTSUP;
3278 break;
3281 if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
3282 if (pr_valsize == sizeof (uint64_t))
3283 *(uint64_t *)pr_val = val;
3284 else if (pr_valsize == sizeof (uint32_t))
3285 *(uint32_t *)pr_val = val;
3286 else if (pr_valsize == sizeof (uint16_t))
3287 *(uint16_t *)pr_val = val;
3288 else if (pr_valsize == sizeof (uint8_t))
3289 *(uint8_t *)pr_val = val;
3290 else
3291 err = EINVAL;
3293 return (err);
3296 void
3297 vr_mac_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3298 mac_prop_info_handle_t prh)
3300 vr_t *vrp = (vr_t *)arg;
3301 uint8_t val, perm;
3303 /* Since we have no private properties */
3304 _NOTE(ARGUNUSED(pr_name))
3306 switch (pr_num) {
3307 case MAC_PROP_ADV_1000FDX_CAP:
3308 case MAC_PROP_ADV_1000HDX_CAP:
3309 case MAC_PROP_EN_1000FDX_CAP:
3310 case MAC_PROP_EN_1000HDX_CAP:
3311 case MAC_PROP_ADV_100FDX_CAP:
3312 case MAC_PROP_ADV_100HDX_CAP:
3313 case MAC_PROP_ADV_100T4_CAP:
3314 case MAC_PROP_ADV_10FDX_CAP:
3315 case MAC_PROP_ADV_10HDX_CAP:
3316 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3317 return;
3319 case MAC_PROP_EN_100FDX_CAP:
3320 val = (vrp->chip.mii.status &
3321 MII_STATUS_100_BASEX_FD) != 0;
3322 break;
3324 case MAC_PROP_EN_100HDX_CAP:
3325 val = (vrp->chip.mii.status &
3326 MII_STATUS_100_BASEX) != 0;
3327 break;
3329 case MAC_PROP_EN_100T4_CAP:
3330 val = (vrp->chip.mii.status &
3331 MII_STATUS_100_BASE_T4) != 0;
3332 break;
3334 case MAC_PROP_EN_10FDX_CAP:
3335 val = (vrp->chip.mii.status &
3336 MII_STATUS_10_FD) != 0;
3337 break;
3339 case MAC_PROP_EN_10HDX_CAP:
3340 val = (vrp->chip.mii.status &
3341 MII_STATUS_10) != 0;
3342 break;
3344 case MAC_PROP_AUTONEG:
3345 case MAC_PROP_EN_AUTONEG:
3346 val = (vrp->chip.mii.status &
3347 MII_STATUS_CANAUTONEG) != 0;
3348 break;
3350 case MAC_PROP_FLOWCTRL:
3351 mac_prop_info_set_default_link_flowctrl(prh,
3352 LINK_FLOWCTRL_BI);
3353 return;
3355 case MAC_PROP_MTU:
3356 mac_prop_info_set_range_uint32(prh,
3357 ETHERMTU, ETHERMTU);
3358 return;
3360 case MAC_PROP_DUPLEX:
3362 * Writability depends on autoneg.
3364 perm = ((vrp->chip.mii.control &
3365 MII_CONTROL_ANE) == 0) ? MAC_PROP_PERM_RW :
3366 MAC_PROP_PERM_READ;
3367 mac_prop_info_set_perm(prh, perm);
3369 if (perm == MAC_PROP_PERM_RW) {
3370 mac_prop_info_set_default_uint8(prh,
3371 VR_LINK_DUPLEX_FULL);
3373 return;
3375 case MAC_PROP_SPEED:
3376 perm = ((vrp->chip.mii.control &
3377 MII_CONTROL_ANE) == 0) ?
3378 MAC_PROP_PERM_RW : MAC_PROP_PERM_READ;
3379 mac_prop_info_set_perm(prh, perm);
3381 if (perm == MAC_PROP_PERM_RW) {
3382 mac_prop_info_set_default_uint64(prh,
3383 100 * 1000 * 1000);
3385 return;
3387 case MAC_PROP_STATUS:
3388 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3389 return;
3391 default:
3392 return;
3395 mac_prop_info_set_default_uint8(prh, val);
3399 * Set a property of the device.
3402 vr_mac_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3403 uint_t pr_valsize, const void *pr_val)
3405 vr_t *vrp;
3406 uint32_t err;
3407 uint64_t val;
3409 /* Since we have no private properties */
3410 _NOTE(ARGUNUSED(pr_name))
3412 err = 0;
3413 vrp = (vr_t *)arg;
3414 mutex_enter(&vrp->oplock);
3417 * The current set of public property values is passed as integers.
3418 * Private properties are passed as strings in pr_val, of length pr_valsize.
3420 if (pr_num != MAC_PROP_PRIVATE) {
3421 if (pr_valsize == sizeof (uint64_t))
3422 val = *(uint64_t *)pr_val;
3423 else if (pr_valsize == sizeof (uint32_t))
3424 val = *(uint32_t *)pr_val;
3425 else if (pr_valsize == sizeof (uint16_t))
3426 val = *(uint16_t *)pr_val;
3427 else if (pr_valsize == sizeof (uint8_t))
3428 val = *(uint8_t *)pr_val;
3429 else {
3430 mutex_exit(&vrp->oplock);
3431 return (EINVAL);
3435 switch (pr_num) {
3436 case MAC_PROP_DUPLEX:
3437 if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
3438 if (val == LINK_DUPLEX_FULL)
3439 vrp->chip.mii.control |=
3440 MII_CONTROL_FDUPLEX;
3441 else if (val == LINK_DUPLEX_HALF)
3442 vrp->chip.mii.control &=
3443 ~MII_CONTROL_FDUPLEX;
3444 else
3445 err = EINVAL;
3446 } else
3447 err = EINVAL;
3448 break;
3450 case MAC_PROP_EN_100FDX_CAP:
3451 if (val == 0)
3452 vrp->param.anadv_en &=
3453 ~MII_ABILITY_100BASE_TX_FD;
3454 else
3455 vrp->param.anadv_en |=
3456 MII_ABILITY_100BASE_TX_FD;
3457 break;
3459 case MAC_PROP_EN_100HDX_CAP:
3460 if (val == 0)
3461 vrp->param.anadv_en &=
3462 ~MII_ABILITY_100BASE_TX;
3463 else
3464 vrp->param.anadv_en |=
3465 MII_ABILITY_100BASE_TX;
3466 break;
3468 case MAC_PROP_EN_100T4_CAP:
3469 if (val == 0)
3470 vrp->param.anadv_en &=
3471 ~MII_ABILITY_100BASE_T4;
3472 else
3473 vrp->param.anadv_en |=
3474 MII_ABILITY_100BASE_T4;
3475 break;
3477 case MAC_PROP_EN_10FDX_CAP:
3478 if (val == 0)
3479 vrp->param.anadv_en &=
3480 ~MII_ABILITY_10BASE_T_FD;
3481 else
3482 vrp->param.anadv_en |=
3483 MII_ABILITY_10BASE_T_FD;
3484 break;
3486 case MAC_PROP_EN_10HDX_CAP:
3487 if (val == 0)
3488 vrp->param.anadv_en &=
3489 ~MII_ABILITY_10BASE_T;
3490 else
3491 vrp->param.anadv_en |=
3492 MII_ABILITY_10BASE_T;
3493 break;
3495 case MAC_PROP_AUTONEG:
3496 case MAC_PROP_EN_AUTONEG:
3497 if (val == 0) {
3498 vrp->param.an_en = VR_LINK_AUTONEG_OFF;
3499 vrp->chip.mii.control &= ~MII_CONTROL_ANE;
3500 } else {
3501 vrp->param.an_en = VR_LINK_AUTONEG_ON;
3502 if ((vrp->chip.mii.status &
3503 MII_STATUS_CANAUTONEG) != 0)
3504 vrp->chip.mii.control |=
3505 MII_CONTROL_ANE;
3506 else
3507 err = EINVAL;
3509 break;
3511 case MAC_PROP_FLOWCTRL:
3512 if (val == LINK_FLOWCTRL_NONE)
3513 vrp->param.anadv_en &= ~MII_ABILITY_PAUSE;
3514 else if (val == LINK_FLOWCTRL_BI)
3515 vrp->param.anadv_en |= MII_ABILITY_PAUSE;
3516 else
3517 err = EINVAL;
3518 break;
3520 case MAC_PROP_MTU:
3521 if (val >= ETHERMIN && val <= ETHERMTU)
3522 vrp->param.mtu = (uint32_t)val;
3523 else
3524 err = EINVAL;
3525 break;
3527 case MAC_PROP_SPEED:
3528 if (val == 10 * 1000 * 1000)
3529 vrp->chip.link.speed =
3530 VR_LINK_SPEED_10MBS;
3531 else if (val == 100 * 1000 * 1000)
3532 vrp->chip.link.speed =
3533 VR_LINK_SPEED_100MBS;
3534 else
3535 err = EINVAL;
3536 break;
3538 default:
3539 err = ENOTSUP;
3540 break;
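/*
 * Apply the change: the new advertisement is the enabled set limited
 * by the PHY and MAC capability masks, after which vr_link_init()
 * reprograms the link and restarts autonegotiation if it is enabled.
 */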
3542 if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
3543 vrp->chip.mii.anadv = vrp->param.anadv_en &
3544 (vrp->param.an_phymask & vrp->param.an_macmask);
3545 vr_link_init(vrp);
3547 mutex_exit(&vrp->oplock);
3548 return (err);
3553 * Logging and debug functions.
3555 static struct {
3556 kmutex_t mutex[1];
3557 const char *ifname;
3558 const char *fmt;
3559 int level;
3560 } prtdata;
3562 static void
3563 vr_vprt(const char *fmt, va_list args)
3565 char buf[512];
3567 ASSERT(mutex_owned(prtdata.mutex));
3568 (void) vsnprintf(buf, sizeof (buf), fmt, args);
3569 cmn_err(prtdata.level, prtdata.fmt, prtdata.ifname, buf);
3572 static void
3573 vr_log(vr_t *vrp, int level, const char *fmt, ...)
3575 va_list args;
3577 mutex_enter(prtdata.mutex);
3578 prtdata.ifname = vrp->ifname;
3579 prtdata.fmt = "!%s: %s";
3580 prtdata.level = level;
3582 va_start(args, fmt);
3583 vr_vprt(fmt, args);
3584 va_end(args);
3586 mutex_exit(prtdata.mutex);
3589 #if defined(DEBUG)
3590 static void
3591 vr_prt(const char *fmt, ...)
3593 va_list args;
3595 ASSERT(mutex_owned(prtdata.mutex));
3597 va_start(args, fmt);
3598 vr_vprt(fmt, args);
3599 va_end(args);
3601 mutex_exit(prtdata.mutex);
3604 void
3605 (*vr_debug())(const char *fmt, ...)
3607 mutex_enter(prtdata.mutex);
3608 prtdata.ifname = MODULENAME;
3609 prtdata.fmt = "^%s: %s\n";
3610 prtdata.level = CE_CONT;
3612 return (vr_prt);
3614 #endif /* DEBUG */
3616 DDI_DEFINE_STREAM_OPS(vr_dev_ops, nulldev, nulldev, vr_attach, vr_detach,
3617 nodev, NULL, D_MP, NULL, vr_quiesce);
3619 static struct modldrv vr_modldrv = {
3620 &mod_driverops, /* Type of module. This one is a driver */
3621 vr_ident, /* short description */
3622 &vr_dev_ops /* driver specific ops */
3625 static struct modlinkage modlinkage = {
3626 MODREV_1, (void *)&vr_modldrv, NULL
3630 _info(struct modinfo *modinfop)
3632 return (mod_info(&modlinkage, modinfop));
3636 _init(void)
3638 int status;
3640 mac_init_ops(&vr_dev_ops, MODULENAME);
3641 status = mod_install(&modlinkage);
3642 if (status == DDI_SUCCESS)
3643 mutex_init(prtdata.mutex, NULL, MUTEX_DRIVER, NULL);
3644 else
3645 mac_fini_ops(&vr_dev_ops);
3646 return (status);
3650 _fini(void)
3652 int status;
3654 status = mod_remove(&modlinkage);
3655 if (status == 0) {
3656 mac_fini_ops(&vr_dev_ops);
3657 mutex_destroy(prtdata.mutex);
3659 return (status);