/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_hio.h>
#include <sys/nxge/nxge_rxdma.h>
uint32_t	nxge_use_partition = 0;		/* debug partition flag */
uint32_t	nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 */
uint32_t	nxge_msi_enable = 2;
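
/*
 * Illustrative example (not part of the original source): like the other
 * globals in this file, nxge_msi_enable is normally overridden from
 * /etc/system, e.g.
 *
 *	set nxge:nxge_msi_enable = 1
 *
 * Which interrupt type each value selects (fixed, MSI, or MSI-X) is
 * decided by the interrupt setup code (nxge_add_intrs()) and may differ
 * by platform, so treat the example value as hypothetical.
 */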
/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed.  If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t	nxge_peu_reset_enable = 0;
/*
 * Software workaround for the hardware checksum bugs that affect
 * packet transmission and receive:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmitted packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: the driver computes the software checksum
 *	       based on the partial checksum computed
 *	       by the IP stack.
 *	- received packets:
 *	  TCP: marks packet checksum flags based on the hardware result.
 *	  UDP: does not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmitted packets:
 *	  TCP/UDP: use the hardware checksum feature.
 *	- received packets:
 *	  TCP/UDP: mark packet checksum flags based on the hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksums for both TCP and UDP will be computed
 *	  by the IP stack.
 *	- Software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Will be treated as if it were set to 2
 *	  (the stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t	nxge_cksum_offload = 0;
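
/*
 * Illustrative example (not part of the original source): to select
 * mode (3) above and have the stack compute all checksums, the tunable
 * can be set at boot time from /etc/system:
 *
 *	set nxge:nxge_cksum_offload = 2
 */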
/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t	nxge_rbr_spare_size = 0;
uint32_t	nxge_rcr_size = NXGE_RCR_DEFAULT;
uint16_t	nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
uint32_t	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t	nxge_no_msg = B_TRUE;		/* control message display */
uint32_t	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t	nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t	nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t	nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t	nxge_jumbo_mtu = TX_JUMBO_MTU;
nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;

#define	NXGE_LSO_MAXLEN	65535
uint32_t	nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Add a tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t	nxge_max_rx_pkts = 1024;
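
/*
 * Illustrative /etc/system examples for the tunables above (the values
 * shown are hypothetical; the real defaults come from the macros used
 * in the initializers):
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_rcr_size = 1024
 *	set nxge:nxge_max_rx_pkts = 512
 */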
/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t	nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t	nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t	nxge_rx_threshold_lo = NXGE_RX_COPY_3;
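
/*
 * Sketch of the intended interaction (an assumption; the authoritative
 * logic lives in the rxdma code, not here): once receive buffer usage
 * crosses nxge_rx_threshold_lo, packets that fit the
 * nxge_rx_buf_size_type block size are bcopy'd out rather than loaning
 * the buffer block upstream; once usage crosses nxge_rx_threshold_hi,
 * every packet is copied so that no more buffer blocks are loaned out.
 */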
/* Use kmem_alloc() to allocate data buffers. */
#if defined(__sparc)
uint32_t	nxge_use_kmem_alloc = 1;
#elif defined(__i386)
uint32_t	nxge_use_kmem_alloc = 0;
#else
uint32_t	nxge_use_kmem_alloc = 1;
#endif

rtrace_t npi_rtracebuf;
/*
 * The hardware sometimes fails to allow enough time for the link partner
 * to send an acknowledgement for packets that the hardware sent to it.
 * The hardware resends the packets earlier than it should in those
 * instances.  This behavior caused some switches to acknowledge the
 * wrong packets and it triggered the fatal error.
 * This software workaround sets the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 *	The following replay timeout value is 0xc
 *	in the PCI config space register 0xb8.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t	nxge_set_replay_timer = 1;
uint32_t	nxge_replay_timeout = 0xc;
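
/*
 * Sketch of how the two tunables combine (the field mask shown is
 * hypothetical; the authoritative code is nxge_set_pci_replay_timeout()
 * later in this file):
 *
 *	uint32_t val = pci_config_get32(handle,
 *	    PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val &= ~(0xfU << PCI_REPLAY_TIMEOUT_SHIFT);
 *	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 *
 * i.e. 0xc is shifted into bits 14 and up of the config register at
 * offset 0xb8, and nxge_set_replay_timer gates whether the write happens.
 */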
/*
 * The transmit serialization sometimes causes the driver to sleep
 * longer than it should before calling the transmit function.
 * The performance group suggested a tunable to set the maximum
 * wait time when needed; the default is 1 tick.
 */
uint32_t	nxge_tx_serial_maxsleep = 1;
/*
 * Hypervisor N2/NIU services information.
 */
/*
 * The following is the default API supported:
 * major 1 and minor 1.
 *
 * Please update MAX_NIU_MAJORS, MAX_NIU_MINORS, and the minor number
 * supported when newer Hypervisor API interfaces are added.  Also,
 * please update nxge_hsvc_register() accordingly.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

static void nxge_remove_hard_properties(p_nxge_t);

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern void nxge_grp_cleanup(p_nxge_t nxge);
static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);

static void nxge_test_map_regs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
    struct ddi_dma_attr *,
    size_t, ddi_device_acc_attr_t *, uint_t,
    p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
    char *, caddr_t);

extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
    int slot, int rdctbl, boolean_t usetbl);
void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
    boolean_t factory);

static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, void *);
static void nxge_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);
static void nxge_priv_propinfo(const char *, mac_prop_info_handle_t);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
    const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
    mac_ring_info_t *, mac_ring_handle_t);
static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);
static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);
char *nxge_priv_props[] = {
	"_class_opt_ipv4_tcp",
	"_class_opt_ipv4_udp",
	"_class_opt_ipv4_ah",
	"_class_opt_ipv4_sctp",
	"_class_opt_ipv6_tcp",
	"_class_opt_ipv6_udp",
	"_class_opt_ipv6_ah",
	"_class_opt_ipv6_sctp",
	NULL
};
#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,

void
nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);
/*
 * For applications that care about latency, PAE and the customers
 * requested that the driver provide tunables that let the user select
 * a higher number of interrupts, to spread the interrupts among
 * multiple channels.  The DDI framework limits the maximum number of
 * MSI-X resources to allocate to 8 (ddi_msix_alloc_limit).  If more
 * than 8 is requested, ddi_msix_alloc_limit must be raised accordingly.
 * The default number of MSI interrupts is 8 for 10G and 2 for 1G links.
 */
#define	NXGE_MSIX_MAX_ALLOWED	32
uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
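
/*
 * Illustrative example (not part of the original source): requesting 16
 * MSI-X vectors on a 10G port requires raising the DDI allocation limit
 * as well, e.g. in /etc/system:
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 *
 * Values above NXGE_MSIX_MAX_ALLOWED (32) are presumably clamped by the
 * driver's interrupt setup code.
 */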
/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;
void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;
nxge_os_mutex_t nxgedebuglock;

extern uint64_t npi_debug_level;
extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t,
    ddi_device_acc_attr_t *,
    ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;
/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};
ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;
/*
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
    0x10000, 0x20000, 0x40000, 0x80000,
    0x100000, 0x200000, 0x400000, 0x800000,
    0x1000000, 0x2000000, 0x4000000};
#endif
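
/*
 * Worked example of the comment above (illustrative only): with the full
 * table, a 16 MB buffer pool can be covered by four 0x400000-byte chunks
 * instead of 4096 0x1000-byte chunks, so the allocator prefers the
 * largest size the system can still provide.  See the buffer allocation
 * routines for the authoritative walk of this table.
 */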
/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */
extern void nxge_get_environs(nxge_t *);
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_rx_dma_attr);

	/* Create & initialize the per-Neptune data structure */
	/* (even if we're a guest). */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}

	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally, and the remaining 2 ports use BMAC (1G "Big"
		 * MAC).  The two types of MACs have different
		 * characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}

	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The address_hi, that is, the first int of the reg
		 * property, consists of the config handle, but we need to
		 * remove bits 28-31, which are OBP-specific info.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
	}

	/*
	 * Set the defaults for the MTU size.
	 */
	nxge_hw_id_init(nxgep);

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;
		uint32_t max_frame_size;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

		/*
		 * Enable the Jumbo property based on the "max-frame-size"
		 * property value.
		 */
		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
		    (max_frame_size <= TX_JUMBO_MTU)) {
			nxgep->mac.is_jumbo = B_TRUE;
			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
			    NXGE_EHEADER_VLAN_CRC;
		}
	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		if (status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_hio_vr_add failed"));
			(void) hsvc_unregister(&nxgep->niu_hsvc);
			nxgep->niu_hsvc_available = B_FALSE;
			goto nxge_attach_fail;
		}
		goto nxge_attach_exit;
	}

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}

	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}
static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}
static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	/*
	 * If this flag is set, it will affect the Neptune
	 * hardware only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}

	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Free any memory allocated for PHY properties.
	 */
	if (nxgep->phy_prop.cnt > 0) {
		KMEM_FREE(nxgep->phy_prop.arr,
		    sizeof (nxge_phy_mdio_val_t) * nxgep->phy_prop.cnt);
		nxgep->phy_prop.cnt = 0;
	}

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	NXGE_DEBUG_MSG((nxgep, OBP_CTL,
	    " nxge_unattach: remove all properties"));

	(void) ddi_prop_remove_all(nxgep->dip);

	nxge_remove_hard_properties(nxgep);

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}
static int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;
	int i, j;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hsvc_register"));
	if (nxgep->niu_type != N2_NIU) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hsvc_register"));
		return (DDI_SUCCESS);
	}

	/*
	 * Currently, the NIU Hypervisor API supports two major versions.
	 * If the Hypervisor introduces a higher major or minor version,
	 * please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly.
	 */
	nxgep->niu_hsvc_available = B_FALSE;
	bcopy(&niu_hsvc, &nxgep->niu_hsvc,
	    sizeof (hsvc_info_t));

	for (i = NIU_MAJOR_HI; i > 0; i--) {
		nxgep->niu_hsvc.hsvc_major = i;
		for (j = NIU_MINOR_HI; j >= 0; j--) {
			nxgep->niu_hsvc.hsvc_minor = j;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hsvc_register: %s: negotiating "
			    "hypervisor services revision %d "
			    "group: 0x%lx major: 0x%lx "
			    "minor: 0x%lx (niu_min_ver 0x%lx)",
			    nxgep->niu_hsvc.hsvc_modname,
			    nxgep->niu_hsvc.hsvc_rev,
			    nxgep->niu_hsvc.hsvc_group,
			    nxgep->niu_hsvc.hsvc_major,
			    nxgep->niu_hsvc.hsvc_minor,
			    nxgep->niu_min_ver));

			if ((status = hsvc_register(&nxgep->niu_hsvc,
			    &nxgep->niu_min_ver)) == 0) {
				/* Use the supported minor */
				nxgep->niu_hsvc.hsvc_minor =
				    nxgep->niu_min_ver;
				NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				    "nxge_hsvc_register: %s: negotiated "
				    "hypervisor services revision %d "
				    "group: 0x%lx major: 0x%lx "
				    "minor: 0x%lx (niu_min_ver 0x%lx)",
				    nxgep->niu_hsvc.hsvc_modname,
				    nxgep->niu_hsvc.hsvc_rev,
				    nxgep->niu_hsvc.hsvc_group,
				    nxgep->niu_hsvc.hsvc_major,
				    nxgep->niu_hsvc.hsvc_minor,
				    nxgep->niu_min_ver));

				nxgep->niu_hsvc_available = B_TRUE;
				NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				    "<== nxge_hsvc_register: "
				    "NIU Hypervisor service enabled"));
				return (DDI_SUCCESS);
			}

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hsvc_register: %s: negotiation failed - "
			    "try lower major number "
			    "hypervisor services revision %d "
			    "group: 0x%lx major: 0x%lx minor: 0x%lx "
			    "errno: %d",
			    nxgep->niu_hsvc.hsvc_modname,
			    nxgep->niu_hsvc.hsvc_rev,
			    nxgep->niu_hsvc.hsvc_group,
			    nxgep->niu_hsvc.hsvc_major,
			    nxgep->niu_hsvc.hsvc_minor, status));
		}
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_hsvc_register: %s: cannot negotiate "
	    "hypervisor services revision %d group: 0x%lx "
	    "major: 0x%lx minor: 0x%lx errno: %d",
	    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
	    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
	    niu_hsvc.hsvc_minor, status));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_hsvc_register: Register to NIU Hypervisor failed"));

	return (DDI_FAILURE);
}
static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	/*
	 * The driver is running on an N2-NIU system if devname is something
	 * like "/niu@80/network@0".
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/*
		 * Get function number:
		 *  - N2/NIU: "/niu@80/network@0" and "/niu@80/network@1"
		 */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;
		}

		func_num = (prop_val[0] >> 8) & 0x7;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Reg property found: fun # %d",
		    func_num));
		nxgep->function_num = func_num;
		if (isLDOMguest(nxgep)) {
			nxgep->function_num /= 2;
		}
		ddi_prop_free(prop_val);
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * Workaround for a bit-swapping bug in HW
		 * which ends up in no-snoop = yes,
		 * resulting in DMA not being synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh,
		    pci_offset);
		pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;

nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}
static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}
static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context
	 * because an fflp operation can take a very long time to
	 * complete and hence is not suitable to invoke from interrupt
	 * handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}
static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}
nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 *  enable the port, configure the dma channel bitmap,
		 *  configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Enable the interrupts for DDI.
	 */
	nxge_intrs_enable(nxgep);

	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}
timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
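
/*
 * Usage note (illustrative): nxge_start_timer() takes milliseconds and
 * converts them to clock ticks, so nxge_start_timer(nxgep, func, 1000)
 * arms a one-second timeout via drv_usectohz(1000 * 1000).
 */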
void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Reset the receive MAC side.
		 */
		(void) nxge_rx_mac_disable(nxgep);

		(void) nxge_ipp_drain(nxgep);
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the timer if the reset flag is not set.
	 * If this reset flag is set, the link monitor
	 * will not be started in order to stop further bus
	 * activities coming from this interface.
	 * The driver will start the monitor function
	 * if the interface is initialized again later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}
void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t regdata;
	int i, retry = 1;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t buf[2];
	uint64_t reg;

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
		/* In case a developer has changed nxge_debug_level. */
		nxgep->nxge_debug_level = nxge_debug_level;
	}

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);

		if (nxgep == NULL) {
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);
	}
}
char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
		}
	} else {
		for (i = 0; i < size; i++) {
			*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
		}
	}
	*cp = 0;
	return (etherbuf);
}
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char *dev_ptr;
	ddi_acc_handle_t pci_config_handle;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(cfg_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_test_map_regs"));
}
static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));

	return (status);
}
static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}
2099 static nxge_status_t
2100 nxge_setup_system_dma_pages(p_nxge_t nxgep
)
2102 int ddi_status
= DDI_SUCCESS
;
2104 ddi_dma_cookie_t cookie
;
2105 uint_t iommu_pagesize
;
2106 nxge_status_t status
= NXGE_OK
;
2108 NXGE_ERROR_MSG((nxgep
, DDI_CTL
, "==> nxge_setup_system_dma_pages"));
2109 nxgep
->sys_page_sz
= ddi_ptob(nxgep
->dip
, (ulong_t
)1);
2110 if (nxgep
->niu_type
!= N2_NIU
) {
2111 iommu_pagesize
= dvma_pagesize(nxgep
->dip
);
2112 NXGE_DEBUG_MSG((nxgep
, DDI_CTL
,
2113 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2114 " default_block_size %d iommu_pagesize %d",
2116 ddi_ptob(nxgep
->dip
, (ulong_t
)1),
2117 nxgep
->rx_default_block_size
,
2120 if (iommu_pagesize
!= 0) {
2121 if (nxgep
->sys_page_sz
== iommu_pagesize
) {
2122 if (iommu_pagesize
> 0x4000)
2123 nxgep
->sys_page_sz
= 0x4000;
2125 if (nxgep
->sys_page_sz
> iommu_pagesize
)
2126 nxgep
->sys_page_sz
= iommu_pagesize
;
2130 nxgep
->sys_page_mask
= ~(nxgep
->sys_page_sz
- 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    nxgep->sys_page_sz,
	    ddi_ptob(nxgep->dip, (ulong_t)1),
	    nxgep->rx_default_block_size,
	    nxgep->sys_page_mask));

	switch (nxgep->sys_page_sz) {
	default:
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif

	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
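	/*
	 * Editorial note: the spare handle is bound to an arbitrary kernel
	 * address only so that ddi_dma_burstsizes() can report the burst
	 * sizes the DDI will honor for this device; the binding itself is
	 * never used for I/O.
	 */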
nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));

	return (status);
}
static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

	status = nxge_alloc_rx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		return (NXGE_ERROR);
	}

	status = nxge_alloc_tx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		nxge_free_rx_mem_pool(nxgep);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
	return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

	nxge_free_rx_mem_pool(nxgep);
	nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}
nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
	int			rdc_max;
	p_nxge_dma_pt_cfg_t	p_all_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	p_nxge_dma_pool_t	dma_poolp;
	p_nxge_dma_common_t	*dma_buf_p;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_nxge_dma_common_t	*dma_cntl_p;
	uint32_t		*num_chunks;	/* per dma */
	nxge_status_t		status = NXGE_OK;

	uint32_t		nxge_port_rbr_size;
	uint32_t		nxge_port_rbr_spare_size;
	uint32_t		nxge_port_rcr_size;
	uint32_t		rx_cntl_alloc_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	rdc_max = NXGE_MAX_RDCS;

	/*
	 * Allocate memory for the common DMA data structures.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * rdc_max, KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with
	 * the default block size.
	 * rbr block counts are modulo the batch count (16).
	 */
	nxge_port_rbr_size = p_all_cfgp->rbr_size;
	nxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!nxge_port_rbr_size) {
		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
	}
	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = nxge_port_rbr_size;
	nxge_port_rbr_spare_size = nxge_rbr_spare_size;

	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
	}
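	/*
	 * Editorial note: e.g. with NXGE_RXDMA_POST_BATCH = 16, a requested
	 * size of 1000 is not a multiple of 16 and is rounded up to
	 * 16 * (1000/16 + 1) = 1008 blocks.
	 */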
	if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
		    "nxge_alloc_rx_mem_pool: RBR size too high %d, "
		    "set to default %d",
		    nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
		nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
	}
	if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
		    "nxge_alloc_rx_mem_pool: RCR too high %d, "
		    "set to default %d",
		    nxge_port_rcr_size, RCR_DEFAULT_MAX));
		nxge_port_rcr_size = RCR_DEFAULT_MAX;
	}

	/*
	 * N2/NIU has limitation on the descriptor sizes (contiguous
	 * memory allocation on data buffers to 4M (contig_mem_alloc)
	 * and little endian for control buffers (must use the ddi/dki mem alloc
	 * function).
	 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		nxge_port_rbr_spare_size = 0;
		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
		    (!ISP2(nxge_port_rbr_size))) {
			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
		}
		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
		    (!ISP2(nxge_port_rcr_size))) {
			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
		}
	}
#endif

	/*
	 * Addresses of receive block ring, receive completion ring and the
	 * mailbox must be all cache-aligned (64 bytes).
	 */
	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
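	/*
	 * Editorial note: the control area therefore holds, back to back,
	 * (rbr + spare) RBR descriptors, rcr_size completion entries, and
	 * one mailbox, all subject to the 64-byte alignment noted above.
	 */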
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
	    "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
	    "nxge_port_rcr_size = %d "
	    "rx_cntl_alloc_size = %d",
	    nxge_port_rbr_size, nxge_port_rbr_spare_size,
	    nxge_port_rcr_size,
	    rx_cntl_alloc_size));

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size *
		    (nxge_port_rbr_size + nxge_port_rbr_spare_size));

		if (!ISP2(rx_buf_alloc_size)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_rx_mem_pool: "
			    " must be power of 2"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}

		if (rx_buf_alloc_size > (1 << 22)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_rx_mem_pool: "
			    " limit size to 4M"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}
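		/*
		 * Editorial note: 1 << 22 is 4 MB, the contig_mem_alloc()
		 * ceiling noted above; e.g. with a 4 KB default block size
		 * this caps the ring at 1024 posted buffers.
		 */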
		if (rx_cntl_alloc_size < 0x2000) {
			rx_cntl_alloc_size = 0x2000;
		}
	}
#endif
	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
	nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size;
	nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size;

	dma_poolp->ndmas = p_cfgp->max_rdcs;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	nxgep->rx_buf_pool_p = dma_poolp;
	dma_poolp->dma_buf_pool_p = dma_buf_p;

	dma_cntl_poolp->ndmas = p_cfgp->max_rdcs;
	dma_cntl_poolp->buf_allocated = B_TRUE;
	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;

	/* Allocate the receive rings, too. */
	nxgep->rx_rbr_rings =
	    KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
	nxgep->rx_rbr_rings->rbr_rings =
	    KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP);
	nxgep->rx_rcr_rings =
	    KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
	nxgep->rx_rcr_rings->rcr_rings =
	    KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP);
	nxgep->rx_mbox_areas_p =
	    KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
	nxgep->rx_mbox_areas_p->rxmbox_areas =
	    KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP);

	nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas =
	    p_cfgp->max_rdcs;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));

nxge_alloc_rx_mem_pool_exit:
	return (status);
}
/*
 * nxge_alloc_rxb
 *
 *	Allocate buffers for an RDC.
 *
 * Arguments:
 *	nxgep
 *	channel		The channel to map into our kernel space.
 *
 * NPI function calls:
 *
 * NXGE function calls:
 *
 * Registers accessed:
 */
nxge_status_t
nxge_alloc_rxb(
    p_nxge_t nxgep,
    int channel)
{
	size_t			rx_buf_alloc_size;
	nxge_status_t		status = NXGE_OK;

	nxge_dma_common_t	**data;
	nxge_dma_common_t	**control;
	uint32_t		*num_chunks;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));

	/*
	 * Allocate memory for the receive buffers and descriptor rings.
	 * Replace these allocation functions with the interface functions
	 * provided by the partition manager if/when they are available.
	 */

	/*
	 * Allocate memory for the receive buffer blocks.
	 */
	rx_buf_alloc_size = (nxgep->rx_default_block_size *
	    (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));

	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];

	if ((status = nxge_alloc_rx_buf_dma(
	    nxgep, channel, data, rx_buf_alloc_size,
	    nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
	    "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];

	if ((status = nxge_alloc_rx_cntl_dma(
	    nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
	    != NXGE_OK) {
		nxge_free_rx_cntl_dma(nxgep, *control);
		(*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
		nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rxb: status 0x%08x", status));

	return (status);
}
void
nxge_free_rxb(
    p_nxge_t nxgep,
    int channel)
{
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	uint32_t		num_chunks;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));

	data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
	nxge_free_rx_buf_dma(nxgep, data, num_chunks);

	nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
	nxgep->rx_buf_pool_p->num_chunks[channel] = 0;

	control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
	nxge_free_rx_cntl_dma(nxgep, control);

	nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;

	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
	KMEM_FREE(control, sizeof (nxge_dma_common_t));

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
}
static void
nxge_free_rx_mem_pool(p_nxge_t nxgep)
{
	int rdc_max = NXGE_MAX_RDCS;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));

	if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated)"));
		return;
	}
	if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_rx_mem_pool "
		    "(null rx cntl buf pool or cntl buf not allocated)"));
		return;
	}

	KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * rdc_max);
	KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t));

	KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks,
	    sizeof (uint32_t) * rdc_max);
	KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * rdc_max);
	KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t));

	nxgep->rx_buf_pool_p = 0;
	nxgep->rx_cntl_pool_p = 0;

	KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings,
	    sizeof (p_rx_rbr_ring_t) * rdc_max);
	KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t));
	KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings,
	    sizeof (p_rx_rcr_ring_t) * rdc_max);
	KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t));
	KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas,
	    sizeof (p_rx_mbox_t) * rdc_max);
	KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t));

	nxgep->rx_rbr_rings = 0;
	nxgep->rx_rcr_rings = 0;
	nxgep->rx_mbox_areas_p = 0;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
}
static nxge_status_t
nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap,
    size_t alloc_size, size_t block_size, uint32_t *num_chunks)
{
	p_nxge_dma_common_t	rx_dmap;
	nxge_status_t		status = NXGE_OK;
	size_t			total_alloc_size;
	size_t			allocated = 0;
	int			i, size_index, array_size;
	boolean_t		use_kmem_alloc = B_FALSE;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));

	rx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
	    KM_SLEEP);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
	    dma_channel, alloc_size, block_size, dmap));

	total_alloc_size = alloc_size;

#if defined(RX_USE_RECLAIM_POST)
	total_alloc_size = alloc_size + alloc_size/4;
#endif

	i = 0;
	size_index = 0;
	array_size = sizeof (alloc_sizes)/sizeof (size_t);
	while ((size_index < array_size) &&
	    (alloc_sizes[size_index] < alloc_size))
		size_index++;
	if (size_index >= array_size) {
		size_index = array_size - 1;
	}
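	/*
	 * Editorial note: alloc_sizes[] is an ascending table of chunk
	 * sizes; size_index now selects the smallest entry that covers
	 * alloc_size (or the largest entry if none does), and the loop
	 * below steps down the table whenever an allocation fails, so a
	 * request may be satisfied with up to NXGE_DMA_BLOCK chunks.
	 */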
	/* For Neptune, use kmem_alloc if the kmem flag is set. */
	if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) {
		use_kmem_alloc = B_TRUE;
#if defined(__i386) || defined(__amd64)
		size_index = 0;
#endif
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_alloc_rx_buf_dma: "
		    "Neptune use kmem_alloc() - size_index %d",
		    size_index));
	}

	while ((allocated < total_alloc_size) &&
	    (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
		rx_dmap[i].dma_chunk_index = i;
		rx_dmap[i].block_size = block_size;
		rx_dmap[i].alength = alloc_sizes[size_index];
		rx_dmap[i].orig_alength = rx_dmap[i].alength;
		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
		rx_dmap[i].dma_channel = dma_channel;
		rx_dmap[i].contig_alloc_type = B_FALSE;
		rx_dmap[i].kmem_alloc_type = B_FALSE;
		rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC;

		/*
		 * N2/NIU: data buffers must be contiguous as the driver
		 *	needs to call Hypervisor api to set up
		 *	logical pages.
		 */
		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
			rx_dmap[i].contig_alloc_type = B_TRUE;
			rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC;
		} else if (use_kmem_alloc) {
			/* For Neptune, use kmem_alloc */
			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    "==> nxge_alloc_rx_buf_dma: "
			    "Neptune use kmem_alloc()"));
			rx_dmap[i].kmem_alloc_type = B_TRUE;
			rx_dmap[i].buf_alloc_type = KMEM_ALLOC;
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
		    "i %d nblocks %d alength %d",
		    dma_channel, i, &rx_dmap[i], block_size,
		    i, rx_dmap[i].nblocks,
		    rx_dmap[i].alength));
		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
		    &nxge_rx_dma_attr,
		    rx_dmap[i].alength,
		    &nxge_dev_buf_dma_acc_attr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    (p_nxge_dma_common_t)(&rx_dmap[i]));
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_alloc_rx_buf_dma: Alloc Failed: "
			    "dma %d size_index %d size requested %d",
			    dma_channel,
			    size_index,
			    rx_dmap[i].alength));
			size_index--;
		} else {
			rx_dmap[i].buf_alloc_state = BUF_ALLOCATED;
			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    " nxge_alloc_rx_buf_dma DONE alloc mem: "
			    "dma %d dma_buf_p $%p kaddrp $%p alength %d "
			    "buf_alloc_state %d alloc_type %d",
			    dma_channel,
			    &rx_dmap[i],
			    rx_dmap[i].kaddrp,
			    rx_dmap[i].alength,
			    rx_dmap[i].buf_alloc_state,
			    rx_dmap[i].buf_alloc_type));
			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    " alloc_rx_buf_dma allocated rdc %d "
			    "chunk %d size %x dvma %x bufp %llx kaddrp $%p",
			    dma_channel, i, rx_dmap[i].alength,
			    rx_dmap[i].ioaddr_pp, &rx_dmap[i],
			    rx_dmap[i].kaddrp));
			i++;
			allocated += alloc_sizes[size_index];
		}
	}

	if (allocated < total_alloc_size) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
		    "allocated 0x%x requested 0x%x",
		    dma_channel,
		    allocated, total_alloc_size));
		status = NXGE_ERROR;
		goto nxge_alloc_rx_mem_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
	    "allocated 0x%x requested 0x%x",
	    dma_channel,
	    allocated, total_alloc_size));

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    " alloc_rx_buf_dma rdc %d allocated %d chunks",
	    dma_channel, i));

	*num_chunks = i;
	*dmap = rx_dmap;

	goto nxge_alloc_rx_mem_exit;

nxge_alloc_rx_mem_fail1:
	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);

nxge_alloc_rx_mem_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_buf_dma status 0x%08x", status));

	return (status);
}
static void
nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
    uint32_t num_chunks)
{
	int	i;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));

	if (dmap == 0)
		return;

	for (i = 0; i < num_chunks; i++) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
		    i, dmap));
		nxge_dma_free_rx_data_buf(dmap++);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
}
static nxge_status_t
nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t size)
{
	p_nxge_dma_common_t	rx_dmap;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));

	rx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);

	rx_dmap->contig_alloc_type = B_FALSE;
	rx_dmap->kmem_alloc_type = B_FALSE;

	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
	    &nxge_desc_dma_attr,
	    size,
	    &nxge_dev_desc_dma_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    rx_dmap);
	if (status != NXGE_OK) {
		goto nxge_alloc_rx_cntl_dma_fail1;
	}

	*dmap = rx_dmap;
	goto nxge_alloc_rx_cntl_dma_exit;

nxge_alloc_rx_cntl_dma_fail1:
	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));

nxge_alloc_rx_cntl_dma_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));

	return (status);
}

static void
nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
{
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));

	if (dmap == 0)
		return;

	nxge_dma_mem_free(dmap);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
}
static nxge_status_t
nxge_tdc_sizes(
    nxge_t *nxgep,
    nxge_tdc_sizes_t *sizes)
{
	uint32_t threshhold;	/* The bcopy() threshhold */
	size_t tx_size;		/* Transmit buffer size */
	size_t cr_size;		/* Completion ring size */

	/*
	 * Assume that each DMA channel will be configured with the
	 * default transmit buffer size for copying transmit data.
	 * (If a packet is bigger than this, it will not be copied.)
	 */
	if (nxgep->niu_type == N2_NIU) {
		threshhold = TX_BCOPY_SIZE;
	} else {
		threshhold = nxge_bcopy_thresh;
	}
	tx_size = nxge_tx_ring_size * threshhold;

	cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
	cr_size += sizeof (txdma_mailbox_t);
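	/*
	 * Editorial note (illustrative numbers only): with a 1024-entry
	 * transmit ring and a 2048-byte bcopy threshold, tx_size is
	 * 1024 * 2048 = 2 MB of pre-mapped copy buffers, while cr_size
	 * covers 1024 descriptors plus one mailbox.
	 */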
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		if (!ISP2(tx_size)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_tdc_sizes: Tx size"
			    " must be power of 2"));
			return (NXGE_ERROR);
		}

		if (tx_size > (1 << 22)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_tdc_sizes: Tx size"
			    " limited to 4M"));
			return (NXGE_ERROR);
		}

		if (cr_size < 0x2000)
			cr_size = 0x2000;
	}
#endif

	sizes->threshhold = threshhold;
	sizes->tx_size = tx_size;
	sizes->cr_size = cr_size;

	return (NXGE_OK);
}
/*
 * nxge_alloc_txb
 *
 *	Allocate buffers for a TDC.
 *
 * Arguments:
 *	nxgep
 *	channel		The channel to map into our kernel space.
 *
 * NPI function calls:
 *
 * NXGE function calls:
 *
 * Registers accessed:
 */
nxge_status_t
nxge_alloc_txb(
    p_nxge_t nxgep,
    int channel)
{
	nxge_dma_common_t	**dma_buf_p;
	nxge_dma_common_t	**dma_cntl_p;
	uint32_t		*num_chunks;
	nxge_status_t		status = NXGE_OK;

	nxge_tdc_sizes_t	sizes;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb"));

	if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
		return (NXGE_ERROR);

	/*
	 * Allocate memory for transmit buffers and descriptor rings.
	 * Replace these allocation functions with the interface functions
	 * provided by the partition manager Real Soon Now.
	 */
	dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];

	dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];

	/*
	 * Allocate memory for transmit buffers and descriptor rings.
	 * Replace allocation functions with interface functions provided
	 * by the partition manager when it is available.
	 *
	 * Allocate memory for the transmit buffer pool.
	 */
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "sizes: tx: %ld, cr:%ld, th:%ld",
	    sizes.tx_size, sizes.cr_size, sizes.threshhold));

	status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
	    sizes.tx_size, sizes.threshhold, num_chunks);
	if (status != NXGE_OK) {
		cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
		return (status);
	}

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
	    sizes.cr_size);
	if (status != NXGE_OK) {
		nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
		cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
		return (status);
	}

	return (NXGE_OK);
}
void
nxge_free_txb(
    p_nxge_t nxgep,
    int channel)
{
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	uint32_t		num_chunks;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));

	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
	nxge_free_tx_buf_dma(nxgep, data, num_chunks);

	nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
	nxgep->tx_buf_pool_p->num_chunks[channel] = 0;

	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
	nxge_free_tx_cntl_dma(nxgep, control);

	nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;

	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
	KMEM_FREE(control, sizeof (nxge_dma_common_t));

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
}
/*
 * nxge_alloc_tx_mem_pool
 *
 *	This function allocates all of the per-port TDC control data
 *	structures. The per-channel (TDC) data structures are allocated
 *	when needed.
 */
nxge_status_t
nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
{
	nxge_hw_pt_cfg_t	*p_cfgp;
	nxge_dma_pool_t		*dma_poolp;
	nxge_dma_common_t	**dma_buf_p;
	nxge_dma_pool_t		*dma_cntl_poolp;
	nxge_dma_common_t	**dma_cntl_p;
	uint32_t		*num_chunks;	/* per dma */
	int			tdc_max;

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));

	p_cfgp = &nxgep->pt_config.hw_config;
	tdc_max = NXGE_MAX_TDCS;

	/*
	 * Allocate memory for each transmit DMA channel.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);

	if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
		    "nxge_alloc_tx_mem_pool: TDC too high %d, "
		    "set to default %d",
		    nxge_tx_ring_size, TDC_DEFAULT_MAX));
		nxge_tx_ring_size = TDC_DEFAULT_MAX;
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	/*
	 * N2/NIU has limitation on the descriptor sizes (contiguous
	 * memory allocation on data buffers to 4M (contig_mem_alloc)
	 * and little endian for control buffers (must use the ddi/dki mem alloc
	 * function). The transmit ring is limited to 8K (includes the
	 * mailbox).
	 */
	if (nxgep->niu_type == N2_NIU) {
		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
		    (!ISP2(nxge_tx_ring_size))) {
			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
		}
	}
#endif

	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
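	/*
	 * Editorial note: the N2/NIU clamp above keeps the descriptor ring
	 * a power of 2 and small enough to fit a single contig_mem_alloc()
	 * region, which the hypervisor logical-page mapping requires.
	 */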
	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * tdc_max, KM_SLEEP);

	dma_poolp->ndmas = p_cfgp->tdc.owned;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->dma_buf_pool_p = dma_buf_p;
	nxgep->tx_buf_pool_p = dma_poolp;

	dma_poolp->buf_allocated = B_TRUE;

	dma_cntl_poolp->ndmas = p_cfgp->tdc.owned;
	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
	nxgep->tx_cntl_pool_p = dma_cntl_poolp;

	dma_cntl_poolp->buf_allocated = B_TRUE;

	nxgep->tx_rings =
	    KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
	nxgep->tx_rings->rings =
	    KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP);
	nxgep->tx_mbox_areas_p =
	    KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
	nxgep->tx_mbox_areas_p->txmbox_areas_p =
	    KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP);

	nxgep->tx_rings->ndmas = p_cfgp->tdc.owned;

	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
	    "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d",
	    tdc_max, dma_poolp->ndmas));

	return (NXGE_OK);
}
static nxge_status_t
nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t alloc_size,
    size_t block_size, uint32_t *num_chunks)
{
	p_nxge_dma_common_t	tx_dmap;
	nxge_status_t		status = NXGE_OK;
	size_t			total_alloc_size;
	size_t			allocated = 0;
	int			i, size_index, array_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));

	tx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
	    KM_SLEEP);

	total_alloc_size = alloc_size;
	i = 0;
	size_index = 0;
	array_size = sizeof (alloc_sizes) / sizeof (size_t);
	while ((size_index < array_size) &&
	    (alloc_sizes[size_index] < alloc_size))
		size_index++;
	if (size_index >= array_size) {
		size_index = array_size - 1;
	}

	while ((allocated < total_alloc_size) &&
	    (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
		tx_dmap[i].dma_chunk_index = i;
		tx_dmap[i].block_size = block_size;
		tx_dmap[i].alength = alloc_sizes[size_index];
		tx_dmap[i].orig_alength = tx_dmap[i].alength;
		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
		tx_dmap[i].dma_channel = dma_channel;
		tx_dmap[i].contig_alloc_type = B_FALSE;
		tx_dmap[i].kmem_alloc_type = B_FALSE;

		/*
		 * N2/NIU: data buffers must be contiguous as the driver
		 *	needs to call Hypervisor api to set up
		 *	logical pages.
		 */
		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
			tx_dmap[i].contig_alloc_type = B_TRUE;
		}

		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
		    &nxge_tx_dma_attr,
		    tx_dmap[i].alength,
		    &nxge_dev_buf_dma_acc_attr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    (p_nxge_dma_common_t)(&tx_dmap[i]));
		if (status != NXGE_OK) {
			size_index--;
		} else {
			i++;
			allocated += alloc_sizes[size_index];
		}
	}

	if (allocated < total_alloc_size) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_alloc_tx_buf_dma: not enough channel %d: "
		    "allocated 0x%x requested 0x%x",
		    dma_channel,
		    allocated, total_alloc_size));
		status = NXGE_ERROR;
		goto nxge_alloc_tx_mem_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
	    "allocated 0x%x requested 0x%x",
	    dma_channel,
	    allocated, total_alloc_size));

	*num_chunks = i;
	*dmap = tx_dmap;
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
	    *dmap, i));
	goto nxge_alloc_tx_mem_exit;

nxge_alloc_tx_mem_fail1:
	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);

nxge_alloc_tx_mem_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_tx_buf_dma status 0x%08x", status));

	return (status);
}
static void
nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
    uint32_t num_chunks)
{
	int	i;

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));

	if (dmap == 0)
		return;

	for (i = 0; i < num_chunks; i++) {
		nxge_dma_mem_free(dmap++);
	}

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
}
static nxge_status_t
nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t size)
{
	p_nxge_dma_common_t	tx_dmap;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
	tx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);

	tx_dmap->contig_alloc_type = B_FALSE;
	tx_dmap->kmem_alloc_type = B_FALSE;

	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
	    &nxge_desc_dma_attr,
	    size,
	    &nxge_dev_desc_dma_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    tx_dmap);
	if (status != NXGE_OK) {
		goto nxge_alloc_tx_cntl_dma_fail1;
	}

	*dmap = tx_dmap;
	goto nxge_alloc_tx_cntl_dma_exit;

nxge_alloc_tx_cntl_dma_fail1:
	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));

nxge_alloc_tx_cntl_dma_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));

	return (status);
}

static void
nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
{
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));

	if (dmap == 0)
		return;

	nxge_dma_mem_free(dmap);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
}
/*
 * nxge_free_tx_mem_pool
 *
 *	This function frees all of the per-port TDC control data structures.
 *	The per-channel (TDC) data structures are freed when the channel
 *	is stopped.
 */
static void
nxge_free_tx_mem_pool(p_nxge_t nxgep)
{
	int tdc_max = NXGE_MAX_TDCS;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));

	if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_tx_mem_pool "
		    "(null tx buf pool or buf not allocated)"));
		return;
	}
	if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_tx_mem_pool "
		    "(null tx cntl buf pool or cntl buf not allocated)"));
		return;
	}

	/* 1. Free the mailboxes. */
	KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
	    sizeof (p_tx_mbox_t) * tdc_max);
	KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));

	nxgep->tx_mbox_areas_p = 0;

	/* 2. Free the transmit ring arrays. */
	KMEM_FREE(nxgep->tx_rings->rings,
	    sizeof (p_tx_ring_t) * tdc_max);
	KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));

	nxgep->tx_rings = 0;

	/* 3. Free the completion ring data structures. */
	KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * tdc_max);
	KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));

	nxgep->tx_cntl_pool_p = 0;

	/* 4. Free the data ring data structures. */
	KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
	    sizeof (uint32_t) * tdc_max);
	KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * tdc_max);
	KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));

	nxgep->tx_buf_pool_p = 0;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
}
static nxge_status_t
nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
    struct ddi_dma_attr *dma_attrp,
    size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
    p_nxge_dma_common_t dma_p)
{
	caddr_t		kaddrp;
	int		ddi_status = DDI_SUCCESS;
	boolean_t	contig_alloc_type;
	boolean_t	kmem_alloc_type;

	contig_alloc_type = dma_p->contig_alloc_type;

	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
		/*
		 * contig_alloc_type for contiguous memory only allowed
		 * for N2/NIU.
		 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc: alloc type not allowed (%d)",
		    dma_p->contig_alloc_type));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	dma_p->dma_handle = NULL;
	dma_p->acc_handle = NULL;
	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	kmem_alloc_type = dma_p->kmem_alloc_type;
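	/*
	 * Editorial note: three allocation strategies follow.
	 * ddi_dma_mem_alloc() is the default; kmem_alloc() is used for
	 * Neptune data buffers when nxge_use_kmem_alloc is set; and
	 * contig_mem_alloc() is required for N2/NIU so the buffer can be
	 * mapped as hypervisor logical pages. All three paths must end
	 * with exactly one DMA cookie.
	 */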
	switch (contig_alloc_type) {
	case B_FALSE:
		switch (kmem_alloc_type) {
		case B_FALSE:
			ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
			    length,
			    acc_attr_p,
			    xfer_flags,
			    DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
			    &dma_p->acc_handle);
			if (ddi_status != DDI_SUCCESS) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc: "
				    "ddi_dma_mem_alloc failed"));
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				return (NXGE_ERROR | NXGE_DDI_FAILED);
			}
			if (dma_p->alength < length) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
				    "< length."));
				ddi_dma_mem_free(&dma_p->acc_handle);
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->acc_handle = NULL;
				dma_p->dma_handle = NULL;
				return (NXGE_ERROR);
			}

			ddi_status = ddi_dma_addr_bind_handle(
			    dma_p->dma_handle, NULL,
			    kaddrp, dma_p->alength, xfer_flags,
			    DDI_DMA_DONTWAIT,
			    0, &dma_p->dma_cookie, &dma_p->ncookies);
			if (ddi_status != DDI_DMA_MAPPED) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc: ddi_dma_addr_bind "
				    "failed "
				    "(status 0x%x ncookies %d.)", ddi_status,
				    dma_p->ncookies));
				if (dma_p->acc_handle) {
					ddi_dma_mem_free(&dma_p->acc_handle);
					dma_p->acc_handle = NULL;
				}
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				return (NXGE_ERROR | NXGE_DDI_FAILED);
			}

			if (dma_p->ncookies != 1) {
				NXGE_DEBUG_MSG((nxgep, DMA_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
				    "> 1 cookie"
				    "(status 0x%x ncookies %d.)", ddi_status,
				    dma_p->ncookies));
				(void) ddi_dma_unbind_handle(
				    dma_p->dma_handle);
				if (dma_p->acc_handle) {
					ddi_dma_mem_free(&dma_p->acc_handle);
					dma_p->acc_handle = NULL;
				}
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				dma_p->acc_handle = NULL;
				return (NXGE_ERROR);
			}
			break;

		case B_TRUE:
			kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
			if (kaddrp == NULL) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
				    "kmem alloc failed"));
				return (NXGE_ERROR);
			}

			dma_p->alength = length;
			ddi_status = ddi_dma_addr_bind_handle(
			    dma_p->dma_handle,
			    NULL, kaddrp, dma_p->alength, xfer_flags,
			    DDI_DMA_DONTWAIT, 0,
			    &dma_p->dma_cookie, &dma_p->ncookies);
			if (ddi_status != DDI_DMA_MAPPED) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
				    "(kmem_alloc) failed kaddrp $%p length %d "
				    "(status 0x%x (%d) ncookies %d.)",
				    kaddrp, length,
				    ddi_status, ddi_status, dma_p->ncookies));
				KMEM_FREE(kaddrp, length);
				dma_p->acc_handle = NULL;
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				dma_p->kaddrp = NULL;
				return (NXGE_ERROR | NXGE_DDI_FAILED);
			}

			if (dma_p->ncookies != 1) {
				NXGE_DEBUG_MSG((nxgep, DMA_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
				    "(kmem_alloc) > 1 cookie"
				    "(status 0x%x ncookies %d.)", ddi_status,
				    dma_p->ncookies));
				(void) ddi_dma_unbind_handle(
				    dma_p->dma_handle);
				KMEM_FREE(kaddrp, length);
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				dma_p->acc_handle = NULL;
				dma_p->kaddrp = NULL;
				return (NXGE_ERROR);
			}

			dma_p->kaddrp = kaddrp;

			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
			    "kaddr $%p alength %d",
			    dma_p,
			    kaddrp,
			    dma_p->alength));
			break;
		}
		break;

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	case B_TRUE:
		kaddrp = (caddr_t)contig_mem_alloc(length);
		if (kaddrp == NULL) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:contig_mem_alloc failed."));
			ddi_dma_free_handle(&dma_p->dma_handle);
			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		dma_p->alength = length;
		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
		    &dma_p->dma_cookie, &dma_p->ncookies);
		if (ddi_status != DDI_DMA_MAPPED) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
			    "(status 0x%x ncookies %d.)", ddi_status,
			    dma_p->ncookies));

			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			    "==> nxge_dma_mem_alloc: (not mapped)"
			    "length %lu (0x%x) "
			    "free contig kaddrp $%p",
			    length, length,
			    kaddrp));

			contig_mem_free((void *)kaddrp, length);
			ddi_dma_free_handle(&dma_p->dma_handle);

			dma_p->dma_handle = NULL;
			dma_p->acc_handle = NULL;
			dma_p->alength = NULL;
			dma_p->kaddrp = NULL;

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		if (dma_p->ncookies != 1 ||
		    (dma_p->dma_cookie.dmac_laddress == NULL)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
			    "cookie or "
			    "dmac_laddress is NULL $%p size %d "
			    " (status 0x%x ncookies %d.)",
			    dma_p->dma_cookie.dmac_laddress,
			    dma_p->dma_cookie.dmac_size,
			    ddi_status,
			    dma_p->ncookies));

			contig_mem_free((void *)kaddrp, length);
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			ddi_dma_free_handle(&dma_p->dma_handle);

			dma_p->dma_handle = NULL;
			dma_p->acc_handle = NULL;
			dma_p->kaddrp = NULL;

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		break;

#else
	case B_TRUE:
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
#endif
	}

	dma_p->kaddrp = kaddrp;
	dma_p->last_kaddrp = (unsigned char *)kaddrp +
	    dma_p->alength - RXBUF_64B_ALIGNED;
#if defined(__i386)
	dma_p->ioaddr_pp =
	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
#else
	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
#endif
	dma_p->last_ioaddr_pp =
#if defined(__i386)
	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
#else
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress +
#endif
	    dma_p->alength - RXBUF_64B_ALIGNED;

	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	dma_p->orig_ioaddr_pp =
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress;
	dma_p->orig_alength = length;
	dma_p->orig_kaddrp = kaddrp;
	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
#endif

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
	    "dma buffer allocated: dma_p $%p "
	    "return dmac_laddress from cookie $%p cookie dmac_size %d "
	    "dma_p->ioaddr_p $%p "
	    "dma_p->orig_ioaddr_p $%p "
	    "alength %d (0x%x) "
	    "kaddrp $%p "
	    "length %d (0x%x)",
	    dma_p,
	    dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
	    dma_p->ioaddr_pp,
	    dma_p->orig_ioaddr_pp,
	    dma_p->alength, dma_p->alength,
	    kaddrp,
	    length, length));

	return (NXGE_OK);
}
static void
nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
{
	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (dma_p->contig_alloc_type &&
	    dma_p->orig_kaddrp && dma_p->orig_alength) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
		    "kaddrp $%p (orig_kaddrp $%p) "
		    "mem type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->contig_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));

		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
		dma_p->orig_alength = NULL;
		dma_p->orig_kaddrp = NULL;
		dma_p->contig_alloc_type = B_FALSE;
	}
#endif
	dma_p->kaddrp = NULL;
	dma_p->alength = NULL;
}
static void
nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
{
	uint64_t	kaddr;
	uint32_t	buf_size;

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));

	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL,
	    "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
	    dma_p,
	    dma_p->buf_alloc_state));

	if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "<== nxge_dma_free_rx_data_buf: "
		    "outstanding data buffers"));
		return;
	}
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (dma_p->contig_alloc_type &&
	    dma_p->orig_kaddrp && dma_p->orig_alength) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
		    "kaddrp $%p (orig_kaddrp $%p) "
		    "mem type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->contig_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));

		kaddr = (uint64_t)dma_p->orig_kaddrp;
		buf_size = dma_p->orig_alength;
		nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
		dma_p->orig_alength = NULL;
		dma_p->orig_kaddrp = NULL;
		dma_p->contig_alloc_type = B_FALSE;
		dma_p->kaddrp = NULL;
		dma_p->alength = NULL;
		return;
	}
#endif

	if (dma_p->kmem_alloc_type) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "nxge_dma_free_rx_data_buf: free kmem "
		    "kaddrp $%p (orig_kaddrp $%p) "
		    "alloc type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->kmem_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));
#if defined(__i386)
		kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
#else
		kaddr = (uint64_t)dma_p->kaddrp;
#endif
		buf_size = dma_p->orig_alength;
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "nxge_dma_free_rx_data_buf: free dmap $%p "
		    "kaddr $%p buf_size %d",
		    dma_p,
		    kaddr, buf_size));
		nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
		dma_p->orig_alength = 0;
		dma_p->kaddrp = NULL;
		dma_p->kmem_alloc_type = B_FALSE;
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
}
/*
 * nxge_m_start() -- start transmitting and receiving.
 *
 * This function is called by the MAC layer when the first
 * stream is open to prepare the hardware for sending
 * and receiving packets.
 */
static int
nxge_m_start(void *arg)
{
	p_nxge_t	nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));

	/*
	 * Are we already started?
	 */
	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
		return (0);
	}

	if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	/*
	 * Make sure RX MAC is disabled while we initialize.
	 */
	if (!isLDOMguest(nxgep)) {
		(void) nxge_rx_mac_disable(nxgep);
	}

	/*
	 * Grab the global lock.
	 */
	MUTEX_ENTER(nxgep->genlock);

	/*
	 * Initialize the driver and hardware.
	 */
	if (nxge_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_start: initialization failed"));
		MUTEX_EXIT(nxgep->genlock);
		return (EIO);
	}

	/*
	 * Start timer to check the system error and tx hangs
	 */
	if (!isLDOMguest(nxgep))
		nxgep->nxge_timerid = nxge_start_timer(nxgep,
		    nxge_check_hw_state, NXGE_CHECK_TIMER);
	else
		nxge_hio_start_timer(nxgep);

	nxgep->link_notify = B_TRUE;
	nxgep->link_check_count = 0;
	nxgep->nxge_mac_state = NXGE_MAC_STARTED;

	/*
	 * Let the global lock go, since we are initialized.
	 */
	MUTEX_EXIT(nxgep->genlock);

	/*
	 * Let the MAC start receiving packets, now that
	 * we are initialized.
	 */
	if (!isLDOMguest(nxgep)) {
		if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_start: enable of RX mac failed"));
			return (EIO);
		}

		/*
		 * Enable hardware interrupts.
		 */
		nxge_intr_hw_enable(nxgep);
	} else {
		/*
		 * In guest domain we enable RDCs and their interrupts as
		 * the last step.
		 */
		if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_start: enable of RDCs failed"));
			return (EIO);
		}

		if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_start: intrs enable for RDCs failed"));
			return (EIO);
		}
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
	return (0);
}
static boolean_t
nxge_check_groups_stopped(p_nxge_t nxgep)
{
	int	i;

	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		if (nxgep->rx_hio_groups[i].started)
			return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * nxge_m_stop(): stop transmitting and receiving.
 */
static void
nxge_m_stop(void *arg)
{
	p_nxge_t	nxgep = (p_nxge_t)arg;
	boolean_t	groups_stopped;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));

	/*
	 * Are the groups stopped?
	 */
	groups_stopped = nxge_check_groups_stopped(nxgep);
	ASSERT(groups_stopped == B_TRUE);
	if (!groups_stopped) {
		cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n",
		    nxgep->instance);
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Disable the RX mac.
		 */
		(void) nxge_rx_mac_disable(nxgep);

		/*
		 * Wait for the IPP to drain.
		 */
		(void) nxge_ipp_drain(nxgep);

		/*
		 * Disable hardware interrupts.
		 */
		nxge_intr_hw_disable(nxgep);
	} else {
		(void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE);
	}

	/*
	 * Grab the global lock.
	 */
	MUTEX_ENTER(nxgep->genlock);

	nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;

	/*
	 * Let go of the global lock.
	 */
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
}
static int
nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	p_nxge_t		nxgep = (p_nxge_t)arg;
	struct ether_addr	addrp;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_multicst: add %d", add));

	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
	if (add) {
		if (nxge_add_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: add multicast failed"));
			return (EINVAL);
		}
	} else {
		if (nxge_del_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: del multicast failed"));
			return (EINVAL);
		}
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));

	return (0);
}
static int
nxge_m_promisc(void *arg, boolean_t on)
{
	p_nxge_t	nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_promisc: on %d", on));

	if (nxge_set_promisc(nxgep, on)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_promisc: set promisc failed"));
		return (EINVAL);
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "<== nxge_m_promisc: on %d", on));

	return (0);
}
static void
nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	p_nxge_t	nxgep = (p_nxge_t)arg;
	struct iocblk	*iocp;
	boolean_t	need_privilege;
	int		err;
	int		cmd;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));

	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
		return;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;

	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
	case NXGE_RX_CLASS:
	case NXGE_RX_HASH:
		need_privilege = B_FALSE;
		break;
	case NXGE_INJECT_ERR:
		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
		nxge_err_inject(nxgep, wq, mp);
		break;
	}

	if (need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_ioctl: no priv"));
			return;
		}
	}
	switch (cmd) {
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
		break;

	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
		nxge_hw_ioctl(nxgep, wq, mp, iocp);
		break;

	case NXGE_RX_CLASS:
		if (nxge_rxclass_ioctl(nxgep, wq, mp->b_cont) < 0)
			miocnak(wq, mp, 0, EINVAL);
		else
			miocack(wq, mp, sizeof (rx_class_cfg_t), 0);
		break;
	case NXGE_RX_HASH:
		if (nxge_rxhash_ioctl(nxgep, wq, mp->b_cont) < 0)
			miocnak(wq, mp, 0, EINVAL);
		else
			miocack(wq, mp, sizeof (cfg_cmd_t), 0);
		break;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
}
extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
static void
nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory)
{
	p_nxge_mmac_stats_t	mmac_stats;
	int			i;
	nxge_mmac_t		*mmac_info;

	mmac_info = &nxgep->nxge_mmac_info;

	mmac_stats = &nxgep->statsp->mmac_stats;
	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
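	/*
	 * Editorial note: the pool entries store the address in reverse
	 * byte order, so the loop below copies octet (ETHERADDRL-1)-i
	 * into octet i to present the canonical order in the kstat.
	 */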
	for (i = 0; i < ETHERADDRL; i++) {
		if (factory) {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->factory_mac_pool[slot][
			    (ETHERADDRL-1) - i];
		} else {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->mac_pool[slot].addr[
			    (ETHERADDRL - 1) - i];
		}
	}
}
/*
 * nxge_altmac_set() -- Set an alternate MAC address
 */
static int
nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot,
    int rdctbl, boolean_t usetbl)
{
	uint8_t			portn;
	uint8_t			addrn;
	npi_mac_addr_t		altmac;
	hostinfo_t		mac_rdc;
	p_nxge_class_pt_cfg_t	clscfgp;

	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
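	/*
	 * Editorial note: the 48-bit address is packed big-endian into
	 * three 16-bit register words; e.g. 00:14:4f:a8:39:02 becomes
	 * w2 = 0x0014, w1 = 0x4fa8, w0 = 0x3902.
	 */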
	portn = nxgep->mac.portnum;
	addrn = (uint8_t)slot - 1;

	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET,
	    nxgep->function_num, addrn, &altmac) != NPI_SUCCESS)
		return (EIO);

	/*
	 * Set the rdc table number for the host info entry
	 * for this mac address slot.
	 */
	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
	mac_rdc.value = 0;
	if (usetbl)
		mac_rdc.bits.w0.rdc_tbl_num = rdctbl;
	else
		mac_rdc.bits.w0.rdc_tbl_num =
		    clscfgp->mac_host_info[addrn].rdctbl;
	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;

	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
		return (EIO);
	}

	/*
	 * Enable comparison with the alternate MAC address.
	 * While the first alternate addr is enabled by bit 1 of register
	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
	 * accordingly before calling npi_mac_altaddr_entry.
	 */
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;
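	/*
	 * Editorial note: e.g. slot 1 maps to compare bit 0 (addrn = 0)
	 * on an XMAC port, but to compare bit 1 (addrn = 1) on a BMAC
	 * port, per the register layout described above.
	 */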
	if (npi_mac_altaddr_enable(nxgep->npi_handle,
	    nxgep->function_num, addrn) != NPI_SUCCESS) {
		return (EIO);
	}

	return (0);
}
/*
 * nxge_m_mmac_add_g() - find an unused address slot, set the address
 * value to the one specified, enable the port to start filtering on
 * the new MAC address. Returns 0 on success.
 */
int
nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
    boolean_t usetbl)
{
	p_nxge_t	nxgep = arg;
	int		slot;
	nxge_mmac_t	*mmac_info;
	int		err;
	nxge_status_t	status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}

	/*
	 * Search for the first available slot. Because naddrfree
	 * is not zero, we are guaranteed to find one.
	 * Each of the first two ports of Neptune has 16 alternate
	 * MAC slots but only the first 7 (of 15) slots have assigned factory
	 * MAC addresses. We first search among the slots without bundled
	 * factory MACs. If we fail to find one in that range, then we
	 * search the slots with bundled factory MACs. A factory MAC
	 * will be wasted while the slot is used with a user MAC address.
	 * But the slot could be used by factory MAC again after calling
	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
	 */
	for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
			break;
	}

	ASSERT(slot <= mmac_info->num_mmac);

	if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
	    usetbl)) != 0) {
		mutex_exit(nxgep->genlock);
		return (err);
	}

	bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;
	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);

	mutex_exit(nxgep->genlock);
	return (0);
}
/*
 * Remove the specified mac address and update the HW not to filter
 * the mac address anymore.
 */
int
nxge_m_mmac_remove(void *arg, int slot)
{
	p_nxge_t	nxgep = arg;
	nxge_mmac_t	*mmac_info;
	uint8_t		portn;
	uint8_t		addrn;
	int		err = 0;
	nxge_status_t	status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}

	portn = nxgep->mac.portnum;
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
		    == NPI_SUCCESS) {
			mmac_info->naddrfree++;
			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
			/*
			 * Regardless of whether the MAC we just stopped
			 * filtering is a user addr or a factory addr, we
			 * must set the MMAC_VENDOR_ADDR flag if this slot
			 * has an associated factory MAC to indicate that
			 * a factory MAC is available.
			 */
			if (slot <= mmac_info->num_factory_mmac) {
				mmac_info->mac_pool[slot].flags
				    |= MMAC_VENDOR_ADDR;
			}
			/*
			 * Clear mac_pool[slot].addr so that kstat shows 0
			 * alternate MAC address if the slot is not used.
			 * (But nxge_m_mmac_get returns the factory MAC even
			 * when the slot is not used!)
			 */
			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
		} else {
			err = EIO;
		}
	} else {
		err = EINVAL;
	}

	mutex_exit(nxgep->genlock);
	return (err);
}
/*
 * The callback to query all the factory addresses. naddr must be the same as
 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
 * mcm_addr is the space allocated to keep all the addresses, whose size is
 * naddr * MAXMACADDRLEN.
 */
static void
nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr)
{
	nxge_t		*nxgep = arg;
	nxge_mmac_t	*mmac_info;
	int		i;

	mutex_enter(nxgep->genlock);

	mmac_info = &nxgep->nxge_mmac_info;
	ASSERT(naddr == mmac_info->num_factory_mmac);

	for (i = 0; i < naddr; i++) {
		bcopy(mmac_info->factory_mac_pool[i + 1],
		    addr + i * MAXMACADDRLEN, ETHERADDRL);
	}

	mutex_exit(nxgep->genlock);
}
static boolean_t
nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	nxge_t		*nxgep = arg;
	uint32_t	*txflags = cap_data;

	switch (cap) {
	case MAC_CAPAB_HCKSUM:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
		if (nxge_cksum_offload <= 1) {
			*txflags = HCKSUM_INET_PARTIAL;
		}
		break;

	case MAC_CAPAB_MULTIFACTADDR: {
		mac_capab_multifactaddr_t	*mfacp = cap_data;

		if (!isLDOMguest(nxgep)) {
			mutex_enter(nxgep->genlock);
			mfacp->mcm_naddr =
			    nxgep->nxge_mmac_info.num_factory_mmac;
			mfacp->mcm_getaddr = nxge_m_getfactaddr;
			mutex_exit(nxgep->genlock);
		}
		break;
	}

	case MAC_CAPAB_LSO: {
		mac_capab_lso_t	*cap_lso = cap_data;

		if (nxgep->soft_lso_enable) {
			if (nxge_cksum_offload <= 1) {
				cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
				if (nxge_lso_max > NXGE_LSO_MAXLEN) {
					nxge_lso_max = NXGE_LSO_MAXLEN;
				}
				cap_lso->lso_basic_tcp_ipv4.lso_max =
				    nxge_lso_max;
			}
			break;
		} else {
			return (B_FALSE);
		}
	}

	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t	*cap_rings = cap_data;
		p_nxge_hw_pt_cfg_t	p_cfgp = &nxgep->pt_config.hw_config;

		mutex_enter(nxgep->genlock);
		if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
			if (isLDOMguest(nxgep)) {
				cap_rings->mr_group_type =
				    MAC_GROUP_TYPE_STATIC;
				cap_rings->mr_rnum =
				    NXGE_HIO_SHARE_MAX_CHANNELS;
				cap_rings->mr_rget = nxge_fill_ring;
				cap_rings->mr_gnum = 1;
				cap_rings->mr_gget = nxge_hio_group_get;
				cap_rings->mr_gaddring = NULL;
				cap_rings->mr_gremring = NULL;
			} else {
				/*
				 * Service Domain.
				 */
				cap_rings->mr_group_type =
				    MAC_GROUP_TYPE_DYNAMIC;
				cap_rings->mr_rnum = p_cfgp->max_rdcs;
				cap_rings->mr_rget = nxge_fill_ring;
				cap_rings->mr_gnum = p_cfgp->max_rdc_grpids;
				cap_rings->mr_gget = nxge_hio_group_get;
				cap_rings->mr_gaddring = nxge_group_add_ring;
				cap_rings->mr_gremring = nxge_group_rem_ring;
			}

			NXGE_DEBUG_MSG((nxgep, RX_CTL,
			    "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]",
			    p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids));
4451 if (isLDOMguest(nxgep
)) {
4452 cap_rings
->mr_group_type
=
4453 MAC_GROUP_TYPE_STATIC
;
4454 cap_rings
->mr_rnum
=
4455 NXGE_HIO_SHARE_MAX_CHANNELS
;
4456 cap_rings
->mr_rget
= nxge_fill_ring
;
4457 cap_rings
->mr_gnum
= 0;
4458 cap_rings
->mr_gget
= NULL
;
4459 cap_rings
->mr_gaddring
= NULL
;
4460 cap_rings
->mr_gremring
= NULL
;
4465 cap_rings
->mr_group_type
=
4466 MAC_GROUP_TYPE_DYNAMIC
;
4467 cap_rings
->mr_rnum
= p_cfgp
->tdc
.count
;
4468 cap_rings
->mr_rget
= nxge_fill_ring
;
4473 * Do not report the default group: hence -1
4475 cap_rings
->mr_gnum
=
4476 NXGE_MAX_TDC_GROUPS
/ nxgep
->nports
- 1;
4477 cap_rings
->mr_gget
= nxge_hio_group_get
;
4478 cap_rings
->mr_gaddring
= nxge_group_add_ring
;
4479 cap_rings
->mr_gremring
= nxge_group_rem_ring
;
4482 NXGE_DEBUG_MSG((nxgep
, TX_CTL
,
4483 "==> nxge_m_getcapab: tx rings # of rings %d",
4484 p_cfgp
->tdc
.count
));
4486 mutex_exit(nxgep
->genlock
);
4491 case MAC_CAPAB_SHARES
: {
4492 mac_capab_share_t
*mshares
= (mac_capab_share_t
*)cap_data
;
4495 * Only the service domain driver responds to
4496 * this capability request.
4498 mutex_enter(nxgep
->genlock
);
4499 if (isLDOMservice(nxgep
)) {
4500 mshares
->ms_snum
= 3;
4501 mshares
->ms_handle
= (void *)nxgep
;
4502 mshares
->ms_salloc
= nxge_hio_share_alloc
;
4503 mshares
->ms_sfree
= nxge_hio_share_free
;
4504 mshares
->ms_sadd
= nxge_hio_share_add_group
;
4505 mshares
->ms_sremove
= nxge_hio_share_rem_group
;
4506 mshares
->ms_squery
= nxge_hio_share_query
;
4507 mshares
->ms_sbind
= nxge_hio_share_bind
;
4508 mshares
->ms_sunbind
= nxge_hio_share_unbind
;
4509 mutex_exit(nxgep
->genlock
);
4511 mutex_exit(nxgep
->genlock
);
static boolean_t
nxge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	default:
		return (B_FALSE);
	}
}
/*
 * callback functions for set/get of properties
 */
static int
nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	nxge_t		*nxgep = barg;
	p_nxge_param_t	param_arr = nxgep->param_arr;
	p_nxge_stats_t	statsp = nxgep->statsp;
	int		err = 0;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));

	mutex_enter(nxgep->genlock);
	if (statsp->port_stats.lb_mode != nxge_lb_normal &&
	    nxge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: loopback mode: read only"));
		mutex_exit(nxgep->genlock);
		return (EBUSY);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		nxgep->param_en_1000fdx =
		    param_arr[param_anar_1000fdx].value = *(uint8_t *)pr_val;
		goto reprogram;

	case MAC_PROP_EN_100FDX_CAP:
		nxgep->param_en_100fdx =
		    param_arr[param_anar_100fdx].value = *(uint8_t *)pr_val;
		goto reprogram;

	case MAC_PROP_EN_10FDX_CAP:
		nxgep->param_en_10fdx =
		    param_arr[param_anar_10fdx].value = *(uint8_t *)pr_val;
		goto reprogram;

	case MAC_PROP_AUTONEG:
		param_arr[param_autoneg].value = *(uint8_t *)pr_val;
		goto reprogram;

	case MAC_PROP_MTU: {
		uint32_t cur_mtu, new_mtu, old_framesize;

		cur_mtu = nxgep->mac.default_mtu;
		ASSERT(pr_valsize >= sizeof (new_mtu));
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
		    new_mtu, nxgep->mac.is_jumbo));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}

		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}

		if ((new_mtu < NXGE_DEFAULT_MTU) ||
		    (new_mtu > NXGE_MAXIMUM_MTU)) {
			err = EINVAL;
			break;
		}

		old_framesize = (uint32_t)nxgep->mac.maxframesize;
		nxgep->mac.maxframesize = (uint16_t)
		    (new_mtu + NXGE_EHEADER_VLAN_CRC);
		if (nxge_mac_set_framesize(nxgep)) {
			nxgep->mac.maxframesize =
			    (uint16_t)old_framesize;
			err = EINVAL;
			break;
		}

		nxgep->mac.default_mtu = new_mtu;
		nxgep->mac.is_jumbo = (new_mtu > NXGE_DEFAULT_MTU);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: set MTU: %d maxframe %d",
		    new_mtu, nxgep->mac.maxframesize));
		break;
	}

	case MAC_PROP_FLOWCTRL: {
		link_flowctrl_t	fl;

		ASSERT(pr_valsize >= sizeof (fl));
		bcopy(pr_val, &fl, sizeof (fl));

		switch (fl) {
		case LINK_FLOWCTRL_NONE:
			param_arr[param_anar_pause].value = 0;
			break;

		case LINK_FLOWCTRL_RX:
			param_arr[param_anar_pause].value = 1;
			break;

		case LINK_FLOWCTRL_TX:
		case LINK_FLOWCTRL_BI:
			err = EINVAL;
			break;
		default:
			err = EINVAL;
			break;
		}
reprogram:
		if ((err == 0) && !isLDOMguest(nxgep)) {
			if (!nxge_param_link_update(nxgep)) {
				err = EINVAL;
			}
		}
		break;
	}

	case MAC_PROP_PRIVATE:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: private property"));
		err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, pr_val);
		break;

	default:
		err = ENOTSUP;
		break;
	}

	mutex_exit(nxgep->genlock);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "<== nxge_m_setprop (return %d)", err));
	return (err);
}
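/*
 * Worked example (illustrative only): with the standard Ethernet MTU of
 * 1500 bytes, the frame size programmed into the MAC becomes
 *
 *	maxframesize = new_mtu + NXGE_EHEADER_VLAN_CRC
 *	             = 1500 + (14-byte header + 4-byte VLAN + 4-byte CRC)
 *	             = 1522 bytes
 *
 * assuming NXGE_EHEADER_VLAN_CRC covers the Ethernet header, one VLAN
 * tag, and the FCS; the exact constant is defined in the nxge headers.
 * This only sketches how nxge_m_setprop() derives the hardware frame
 * size from the requested MTU.
 */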
static int
nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
	nxge_t		*nxgep = barg;
	p_nxge_param_t	param_arr = nxgep->param_arr;
	p_nxge_stats_t	statsp = nxgep->statsp;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_m_getprop: pr_num %d", pr_num));

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
		break;

	case MAC_PROP_SPEED: {
		uint64_t val = statsp->mac_stats.link_speed * 1000000ull;

		ASSERT(pr_valsize >= sizeof (val));
		bcopy(&val, pr_val, sizeof (val));
		break;
	}

	case MAC_PROP_STATUS: {
		link_state_t state = statsp->mac_stats.link_up ?
		    LINK_STATE_UP : LINK_STATE_DOWN;

		ASSERT(pr_valsize >= sizeof (state));
		bcopy(&state, pr_val, sizeof (state));
		break;
	}

	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = param_arr[param_autoneg].value;
		break;

	case MAC_PROP_FLOWCTRL: {
		link_flowctrl_t fl = param_arr[param_anar_pause].value != 0 ?
		    LINK_FLOWCTRL_RX : LINK_FLOWCTRL_NONE;

		ASSERT(pr_valsize >= sizeof (fl));
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	}

	case MAC_PROP_ADV_1000FDX_CAP:
		*(uint8_t *)pr_val = param_arr[param_anar_1000fdx].value;
		break;

	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = nxgep->param_en_1000fdx;
		break;

	case MAC_PROP_ADV_100FDX_CAP:
		*(uint8_t *)pr_val = param_arr[param_anar_100fdx].value;
		break;

	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = nxgep->param_en_100fdx;
		break;

	case MAC_PROP_ADV_10FDX_CAP:
		*(uint8_t *)pr_val = param_arr[param_anar_10fdx].value;
		break;

	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)pr_val = nxgep->param_en_10fdx;
		break;

	case MAC_PROP_PRIVATE:
		return (nxge_get_priv_prop(nxgep, pr_name, pr_valsize,
		    pr_val));

	default:
		return (ENOTSUP);
	}

	return (0);
}
static void
nxge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
	nxge_t		*nxgep = barg;
	p_nxge_stats_t	statsp = nxgep->statsp;

	/*
	 * By default permissions are read/write unless specified
	 * otherwise by the driver.
	 */
	switch (pr_num) {
	case MAC_PROP_DUPLEX:
	case MAC_PROP_SPEED:
	case MAC_PROP_STATUS:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
		/*
		 * Note that read-only properties don't need to
		 * provide default values since they cannot be
		 * changed by the administrator.
		 */
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
		mac_prop_info_set_default_uint8(prh, 1);
		break;

	case MAC_PROP_AUTONEG:
		mac_prop_info_set_default_uint8(prh, 1);
		break;

	case MAC_PROP_FLOWCTRL:
		mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_RX);
		break;

	case MAC_PROP_MTU:
		mac_prop_info_set_range_uint32(prh,
		    NXGE_DEFAULT_MTU, NXGE_MAXIMUM_MTU);
		break;

	case MAC_PROP_PRIVATE:
		nxge_priv_propinfo(pr_name, prh);
		break;
	}

	mutex_enter(nxgep->genlock);
	if (statsp->port_stats.lb_mode != nxge_lb_normal &&
	    nxge_param_locked(pr_num)) {
		/*
		 * Some properties are locked (read-only) while the
		 * device is in any sort of loopback mode.
		 */
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
	}
	mutex_exit(nxgep->genlock);
}
static void
nxge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t prh)
{
	char valstr[64];

	bzero(valstr, sizeof (valstr));

	if (strcmp(pr_name, "_function_number") == 0 ||
	    strcmp(pr_name, "_fw_version") == 0 ||
	    strcmp(pr_name, "_port_mode") == 0 ||
	    strcmp(pr_name, "_hot_swap_phy") == 0) {
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);

	} else if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
		(void) snprintf(valstr, sizeof (valstr),
		    "%d", RXDMA_RCR_TO_DEFAULT);

	} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
		(void) snprintf(valstr, sizeof (valstr),
		    "%d", RXDMA_RCR_PTHRES_DEFAULT);

	} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
	    strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
	    strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
	    strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
	    strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
	    strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
	    strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
	    strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		(void) snprintf(valstr, sizeof (valstr), "%x",
		    NXGE_CLASS_FLOW_GEN_SERVER);

	} else if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		(void) snprintf(valstr, sizeof (valstr), "%d", 0);

	} else if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		(void) snprintf(valstr, sizeof (valstr), "%d", 1);

	} else if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		(void) snprintf(valstr, sizeof (valstr), "%d", 1);
	}

	if (strlen(valstr) > 0)
		mac_prop_info_set_default_str(prh, valstr);
}
static int
nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	p_nxge_param_t	param_arr = nxgep->param_arr;
	int		err = 0;
	long		result;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_set_priv_prop: name %s", pr_name));

	/* Blanking */
	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
		err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
		    (char *)pr_val,
		    (caddr_t)&param_arr[param_rxdma_intr_time]);
		if (err) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "unable to set (%s)", pr_name));
			err = EINVAL;
		} else {
			err = 0;
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "set (%s)", pr_name));
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
		err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
		    (char *)pr_val,
		    (caddr_t)&param_arr[param_rxdma_intr_pkts]);
		if (err) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "unable to set (%s)", pr_name));
			err = EINVAL;
		} else {
			err = 0;
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "set (%s)", pr_name));
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}

	/* Classification */
	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		if (pr_val == NULL) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "==> nxge_set_priv_prop: name %s (null)", pr_name));
			err = EINVAL;
			return (err);
		}

		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s "
		    "(lso %d pr_val %s value %d)",
		    pr_name, nxgep->soft_lso_enable, pr_val, result));

		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			if (nxgep->soft_lso_enable == (uint32_t)result) {
				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
				    "no change (%d %d)",
				    nxgep->soft_lso_enable, result));
				return (0);
			}
		}

		nxgep->soft_lso_enable = (int)result;

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}
	/*
	 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
	 * following code to be executed.
	 */
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_anar_10gfdx]);
		return (err);
	}
	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_anar_pause]);
		return (err);
	}

	return (EINVAL);
}
static int
nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
    void *pr_val)
{
	p_nxge_param_t	param_arr = nxgep->param_arr;
	char		valstr[MAXNAMELEN];
	int		err = EINVAL;
	uint_t		strsize;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_get_priv_prop: property %s", pr_name));

	/* function number */
	if (strcmp(pr_name, "_function_number") == 0) {
		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->function_num);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %d valstr %s)",
		    pr_name, nxgep->function_num, valstr));

		err = 0;
		goto done;
	}

	/* Neptune firmware version */
	if (strcmp(pr_name, "_fw_version") == 0) {
		(void) snprintf(valstr, sizeof (valstr), "%s",
		    nxgep->vpd_info.ver);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %d valstr %s)",
		    pr_name, nxgep->vpd_info.ver, valstr));

		err = 0;
		goto done;
	}

	/* port PHY mode */
	if (strcmp(pr_name, "_port_mode") == 0) {
		switch (nxgep->mac.portmode) {
		case PORT_1G_COPPER:
			(void) snprintf(valstr, sizeof (valstr), "1G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[Hot Swappable]" : "");
			break;
		case PORT_1G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_COPPER:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_SERDES:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G serdes %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_SERDES:
			(void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_TN1010:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_TN1010:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_RGMII_FIBER:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_HSP_MODE:
			(void) snprintf(valstr, sizeof (valstr),
			    "phy not present[hot swappable]");
			break;
		default:
			(void) snprintf(valstr, sizeof (valstr), "unknown %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %s)",
		    pr_name, valstr));

		err = 0;
		goto done;
	}

	/* Hot swappable PHY */
	if (strcmp(pr_name, "_hot_swap_phy") == 0) {
		(void) snprintf(valstr, sizeof (valstr), "%s",
		    nxgep->hot_swappable_phy ?
		    "yes" : "no");

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %d valstr %s)",
		    pr_name, nxgep->hot_swappable_phy, valstr));

		err = 0;
		goto done;
	}

	/* Receive Interrupt Blanking Parameters */
	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
		err = 0;
		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->intr_timeout);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name,
		    (uint32_t)nxgep->intr_timeout));
		goto done;
	}

	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
		err = 0;
		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->intr_threshold);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name, (uint32_t)nxgep->intr_threshold));
		goto done;
	}

	/* Classification and Load Distribution Configuration */
	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_tcp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_udp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_tcp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_udp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	/* Software LSO */
	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		(void) snprintf(valstr, sizeof (valstr),
		    "%d", nxgep->soft_lso_enable);
		err = 0;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name, nxgep->soft_lso_enable));
		goto done;
	}
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = 0;
		if (nxgep->param_arr[param_anar_10gfdx].value != 0) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
			goto done;
		} else {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			goto done;
		}
	}
	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		err = 0;
		if (nxgep->param_arr[param_anar_pause].value != 0) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
			goto done;
		} else {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			goto done;
		}
	}

done:
	if (err == 0) {
		strsize = (uint_t)strlen(valstr);
		if (pr_valsize < strsize) {
			err = ENOBUFS;
		} else {
			(void) strlcpy(pr_val, valstr, pr_valsize);
		}
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "<== nxge_get_priv_prop: return %d", err));

	return (err);
}
/*
 * Module loading and removing entry points.
 */
DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
    nodev, NULL, D_MP, NULL, nxge_quiesce);

#define	NXGE_DESC_VER	"Sun NIU 10Gb Ethernet"

/*
 * Module linkage information for the kernel.
 */
static struct modldrv nxge_modldrv = {
	&mod_driverops,
	NXGE_DESC_VER,
	&nxge_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &nxge_modldrv, NULL
};

int
_init(void)
{
	int status;

	MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));

	mac_init_ops(&nxge_dev_ops, "nxge");

	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
	if (status != 0) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "failed to init device soft state"));
		goto fail;
	}

	status = mod_install(&modlinkage);
	if (status != 0) {
		ddi_soft_state_fini(&nxge_list);
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
		goto fail;
	}

	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status));
	return (status);

fail:
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status));
	MUTEX_DESTROY(&nxgedebuglock);
	return (status);
}

int
_fini(void)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));

	if (nxge_mblks_pending)
		return (EBUSY);

	status = mod_remove(&modlinkage);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((NULL, MOD_CTL,
		    "Module removal failed 0x%08x",
		    status));
		goto fail;
	}

	mac_fini_ops(&nxge_dev_ops);

	ddi_soft_state_fini(&nxge_list);

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status));

	MUTEX_DESTROY(&nxge_common_lock);
	MUTEX_DESTROY(&nxgedebuglock);
	return (status);

fail:
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status));
	return (status);
}

int
_info(struct modinfo *modinfop)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
	status = mod_info(&modlinkage, modinfop);
	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));

	return (status);
}
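/*
 * Load/unload ordering sketch (illustrative note): _init() must undo its
 * work in reverse order on failure, and _fini() mirrors the sequence:
 *
 *	_init:  mac_init_ops -> ddi_soft_state_init -> mod_install
 *	_fini:  mod_remove   -> mac_fini_ops        -> ddi_soft_state_fini
 *
 * mod_remove() fails while any instance remains attached, and _fini()
 * additionally refuses to proceed while loaned mblks are outstanding
 * (nxge_mblks_pending), so the module cannot unload under its buffers.
 */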
/*ARGSUSED*/
static int
nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
{
	p_nxge_ring_handle_t	rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t		nxgep = rhp->nxgep;
	uint32_t		channel;
	p_tx_ring_t		ring;

	channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
	ring = nxgep->tx_rings->rings[channel];

	MUTEX_ENTER(&ring->lock);
	ASSERT(ring->tx_ring_handle == NULL);
	ring->tx_ring_handle = rhp->ring_handle;
	MUTEX_EXIT(&ring->lock);

	return (0);
}

static void
nxge_tx_ring_stop(mac_ring_driver_t rdriver)
{
	p_nxge_ring_handle_t	rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t		nxgep = rhp->nxgep;
	uint32_t		channel;
	p_tx_ring_t		ring;

	channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
	ring = nxgep->tx_rings->rings[channel];

	MUTEX_ENTER(&ring->lock);
	ASSERT(ring->tx_ring_handle != NULL);
	ring->tx_ring_handle = (mac_ring_handle_t)NULL;
	MUTEX_EXIT(&ring->lock);
}
static int
nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
{
	p_nxge_ring_handle_t	rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t		nxgep = rhp->nxgep;
	uint32_t		channel;
	p_rx_rcr_ring_t		ring;
	int			i;

	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
	ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&ring->lock);

	if (ring->started) {
		ASSERT(ring->started == B_FALSE);
		MUTEX_EXIT(&ring->lock);
		return (0);
	}

	/* set rcr_ring */
	for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
		if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
		    (nxgep->ldgvp->ldvp[i].channel == channel)) {
			ring->ldvp = &nxgep->ldgvp->ldvp[i];
			ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
		}
	}

	ring->rcr_mac_handle = rhp->ring_handle;
	ring->rcr_gen_num = mr_gen_num;
	ring->started = B_TRUE;
	rhp->ring_gen_num = mr_gen_num;
	MUTEX_EXIT(&ring->lock);

	return (0);
}

static void
nxge_rx_ring_stop(mac_ring_driver_t rdriver)
{
	p_nxge_ring_handle_t	rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t		nxgep = rhp->nxgep;
	uint32_t		channel;
	p_rx_rcr_ring_t		ring;

	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
	ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&ring->lock);
	ASSERT(ring->started == B_TRUE);
	ring->rcr_mac_handle = NULL;
	ring->ldvp = NULL;
	ring->ldgp = NULL;
	ring->started = B_FALSE;
	MUTEX_EXIT(&ring->lock);
}
static int
nxge_ring_get_htable_idx(p_nxge_t nxgep, mac_ring_type_t type,
    uint32_t channel)
{
	int i;

	if (isLDOMguest(nxgep)) {
		return (nxge_hio_get_dc_htable_idx(nxgep,
		    (type == MAC_RING_TYPE_TX) ? VP_BOUND_TX : VP_BOUND_RX,
		    channel));
	}

	ASSERT(nxgep->ldgvp != NULL);

	switch (type) {
	case MAC_RING_TYPE_TX:
		for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
			if ((nxgep->ldgvp->ldvp[i].is_txdma) &&
			    (nxgep->ldgvp->ldvp[i].channel == channel)) {
				return ((int)
				    nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
			}
		}
		break;

	case MAC_RING_TYPE_RX:
		for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
			if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
			    (nxgep->ldgvp->ldvp[i].channel == channel)) {
				return ((int)
				    nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
			}
		}
		break;
	}

	return (-1);
}
/*
 * Callback function for MAC layer to register all rings.
 */
static void
nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	p_nxge_t		nxgep = (p_nxge_t)arg;
	p_nxge_hw_pt_cfg_t	p_cfgp = &nxgep->pt_config.hw_config;
	p_nxge_intr_t		intrp;
	uint32_t		channel;
	int			htable_idx;
	p_nxge_ring_handle_t	rhandlep;

	ASSERT(nxgep != NULL);
	ASSERT(p_cfgp != NULL);
	ASSERT(infop != NULL);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_fill_ring 0x%x index %d", rtype, index));

	switch (rtype) {
	case MAC_RING_TYPE_TX: {
		mac_intr_t	*mintr = &infop->mri_intr;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
		    rtype, index, p_cfgp->tdc.count));

		ASSERT((index >= 0) && (index < p_cfgp->tdc.count));
		rhandlep = &nxgep->tx_ring_handles[index];
		rhandlep->nxgep = nxgep;
		rhandlep->index = index;
		rhandlep->ring_handle = rh;

		channel = nxgep->pt_config.hw_config.tdc.start + index;
		rhandlep->channel = channel;
		intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
		htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
		    channel);
		if (htable_idx >= 0)
			mintr->mi_ddi_handle = intrp->htable[htable_idx];
		else
			mintr->mi_ddi_handle = NULL;

		infop->mri_driver = (mac_ring_driver_t)rhandlep;
		infop->mri_start = nxge_tx_ring_start;
		infop->mri_stop = nxge_tx_ring_stop;
		infop->mri_tx = nxge_tx_ring_send;
		infop->mri_stat = nxge_tx_ring_stat;
		infop->mri_flags = MAC_RING_TX_SERIALIZE;
		break;
	}

	case MAC_RING_TYPE_RX: {
		mac_intr_t		nxge_mac_intr;
		int			nxge_rindex;
		p_nxge_intr_t		intrp;

		intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
		    rtype, index, p_cfgp->max_rdcs));

		/*
		 * 'index' is the ring index within the group.
		 * Find the ring index in the nxge instance.
		 */
		nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index);
		channel = nxgep->pt_config.hw_config.start_rdc + index;
		intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

		ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs));
		rhandlep = &nxgep->rx_ring_handles[nxge_rindex];
		rhandlep->nxgep = nxgep;
		rhandlep->index = nxge_rindex;
		rhandlep->ring_handle = rh;
		rhandlep->channel = channel;

		/*
		 * Entrypoint to enable interrupt (disable poll) and
		 * disable interrupt (enable poll).
		 */
		bzero(&nxge_mac_intr, sizeof (nxge_mac_intr));
		nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep;
		nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll;
		nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll;

		htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
		    channel);
		if (htable_idx >= 0)
			nxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
		else
			nxge_mac_intr.mi_ddi_handle = NULL;

		infop->mri_driver = (mac_ring_driver_t)rhandlep;
		infop->mri_start = nxge_rx_ring_start;
		infop->mri_stop = nxge_rx_ring_stop;
		infop->mri_intr = nxge_mac_intr;
		infop->mri_poll = nxge_rx_poll;
		infop->mri_stat = nxge_rx_ring_stat;
		infop->mri_flags = MAC_RING_RX_ENQUEUE;
		break;
	}

	default:
		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", rtype));
}
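/*
 * Illustrative note on the RX polling contract: the mac_intr_t handed to
 * the MAC layer deliberately cross-wires the verbs, because from the
 * framework's point of view "enable interrupts" means "leave polling":
 *
 *	mi_enable  -> nxge_disable_poll  (re-arm hardware interrupts)
 *	mi_disable -> nxge_enable_poll   (mask interrupts, poll via mri_poll)
 *
 * This is inferred from the assignments in nxge_fill_ring() above.
 */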
static int
nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
    mac_ring_type_t type)
{
	nxge_ring_group_t	*rgroup = (nxge_ring_group_t *)gh;
	nxge_ring_handle_t	*rhandle = (nxge_ring_handle_t *)rh;
	nxge_t			*nxge;
	nxge_grp_t		*grp;
	nxge_rdc_grp_t		*rdc_grp;
	uint16_t		channel;	/* device-wise ring id */
	int			dev_gindex;
	int			rv = 0;

	nxge = rgroup->nxgep;

	switch (type) {
	case MAC_RING_TYPE_TX:
		/*
		 * nxge_grp_dc_add takes a channel number which is a
		 * device-wise ring ID.
		 */
		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;

		/*
		 * Remove the ring from the default group
		 */
		if (rgroup->gindex != 0) {
			(void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
		}

		/*
		 * nxge->tx_set.group[] is an array of groups indexed by
		 * a "port" group ID.
		 */
		grp = nxge->tx_set.group[rgroup->gindex];
		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
		if (rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
		}
		break;

	case MAC_RING_TYPE_RX:
		/*
		 * nxge->rx_set.group[] is an array of groups indexed by
		 * a "port" group ID.
		 */
		grp = nxge->rx_set.group[rgroup->gindex];

		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
		    rgroup->gindex;
		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];

		/*
		 * nxge_grp_dc_add takes a channel number which is a
		 * device-wise ring ID.
		 */
		channel = nxge->pt_config.hw_config.start_rdc + rhandle->index;
		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel);
		if (rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
			break;
		}

		rdc_grp->map |= (1 << channel);
		rdc_grp->max_rdcs++;

		(void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
		break;
	}

	return (rv);
}
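/*
 * Illustrative example of the RDC group bookkeeping above: a group's
 * "map" is a bitmask of device-wide RDC channels. Adding channel 5 to a
 * group and later removing it is plain bit arithmetic:
 *
 *	rdc_grp->map |= (1 << 5);	set bit 5, then max_rdcs++
 *	rdc_grp->map &= ~(1 << 5);	clear bit 5, then max_rdcs--
 *
 * nxge_init_fzc_rdc_tbl() then pushes the updated table to hardware.
 */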
static void
nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
    mac_ring_type_t type)
{
	nxge_ring_group_t	*rgroup = (nxge_ring_group_t *)gh;
	nxge_ring_handle_t	*rhandle = (nxge_ring_handle_t *)rh;
	nxge_t			*nxge;
	uint16_t		channel;	/* device-wise ring id */
	nxge_rdc_grp_t		*rdc_grp;
	int			dev_gindex;

	nxge = rgroup->nxgep;

	switch (type) {
	case MAC_RING_TYPE_TX:
		dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid +
		    rgroup->gindex;
		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
		nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

		/*
		 * Add the ring back to the default group
		 */
		if (rgroup->gindex != 0) {
			nxge_grp_t *grp;

			grp = nxge->tx_set.group[0];
			(void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
		}
		break;

	case MAC_RING_TYPE_RX:
		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
		    rgroup->gindex;
		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
		channel = rdc_grp->start_rdc + rhandle->index;
		nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

		rdc_grp->map &= ~(1 << channel);
		rdc_grp->max_rdcs--;

		(void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
		break;
	}
}
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs(p_nxge_t nxgep)
{
	int		intr_types;
	int		type = 0;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));

	nxgep->nxge_intr_type.intr_registered = B_FALSE;
	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
	nxgep->nxge_intr_type.msi_intx_cnt = 0;
	nxgep->nxge_intr_type.intr_added = 0;
	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
	nxgep->nxge_intr_type.intr_type = 0;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	} else if (nxge_msi_enable) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}
	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Solaris MSI-X is not supported yet. Use MSI for now.
	 * nxge_msi_enable:
	 *	1 - MSI		2 - MSI-X	others - FIXED
	 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;

	case 1:
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}
static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int		intr_type;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail, nrequest;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x%, "
		    "nintrs: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI must be power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_typ:nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldevs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		ldgp->htable_idx = x;
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}
6247 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep
, uint32_t int_type
)
6249 dev_info_t
*dip
= nxgep
->dip
;
6251 p_nxge_intr_t intrp
;
6256 int nactual
, nrequired
;
6259 int ddi_status
= DDI_SUCCESS
;
6260 nxge_status_t status
= NXGE_OK
;
6262 NXGE_DEBUG_MSG((nxgep
, INT_CTL
, "==> nxge_add_intrs_adv_type_fix"));
6263 intrp
= (p_nxge_intr_t
)&nxgep
->nxge_intr_type
;
6264 intrp
->start_inum
= 0;
6266 ddi_status
= ddi_intr_get_nintrs(dip
, int_type
, &nintrs
);
6267 if ((ddi_status
!= DDI_SUCCESS
) || (nintrs
== 0)) {
6268 NXGE_DEBUG_MSG((nxgep
, INT_CTL
,
6269 "ddi_intr_get_nintrs() failed, status: 0x%x%, "
6270 "nintrs: %d", status
, nintrs
));
6271 return (NXGE_ERROR
| NXGE_DDI_FAILED
);
6274 ddi_status
= ddi_intr_get_navail(dip
, int_type
, &navail
);
6275 if ((ddi_status
!= DDI_SUCCESS
) || (navail
== 0)) {
6276 NXGE_ERROR_MSG((nxgep
, NXGE_ERR_CTL
,
6277 "ddi_intr_get_navail() failed, status: 0x%x%, "
6278 "nintrs: %d", ddi_status
, navail
));
6279 return (NXGE_ERROR
| NXGE_DDI_FAILED
);
6282 NXGE_DEBUG_MSG((nxgep
, INT_CTL
,
6283 "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
6286 behavior
= ((int_type
== DDI_INTR_TYPE_FIXED
) ? DDI_INTR_ALLOC_STRICT
:
6287 DDI_INTR_ALLOC_NORMAL
);
6288 intrp
->intr_size
= navail
* sizeof (ddi_intr_handle_t
);
6289 intrp
->htable
= kmem_alloc(intrp
->intr_size
, KM_SLEEP
);
6290 ddi_status
= ddi_intr_alloc(dip
, intrp
->htable
, int_type
, inum
,
6291 navail
, &nactual
, behavior
);
6292 if (ddi_status
!= DDI_SUCCESS
|| nactual
== 0) {
6293 NXGE_ERROR_MSG((nxgep
, NXGE_ERR_CTL
,
6294 " ddi_intr_alloc() failed: %d",
6296 kmem_free(intrp
->htable
, intrp
->intr_size
);
6297 return (NXGE_ERROR
| NXGE_DDI_FAILED
);
6300 if ((ddi_status
= ddi_intr_get_pri(intrp
->htable
[0],
6301 (uint_t
*)&intrp
->pri
)) != DDI_SUCCESS
) {
6302 NXGE_ERROR_MSG((nxgep
, NXGE_ERR_CTL
,
6303 " ddi_intr_get_pri() failed: %d",
6305 /* Free already allocated interrupts */
6306 for (y
= 0; y
< nactual
; y
++) {
6307 (void) ddi_intr_free(intrp
->htable
[y
]);
6310 kmem_free(intrp
->htable
, intrp
->intr_size
);
6311 return (NXGE_ERROR
| NXGE_DDI_FAILED
);
6315 switch (nxgep
->niu_type
) {
6317 status
= nxge_ldgv_init(nxgep
, &nactual
, &nrequired
);
6321 status
= nxge_ldgv_init_n2(nxgep
, &nactual
, &nrequired
);
6325 if (status
!= NXGE_OK
) {
6326 NXGE_ERROR_MSG((nxgep
, NXGE_ERR_CTL
,
6327 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
6328 "failed: 0x%x", status
));
6329 /* Free already allocated interrupts */
6330 for (y
= 0; y
< nactual
; y
++) {
6331 (void) ddi_intr_free(intrp
->htable
[y
]);
6334 kmem_free(intrp
->htable
, intrp
->intr_size
);
6338 ldgp
= nxgep
->ldgvp
->ldgp
;
6339 for (x
= 0; x
< nrequired
; x
++, ldgp
++) {
6340 ldgp
->vector
= (uint8_t)x
;
6341 if (nxgep
->niu_type
!= N2_NIU
) {
6342 ldgp
->intdata
= SID_DATA(ldgp
->func
, x
);
6347 if (ldgp
->nldvs
== 1) {
6348 inthandler
= (uint_t
*)ldgp
->ldvp
->ldv_intr_handler
;
6349 NXGE_DEBUG_MSG((nxgep
, INT_CTL
,
6350 "nxge_add_intrs_adv_type_fix: "
6351 "1-1 int handler(%d) ldg %d ldv %d "
6352 "arg1 $%p arg2 $%p\n",
6353 x
, ldgp
->ldg
, ldgp
->ldvp
->ldv
,
6355 } else if (ldgp
->nldvs
> 1) {
6356 inthandler
= (uint_t
*)ldgp
->sys_intr_handler
;
6357 NXGE_DEBUG_MSG((nxgep
, INT_CTL
,
6358 "nxge_add_intrs_adv_type_fix: "
6359 "shared ldv %d int handler(%d) ldv %d ldg %d"
6360 "arg1 0x%016llx arg2 0x%016llx\n",
6361 x
, ldgp
->nldvs
, ldgp
->ldg
, ldgp
->ldvp
->ldv
,
6365 if ((ddi_status
= ddi_intr_add_handler(intrp
->htable
[x
],
6366 (ddi_intr_handler_t
*)inthandler
, arg1
, arg2
))
6368 NXGE_ERROR_MSG((nxgep
, NXGE_ERR_CTL
,
6369 "==> nxge_add_intrs_adv_type_fix: failed #%d "
6370 "status 0x%x", x
, ddi_status
));
6371 for (y
= 0; y
< intrp
->intr_added
; y
++) {
6372 (void) ddi_intr_remove_handler(
6375 for (y
= 0; y
< nactual
; y
++) {
6376 (void) ddi_intr_free(intrp
->htable
[y
]);
6378 /* Free already allocated intr */
6379 kmem_free(intrp
->htable
, intrp
->intr_size
);
6381 (void) nxge_ldgv_uninit(nxgep
);
6383 return (NXGE_ERROR
| NXGE_DDI_FAILED
);
6386 ldgp
->htable_idx
= x
;
6387 intrp
->intr_added
++;
6390 intrp
->msi_intx_cnt
= nactual
;
6392 (void) ddi_intr_get_cap(intrp
->htable
[0], &intrp
->intr_cap
);
6394 status
= nxge_intr_ldgv_init(nxgep
);
6395 NXGE_DEBUG_MSG((nxgep
, INT_CTL
, "<== nxge_add_intrs_adv_type_fix"));
static void
nxge_remove_intrs(p_nxge_t nxgep)
{
	int		i, inum;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_remove_intrs: interrupts not registered"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum,
			    intrp->msi_intx_cnt,
			    intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) nxge_ldgv_uninit(nxgep);

	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
	    "#msix-request");

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
}
/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
		if (status == DDI_SUCCESS)
			intrp->intr_enabled = B_TRUE;
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}
/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}
static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t *macp;
	int status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	if (!isLDOMguest(nxgep)) {
		macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	} else {
		macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		/*
		 * Note: the original code passed sizeof (MAXMACADDRLEN),
		 * i.e. sizeof (int), here; the buffer length itself is
		 * what was intended.
		 */
		(void) memset(macp->m_src_addr, 0xff, MAXMACADDRLEN);
	}
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
	    NXGE_EHEADER_VLAN_CRC;
	macp->m_max_sdu = nxgep->mac.default_mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = nxge_priv_props;
	if (isLDOMguest(nxgep))
		macp->m_v12n = MAC_VIRT_LEVEL1;
	else
		macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_mac_register: instance %d "
	    "max_sdu %d margin %d maxframe %d (header %d)",
	    nxgep->instance,
	    macp->m_max_sdu, macp->m_margin,
	    nxgep->mac.maxframesize,
	    NXGE_EHEADER_VLAN_CRC));

	status = mac_register(macp, &nxgep->mach);
	if (isLDOMguest(nxgep)) {
		KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN);
		KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN);
	}
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}
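/*
 * Minimal GLDv3 registration sketch (illustrative, not driver code):
 *
 *	mac_register_t *macp = mac_alloc(MAC_VERSION);
 *	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
 *	macp->m_driver = softc;          -- driver private handle
 *	macp->m_dip = dip;               -- dev_info of the instance
 *	macp->m_callbacks = &callbacks;  -- mac_callbacks_t table
 *	status = mac_register(macp, &softc->mach);
 *	mac_free(macp);                  -- mac_register() copies the data
 *
 * nxge_mac_register() follows this pattern, with the extra LDOMs-guest
 * twist that placeholder source/destination addresses are allocated and
 * freed around the mac_register() call.
 */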
void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;

	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}
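/*
 * Usage sketch (illustrative): error injection is driven by a STREAMS
 * ioctl whose payload is an err_inject_t { blk_id, err_id, chan }. A
 * debug utility fills the structure and sends it down the stream; the
 * switch above dispatches to the per-block inject routine, e.g. blk_id
 * TXDMA_BLK_ID with chan 2 exercises nxge_txdma_inject_err() on TDC 2.
 */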
nxge_status_t
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;
	char			**prop_val;
	uint_t			prop_len;
	int			i;

	ASSERT(nxgep != NULL);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev:func # %d",
	    nxgep->function_num));
	/*
	 * Loop through existing per neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p,
		    hw_p->parent_devp));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    hw_p->parent_devp,
			    hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
			hw_p->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
			hw_p->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
		}

		hw_p->tcam = KMEM_ZALLOC(sizeof (tcam_flow_spec_t) *
		    hw_p->tcam_size, KM_SLEEP);

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
			for (i = 0; i < prop_len; i++) {
				if ((strcmp((caddr_t)prop_val[i],
				    NXGE_ROCK_COMPATIBLE) == 0)) {
					hw_p->platform_type = P_NEPTUNE_ROCK;
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "ROCK hw_p->platform_type %d",
					    hw_p->platform_type));
					break;
				}
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "nxge_init_common_dev: read compatible"
				    " property[%d] val[%s]",
				    i, (caddr_t)prop_val[i]));
			}

			ddi_prop_free(prop_val);
		}

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
	    nxgep->platform_type));
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}
void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	dev_info_t		*p_dip;

	ASSERT(nxgep != NULL);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    hw_p->parent_devp,
			    hw_p->ndevs));

			/*
			 * Release the RDC table, a shared resource
			 * of the nxge hardware.  The RDC table was
			 * assigned to this instance of nxge in
			 * nxge_use_cfg_dma_config().
			 */
			if (!isLDOMguest(nxgep)) {
				p_dma_cfgp =
				    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
				p_cfgp =
				    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
				(void) nxge_fzc_rdc_tbl_unbind(nxgep,
				    p_cfgp->def_mac_rxdma_grpid);

				/* Cleanup any outstanding groups. */
				nxge_grp_cleanup(nxgep);
			}

			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				KMEM_FREE(hw_p->tcam,
				    sizeof (tcam_flow_spec_t) *
				    hw_p->tcam_size);
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    hw_p->parent_devp,
				    hw_p->ndevs));

				nxge_hio_uninit(nxgep);

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    hw_p->parent_devp,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    hw_p->parent_devp,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				nxgep->nxge_hw_p = NULL;
				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}
/*
 * Determines the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or returns zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
	int	nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
	case NEPTUNE_2_10GF_2_1GRF:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ROCK:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}
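
/*
 * Usage sketch (assumption, for illustration only): callers treat a
 * return of zero as an unrecognized configuration, e.g.
 *
 *	if ((nports = nxge_get_nports(nxgep)) == 0)
 *		return (NXGE_ERROR);
 */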
/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int	nmsi;
	extern	int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
	case PORT_10G_TN1010:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * The maximum MSI-X requested will be 8.
		 * If the # of CPUs is less than 8, we will request
		 * # MSI-X based on the # of CPUs (default).
		 */
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d",
		    nxge_msix_10g_intrs));
		if ((nxge_msix_10g_intrs == 0) ||
		    (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_10G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): reset to 8"));
		} else {
			nmsi = nxge_msix_10g_intrs;
		}

		/*
		 * If the # of interrupts requested is 8 (default),
		 * cap the request at the number of CPUs.
		 */
		if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
		    (ncpus < nmsi)) {
			nmsi = ncpus;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): reset to 8"));
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
		    nxge_msix_1g_intrs));
		if ((nxge_msix_1g_intrs == 0) ||
		    (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_1G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (1G): reset to 2"));
		} else {
			nmsi = nxge_msix_1g_intrs;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}
/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bugs: the Neptune hardware may generate spurious interrupts
 * after an interrupt handler has been removed.
 */
#define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
#define	NXGE_PIM_RESET			(1ULL << 29)
#define	NXGE_GLU_RESET			(1ULL << 30)
#define	NXGE_NIU_RESET			(1ULL << 31)
#define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
					NXGE_GLU_RESET |	\
					NXGE_NIU_RESET)

#define	NXGE_WAIT_QUITE_TIME		200000
#define	NXGE_WAIT_QUITE_RETRY		40
#define	NXGE_PCI_RESET_WAIT		1000000 /* one second */
void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
	uint32_t	rvalue;
	p_nxge_hw_list_t hw_p;
	p_nxge_t	fnxgep;
	int		i, j;

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: NULL hardware pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
	    hw_p->flags, nxgep->nxge_link_poll_timerid,
	    nxgep->nxge_timerid));

	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
	/*
	 * Make sure other instances on the same hardware have stopped
	 * sending PIOs and are in a quiescent state.
	 */
	for (i = 0; i < NXGE_MAX_PORTS; i++) {
		fnxgep = hw_p->nxge_p[i];
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: checking entry %d "
		    "nxgep $%p", i, fnxgep));
		if (fnxgep) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: entry %d (function %d) "
			    "link timer id %d hw timer id %d",
			    i, fnxgep->function_num,
			    fnxgep->nxge_link_poll_timerid,
			    fnxgep->nxge_timerid));
		}
		if (fnxgep && fnxgep != nxgep &&
		    (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: checking $%p "
			    "(function %d) timer ids",
			    fnxgep, fnxgep->function_num));
			for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
				NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
				    "==> nxge_niu_peu_reset: waiting"));
				NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
				if (!fnxgep->nxge_timerid &&
				    !fnxgep->nxge_link_poll_timerid) {
					break;
				}
			}
			NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
			if (fnxgep->nxge_timerid ||
			    fnxgep->nxge_link_poll_timerid) {
				MUTEX_EXIT(&hw_p->nxge_cfg_lock);
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "<== nxge_niu_peu_reset: cannot reset "
				    "hardware (devices are still in use)"));
				return;
			}
		}
	}

	if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
		hw_p->flags |= COMMON_RESET_NIU_PCI;
		rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: read offset 0x%x (%d) "
		    "(data 0x%x)",
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    rvalue));

		rvalue |= NXGE_PCI_RESET_ALL;
		pci_config_put32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
		    rvalue));

		NXGE_DELAY(NXGE_PCI_RESET_WAIT);
	}

	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}
void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
	p_dev_regs_t	dev_regs;
	uint32_t	value;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));

	if (!nxge_set_replay_timer) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: will not change "
		    "the timeout"));
		return;
	}

	dev_regs = nxgep->dev_regs;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
	    dev_regs, dev_regs->nxge_pciregh));

	if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
		    "NULL PCI handle",
		    dev_regs));
		return;
	}

	value = (pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
	    "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

	pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
	    value);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
int
nxge_quiesce(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);

	if (nxgep == NULL)
		return (DDI_FAILURE);

	/* Turn off debugging */
	nxge_debug_level = NO_DEBUG;
	nxgep->nxge_debug_level = NO_DEBUG;
	npi_debug_level = NO_DEBUG;

	/*
	 * Stop link monitor only when linkchkmode is interrupt based
	 */
	if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	}

	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	return (DDI_SUCCESS);
}
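
/*
 * Illustrative note (assumption): nxge_quiesce() is exported through the
 * devo_quiesce slot of the driver's dev_ops structure, declared earlier
 * in this file, which is how the DDI invokes it at fast-reboot time, e.g.
 *
 *	static struct dev_ops nxge_dev_ops = {
 *		...
 *		nxge_quiesce		(devo_quiesce)
 *	};
 */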