usr/src/uts/common/io/hxge/hxge_main.c (illumos-gate)
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
27 * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
29 #include <hxge_impl.h>
30 #include <hxge_pfc.h>
33 * PSARC/2007/453 MSI-X interrupt limit override
34 * (This PSARC case is limited to MSI-X vectors
35 * and SPARC platforms only).
37 uint32_t hxge_msi_enable = 2;
40 * Globals: tunable parameters (/etc/system or adb)
43 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
44 uint32_t hxge_rbr_spare_size = 0;
45 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
46 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
47 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
48 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
49 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
50 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
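/*
 * For example, any of the tunables above can be overridden at boot time
 * from /etc/system (the value shown is illustrative only):
 *
 *	set hxge:hxge_rbr_size = 2048
 */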
52 static hxge_os_mutex_t hxgedebuglock;
53 static int hxge_debug_init = 0;
56 * Debugging flags:
57 * hxge_no_tx_lb : transmit load balancing
58 * hxge_tx_lb_policy: 0 - TCP/UDP port (default)
59 * 1 - From the Stack
60 * 2 - Destination IP Address
62 uint32_t hxge_no_tx_lb = 0;
63 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
66 * Tunables to manage the receive buffer blocks.
68 * hxge_rx_threshold_hi: copy all buffers.
69 * hxge_rx_buf_size_type: receive buffer block size type.
70 * hxge_rx_threshold_lo: copy only up to tunable block size type.
72 #if defined(__sparc)
73 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
74 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_4;
75 #else
76 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
77 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
78 #endif
79 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
81 rtrace_t hpi_rtracebuf;
84 * Function Prototypes
86 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
87 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
88 static void hxge_unattach(p_hxge_t);
90 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
92 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
93 static void hxge_destroy_mutexes(p_hxge_t);
95 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
96 static void hxge_unmap_regs(p_hxge_t hxgep);
98 static hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
99 static void hxge_remove_intrs(p_hxge_t hxgep);
100 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
101 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
102 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
103 static void hxge_intrs_enable(p_hxge_t hxgep);
104 static void hxge_intrs_disable(p_hxge_t hxgep);
105 static void hxge_suspend(p_hxge_t);
106 static hxge_status_t hxge_resume(p_hxge_t);
107 static hxge_status_t hxge_setup_dev(p_hxge_t);
108 static void hxge_destroy_dev(p_hxge_t);
109 static hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
110 static void hxge_free_mem_pool(p_hxge_t);
111 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
112 static void hxge_free_rx_mem_pool(p_hxge_t);
113 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
114 static void hxge_free_tx_mem_pool(p_hxge_t);
115 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
116 struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
117 p_hxge_dma_common_t);
118 static void hxge_dma_mem_free(p_hxge_dma_common_t);
119 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
120 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
121 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
122 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
123 p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
124 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
125 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
126 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
127 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
128 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
129 p_hxge_dma_common_t *, size_t);
130 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
131 static int hxge_init_common_dev(p_hxge_t);
132 static void hxge_uninit_common_dev(p_hxge_t);
135 * The next declarations are for the GLDv3 interface.
137 static int hxge_m_start(void *);
138 static void hxge_m_stop(void *);
139 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
140 static int hxge_m_promisc(void *, boolean_t);
141 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
142 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
144 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
145 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
146 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
147 uint_t pr_valsize, const void *pr_val);
148 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
149 uint_t pr_valsize, void *pr_val);
150 static void hxge_m_propinfo(void *barg, const char *pr_name,
151 mac_prop_id_t pr_num, mac_prop_info_handle_t mph);
152 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
153 uint_t pr_valsize, const void *pr_val);
154 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
155 uint_t pr_valsize, void *pr_val);
156 static void hxge_link_poll(void *arg);
157 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
158 static void hxge_msix_init(p_hxge_t hxgep);
160 char *hxge_priv_props[] = {
161 "_rxdma_intr_time",
162 "_rxdma_intr_pkts",
163 "_class_opt_ipv4_tcp",
164 "_class_opt_ipv4_udp",
165 "_class_opt_ipv4_ah",
166 "_class_opt_ipv4_sctp",
167 "_class_opt_ipv6_tcp",
168 "_class_opt_ipv6_udp",
169 "_class_opt_ipv6_ah",
170 "_class_opt_ipv6_sctp",
171 NULL
174 #define HXGE_MAX_PRIV_PROPS \
175 (sizeof (hxge_priv_props) / sizeof (hxge_priv_props[0]))
177 #define HXGE_MAGIC 0x4E584745UL
178 #define MAX_DUMP_SZ 256
180 #define HXGE_M_CALLBACK_FLAGS \
181 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
183 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
185 static mac_callbacks_t hxge_m_callbacks = {
186 HXGE_M_CALLBACK_FLAGS,
187 hxge_m_stat,
188 hxge_m_start,
189 hxge_m_stop,
190 hxge_m_promisc,
191 hxge_m_multicst,
192 NULL,
193 NULL,
194 NULL,
195 hxge_m_ioctl,
196 hxge_m_getcapab,
197 NULL,
198 NULL,
199 hxge_m_setprop,
200 hxge_m_getprop,
201 hxge_m_propinfo
204 /* PSARC/2007/453 MSI-X interrupt limit override. */
205 #define HXGE_MSIX_REQUEST_10G 8
206 static int hxge_create_msi_property(p_hxge_t);
208 /* Enable debug messages as necessary. */
209 uint64_t hxge_debug_level = 0;
212 * This list contains the instance structures for the Hydra
213 * devices present in the system. The lock exists to guarantee
214 * mutually exclusive access to the list.
216 void *hxge_list = NULL;
217 void *hxge_hw_list = NULL;
218 hxge_os_mutex_t hxge_common_lock;
220 extern uint64_t hpi_debug_level;
222 extern hxge_status_t hxge_ldgv_init();
223 extern hxge_status_t hxge_ldgv_uninit();
224 extern hxge_status_t hxge_intr_ldgv_init();
225 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
226 ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
227 extern void hxge_fm_fini(p_hxge_t hxgep);
230 * Count used to maintain the number of buffers being used
231 * by Hydra instances and loaned up to the upper layers.
233 uint32_t hxge_mblks_pending = 0;
236 * Device register access attributes for PIO.
238 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
239 DDI_DEVICE_ATTR_V0,
240 DDI_STRUCTURE_LE_ACC,
241 DDI_STRICTORDER_ACC,
245 * Device descriptor access attributes for DMA.
247 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
248 DDI_DEVICE_ATTR_V0,
249 DDI_STRUCTURE_LE_ACC,
250 DDI_STRICTORDER_ACC
254 * Device buffer access attributes for DMA.
256 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
257 DDI_DEVICE_ATTR_V0,
258 DDI_STRUCTURE_BE_ACC,
259 DDI_STRICTORDER_ACC
262 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
263 DMA_ATTR_V0, /* version number. */
264 0, /* low address */
265 0xffffffffffffffff, /* high address */
266 0xffffffffffffffff, /* address counter max */
267 0x80000, /* alignment */
268 0xfc00fc, /* dlim_burstsizes */
269 0x1, /* minimum transfer size */
270 0xffffffffffffffff, /* maximum transfer size */
271 0xffffffffffffffff, /* maximum segment size */
272 1, /* scatter/gather list length */
273 (unsigned int)1, /* granularity */
274 0 /* attribute flags */
277 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
278 DMA_ATTR_V0, /* version number. */
279 0, /* low address */
280 0xffffffffffffffff, /* high address */
281 0xffffffffffffffff, /* address counter max */
282 0x100000, /* alignment */
283 0xfc00fc, /* dlim_burstsizes */
284 0x1, /* minimum transfer size */
285 0xffffffffffffffff, /* maximum transfer size */
286 0xffffffffffffffff, /* maximum segment size */
287 1, /* scatter/gather list length */
288 (unsigned int)1, /* granularity */
289 0 /* attribute flags */
292 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
293 DMA_ATTR_V0, /* version number. */
294 0, /* low address */
295 0xffffffffffffffff, /* high address */
296 0xffffffffffffffff, /* address counter max */
297 0x40000, /* alignment */
298 0xfc00fc, /* dlim_burstsizes */
299 0x1, /* minimum transfer size */
300 0xffffffffffffffff, /* maximum transfer size */
301 0xffffffffffffffff, /* maximum segment size */
302 1, /* scatter/gather list length */
303 (unsigned int)1, /* granularity */
304 0 /* attribute flags */
307 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
308 DMA_ATTR_V0, /* version number. */
309 0, /* low address */
310 0xffffffffffffffff, /* high address */
311 0xffffffffffffffff, /* address counter max */
312 #if defined(_BIG_ENDIAN)
313 0x2000, /* alignment */
314 #else
315 0x1000, /* alignment */
316 #endif
317 0xfc00fc, /* dlim_burstsizes */
318 0x1, /* minimum transfer size */
319 0xffffffffffffffff, /* maximum transfer size */
320 0xffffffffffffffff, /* maximum segment size */
321 5, /* scatter/gather list length */
322 (unsigned int)1, /* granularity */
323 0 /* attribute flags */
326 ddi_dma_attr_t hxge_tx_dma_attr = {
327 DMA_ATTR_V0, /* version number. */
328 0, /* low address */
329 0xffffffffffffffff, /* high address */
330 0xffffffffffffffff, /* address counter max */
331 #if defined(_BIG_ENDIAN)
332 0x2000, /* alignment */
333 #else
334 0x1000, /* alignment */
335 #endif
336 0xfc00fc, /* dlim_burstsizes */
337 0x1, /* minimum transfer size */
338 0xffffffffffffffff, /* maximum transfer size */
339 0xffffffffffffffff, /* maximum segment size */
340 5, /* scatter/gather list length */
341 (unsigned int)1, /* granularity */
342 0 /* attribute flags */
345 ddi_dma_attr_t hxge_rx_dma_attr = {
346 DMA_ATTR_V0, /* version number. */
347 0, /* low address */
348 0xffffffffffffffff, /* high address */
349 0xffffffffffffffff, /* address counter max */
350 0x10000, /* alignment */
351 0xfc00fc, /* dlim_burstsizes */
352 0x1, /* minimum transfer size */
353 0xffffffffffffffff, /* maximum transfer size */
354 0xffffffffffffffff, /* maximum segment size */
355 1, /* scatter/gather list length */
356 (unsigned int)1, /* granularity */
357 DDI_DMA_RELAXED_ORDERING /* attribute flags */
360 ddi_dma_lim_t hxge_dma_limits = {
361 (uint_t)0, /* dlim_addr_lo */
362 (uint_t)0xffffffff, /* dlim_addr_hi */
363 (uint_t)0xffffffff, /* dlim_cntr_max */
364 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
365 0x1, /* dlim_minxfer */
366 1024 /* dlim_speed */
369 dma_method_t hxge_force_dma = DVMA;
372 * dma chunk sizes.
374 * Try to allocate the largest possible size
375 * so that fewer DMA chunks need to be managed.
377 size_t alloc_sizes[] = {
378 0x1000, 0x2000, 0x4000, 0x8000,
379 0x10000, 0x20000, 0x40000, 0x80000,
380 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
384 * hxge_attach - device attach entry point (DDI_ATTACH, DDI_RESUME, DDI_PM_RESUME).
386 static int
387 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
389 p_hxge_t hxgep = NULL;
390 int instance;
391 int status = DDI_SUCCESS;
392 int i;
394 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
397 * Get the device instance since we'll need to set up or retrieve a soft
398 * state for this instance.
400 instance = ddi_get_instance(dip);
402 switch (cmd) {
403 case DDI_ATTACH:
404 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
405 break;
407 case DDI_RESUME:
408 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
409 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
410 if (hxgep == NULL) {
411 status = DDI_FAILURE;
412 break;
414 if (hxgep->dip != dip) {
415 status = DDI_FAILURE;
416 break;
418 if (hxgep->suspended == DDI_PM_SUSPEND) {
419 status = ddi_dev_is_needed(hxgep->dip, 0, 1);
420 } else {
421 (void) hxge_resume(hxgep);
423 goto hxge_attach_exit;
425 case DDI_PM_RESUME:
426 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
427 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
428 if (hxgep == NULL) {
429 status = DDI_FAILURE;
430 break;
432 if (hxgep->dip != dip) {
433 status = DDI_FAILURE;
434 break;
436 (void) hxge_resume(hxgep);
437 goto hxge_attach_exit;
439 default:
440 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
441 status = DDI_FAILURE;
442 goto hxge_attach_exit;
445 if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
446 status = DDI_FAILURE;
447 HXGE_ERROR_MSG((hxgep, DDI_CTL,
448 "ddi_soft_state_zalloc failed"));
449 goto hxge_attach_exit;
452 hxgep = ddi_get_soft_state(hxge_list, instance);
453 if (hxgep == NULL) {
454 status = HXGE_ERROR;
455 HXGE_ERROR_MSG((hxgep, DDI_CTL,
456 "ddi_get_soft_state failed"));
457 goto hxge_attach_fail2;
460 hxgep->drv_state = 0;
461 hxgep->dip = dip;
462 hxgep->instance = instance;
463 hxgep->p_dip = ddi_get_parent(dip);
464 hxgep->hxge_debug_level = hxge_debug_level;
465 hpi_debug_level = hxge_debug_level;
468 * Initialize the MMAC structure.
470 (void) hxge_pfc_num_macs_get(hxgep, &hxgep->mmac.total);
471 hxgep->mmac.available = hxgep->mmac.total;
472 for (i = 0; i < hxgep->mmac.total; i++) {
473 hxgep->mmac.addrs[i].set = B_FALSE;
474 hxgep->mmac.addrs[i].primary = B_FALSE;
477 hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
478 &hxge_rx_dma_attr);
480 status = hxge_map_regs(hxgep);
481 if (status != HXGE_OK) {
482 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
483 goto hxge_attach_fail3;
486 status = hxge_init_common_dev(hxgep);
487 if (status != HXGE_OK) {
488 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
489 "hxge_init_common_dev failed"));
490 goto hxge_attach_fail4;
494 * Set up the ndd parameters for this instance.
496 hxge_init_param(hxgep);
499 * Setup Register Tracing Buffer.
501 hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
503 /* init stats ptr */
504 hxge_init_statsp(hxgep);
506 status = hxge_setup_mutexes(hxgep);
507 if (status != HXGE_OK) {
508 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
509 goto hxge_attach_fail;
512 /* Scrub the MSI-X memory */
513 hxge_msix_init(hxgep);
515 status = hxge_get_config_properties(hxgep);
516 if (status != HXGE_OK) {
517 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
518 goto hxge_attach_fail;
522 * Setup the Kstats for the driver.
524 hxge_setup_kstats(hxgep);
525 hxge_setup_param(hxgep);
527 status = hxge_setup_system_dma_pages(hxgep);
528 if (status != HXGE_OK) {
529 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
530 goto hxge_attach_fail;
533 hxge_hw_id_init(hxgep);
534 hxge_hw_init_niu_common(hxgep);
536 status = hxge_setup_dev(hxgep);
537 if (status != DDI_SUCCESS) {
538 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
539 goto hxge_attach_fail;
542 status = hxge_add_intrs(hxgep);
543 if (status != DDI_SUCCESS) {
544 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
545 goto hxge_attach_fail;
549 * Enable interrupts.
551 hxge_intrs_enable(hxgep);
553 if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
554 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
555 "unable to register to mac layer (%d)", status));
556 goto hxge_attach_fail;
558 mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
560 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
561 instance));
563 goto hxge_attach_exit;
565 hxge_attach_fail:
566 hxge_unattach(hxgep);
567 goto hxge_attach_fail1;
569 hxge_attach_fail5:
571 * Tear down the ndd parameters setup.
573 hxge_destroy_param(hxgep);
576 * Tear down the kstat setup.
578 hxge_destroy_kstats(hxgep);
580 hxge_attach_fail4:
581 if (hxgep->hxge_hw_p) {
582 hxge_uninit_common_dev(hxgep);
583 hxgep->hxge_hw_p = NULL;
585 hxge_attach_fail3:
587 * Unmap the register setup.
589 hxge_unmap_regs(hxgep);
591 hxge_fm_fini(hxgep);
593 hxge_attach_fail2:
594 ddi_soft_state_free(hxge_list, hxgep->instance);
596 hxge_attach_fail1:
597 if (status != HXGE_OK)
598 status = (HXGE_ERROR | HXGE_DDI_FAILED);
599 hxgep = NULL;
601 hxge_attach_exit:
602 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
603 status));
605 return (status);
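/*
 * hxge_detach - driver detach entry point.  DDI_DETACH unregisters from
 * the MAC layer and tears the instance down via hxge_unattach();
 * DDI_SUSPEND and DDI_PM_SUSPEND suspend the device.
 */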
608 static int
609 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
611 int status = DDI_SUCCESS;
612 int instance;
613 p_hxge_t hxgep = NULL;
615 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
616 instance = ddi_get_instance(dip);
617 hxgep = ddi_get_soft_state(hxge_list, instance);
618 if (hxgep == NULL) {
619 status = DDI_FAILURE;
620 goto hxge_detach_exit;
623 switch (cmd) {
624 case DDI_DETACH:
625 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
626 break;
628 case DDI_PM_SUSPEND:
629 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
630 hxgep->suspended = DDI_PM_SUSPEND;
631 hxge_suspend(hxgep);
632 break;
634 case DDI_SUSPEND:
635 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
636 if (hxgep->suspended != DDI_PM_SUSPEND) {
637 hxgep->suspended = DDI_SUSPEND;
638 hxge_suspend(hxgep);
640 break;
642 default:
643 status = DDI_FAILURE;
644 break;
647 if (cmd != DDI_DETACH)
648 goto hxge_detach_exit;
651 * Stop the xcvr polling.
653 hxgep->suspended = cmd;
655 if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
656 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
657 "<== hxge_detach status = 0x%08X", status));
658 return (DDI_FAILURE);
660 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
661 "<== hxge_detach (mac_unregister) status = 0x%08X", status));
663 hxge_unattach(hxgep);
664 hxgep = NULL;
666 hxge_detach_exit:
667 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
668 status));
670 return (status);
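/*
 * hxge_unattach - release all resources held by an instance: common
 * device state, timers, interrupts, device resources, ndd parameters,
 * kstats, register mappings, mutexes and the soft state itself.
 */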
673 static void
674 hxge_unattach(p_hxge_t hxgep)
676 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
678 if (hxgep == NULL || hxgep->dev_regs == NULL) {
679 return;
682 if (hxgep->hxge_hw_p) {
683 hxge_uninit_common_dev(hxgep);
684 hxgep->hxge_hw_p = NULL;
687 if (hxgep->hxge_timerid) {
688 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
689 hxgep->hxge_timerid = 0;
692 /* Stop interrupts. */
693 hxge_intrs_disable(hxgep);
695 /* Stop any further interrupts. */
696 hxge_remove_intrs(hxgep);
698 /* Stop the device and free resources. */
699 hxge_destroy_dev(hxgep);
701 /* Tear down the ndd parameters setup. */
702 hxge_destroy_param(hxgep);
704 /* Tear down the kstat setup. */
705 hxge_destroy_kstats(hxgep);
708 * Remove the list of ndd parameters which were setup during attach.
710 if (hxgep->dip) {
711 HXGE_DEBUG_MSG((hxgep, OBP_CTL,
712 " hxge_unattach: remove all properties"));
713 (void) ddi_prop_remove_all(hxgep->dip);
717 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
718 * previous state before unmapping the registers.
720 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
721 HXGE_DELAY(1000);
724 * Unmap the register setup.
726 hxge_unmap_regs(hxgep);
728 hxge_fm_fini(hxgep);
730 /* Destroy all mutexes. */
731 hxge_destroy_mutexes(hxgep);
734 * Free the soft state data structures allocated with this instance.
736 ddi_soft_state_free(hxge_list, hxgep->instance);
738 HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
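/*
 * hxge_map_regs - map the three register sets exported by the device:
 * set 0 (PCI config space), set 1 (device PIO registers) and set 2
 * (MSI/MSI-X table), and record the handles with the HPI layer.
 */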
741 static hxge_status_t
742 hxge_map_regs(p_hxge_t hxgep)
744 int ddi_status = DDI_SUCCESS;
745 p_dev_regs_t dev_regs;
747 #ifdef HXGE_DEBUG
748 char *sysname;
749 #endif
751 off_t regsize;
752 hxge_status_t status = HXGE_OK;
753 int nregs;
755 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
757 if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
758 return (HXGE_ERROR);
760 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
762 hxgep->dev_regs = NULL;
763 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
764 dev_regs->hxge_regh = NULL;
765 dev_regs->hxge_pciregh = NULL;
766 dev_regs->hxge_msix_regh = NULL;
768 (void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
769 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
770 "hxge_map_regs: pci config size 0x%x", regsize));
772 ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
773 (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
774 &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
775 if (ddi_status != DDI_SUCCESS) {
776 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
777 "ddi_map_regs, hxge bus config regs failed"));
778 goto hxge_map_regs_fail0;
781 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
782 "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
783 dev_regs->hxge_pciregp,
784 dev_regs->hxge_pciregh));
786 (void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
787 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
788 "hxge_map_regs: pio size 0x%x", regsize));
790 /* set up the device mapped register */
791 ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
792 (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
793 &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
795 if (ddi_status != DDI_SUCCESS) {
796 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
797 "ddi_map_regs for Hydra global reg failed"));
798 goto hxge_map_regs_fail1;
801 /* set up the msi/msi-x mapped register */
802 (void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
803 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
804 "hxge_map_regs: msix size 0x%x", regsize));
806 ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
807 (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
808 &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
810 if (ddi_status != DDI_SUCCESS) {
811 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
812 "ddi_map_regs for msi reg failed"));
813 goto hxge_map_regs_fail2;
816 hxgep->dev_regs = dev_regs;
818 HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
819 HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
820 HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
821 HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
823 HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
824 HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
826 HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
827 HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
829 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
830 " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
832 goto hxge_map_regs_exit;
834 hxge_map_regs_fail3:
835 if (dev_regs->hxge_msix_regh) {
836 ddi_regs_map_free(&dev_regs->hxge_msix_regh);
839 hxge_map_regs_fail2:
840 if (dev_regs->hxge_regh) {
841 ddi_regs_map_free(&dev_regs->hxge_regh);
844 hxge_map_regs_fail1:
845 if (dev_regs->hxge_pciregh) {
846 ddi_regs_map_free(&dev_regs->hxge_pciregh);
849 hxge_map_regs_fail0:
850 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
851 kmem_free(dev_regs, sizeof (dev_regs_t));
853 hxge_map_regs_exit:
854 if (ddi_status != DDI_SUCCESS)
855 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
856 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
857 return (status);
860 static void
861 hxge_unmap_regs(p_hxge_t hxgep)
863 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
864 if (hxgep->dev_regs) {
865 if (hxgep->dev_regs->hxge_pciregh) {
866 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
867 "==> hxge_unmap_regs: bus"));
868 ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
869 hxgep->dev_regs->hxge_pciregh = NULL;
872 if (hxgep->dev_regs->hxge_regh) {
873 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
874 "==> hxge_unmap_regs: device registers"));
875 ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
876 hxgep->dev_regs->hxge_regh = NULL;
879 if (hxgep->dev_regs->hxge_msix_regh) {
880 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
881 "==> hxge_unmap_regs: device interrupts"));
882 ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
883 hxgep->dev_regs->hxge_msix_regh = NULL;
885 kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
886 hxgep->dev_regs = NULL;
888 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
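/*
 * hxge_setup_mutexes - fetch the interrupt block cookie and initialize
 * the per-instance locks (genlock, vmac, ouraddr, filter, pio, timeout).
 */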
891 static hxge_status_t
892 hxge_setup_mutexes(p_hxge_t hxgep)
894 int ddi_status = DDI_SUCCESS;
895 hxge_status_t status = HXGE_OK;
897 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
900 * Get the interrupt cookie so the mutexes can be initialized.
902 ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
903 &hxgep->interrupt_cookie);
905 if (ddi_status != DDI_SUCCESS) {
906 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
907 "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
908 goto hxge_setup_mutexes_exit;
912 * Initialize mutexes for this device.
914 MUTEX_INIT(hxgep->genlock, NULL,
915 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
916 MUTEX_INIT(&hxgep->vmac_lock, NULL,
917 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
918 MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
919 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
920 RW_INIT(&hxgep->filter_lock, NULL,
921 RW_DRIVER, (void *) hxgep->interrupt_cookie);
922 MUTEX_INIT(&hxgep->pio_lock, NULL,
923 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
924 MUTEX_INIT(&hxgep->timeout.lock, NULL,
925 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
927 hxge_setup_mutexes_exit:
928 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
929 "<== hxge_setup_mutexes status = %x", status));
931 if (ddi_status != DDI_SUCCESS)
932 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
934 return (status);
937 static void
938 hxge_destroy_mutexes(p_hxge_t hxgep)
940 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
941 RW_DESTROY(&hxgep->filter_lock);
942 MUTEX_DESTROY(&hxgep->vmac_lock);
943 MUTEX_DESTROY(&hxgep->ouraddr_lock);
944 MUTEX_DESTROY(hxgep->genlock);
945 MUTEX_DESTROY(&hxgep->pio_lock);
946 MUTEX_DESTROY(&hxgep->timeout.lock);
948 if (hxge_debug_init == 1) {
949 MUTEX_DESTROY(&hxgedebuglock);
950 hxge_debug_init = 0;
953 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
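/*
 * hxge_init - bring the hardware to an initialized state: allocate the
 * buffer and descriptor pools, bring up the TX and RX DMA channels, the
 * classifier (TCAM) and the VMAC, set the default MAC address, then
 * enable hardware interrupts.
 */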
956 hxge_status_t
957 hxge_init(p_hxge_t hxgep)
959 hxge_status_t status = HXGE_OK;
961 HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
963 if (hxgep->drv_state & STATE_HW_INITIALIZED) {
964 return (status);
968 * Allocate system memory for the receive/transmit buffer blocks and
969 * receive/transmit descriptor rings.
971 status = hxge_alloc_mem_pool(hxgep);
972 if (status != HXGE_OK) {
973 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
974 goto hxge_init_fail1;
978 * Initialize and enable TXDMA channels.
980 status = hxge_init_txdma_channels(hxgep);
981 if (status != HXGE_OK) {
982 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
983 goto hxge_init_fail3;
987 * Initialize and enable RXDMA channels.
989 status = hxge_init_rxdma_channels(hxgep);
990 if (status != HXGE_OK) {
991 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
992 goto hxge_init_fail4;
996 * Initialize TCAM
998 status = hxge_classify_init(hxgep);
999 if (status != HXGE_OK) {
1000 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
1001 goto hxge_init_fail5;
1005 * Initialize the VMAC block.
1007 status = hxge_vmac_init(hxgep);
1008 if (status != HXGE_OK) {
1009 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
1010 goto hxge_init_fail5;
1013 /* Bringup - this may be unnecessary when PXE and FCode are available */
1014 status = hxge_pfc_set_default_mac_addr(hxgep);
1015 if (status != HXGE_OK) {
1016 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1017 "Default Address Failure\n"));
1018 goto hxge_init_fail5;
1022 * Enable hardware interrupts.
1024 hxge_intr_hw_enable(hxgep);
1025 hxgep->drv_state |= STATE_HW_INITIALIZED;
1027 goto hxge_init_exit;
1029 hxge_init_fail5:
1030 hxge_uninit_rxdma_channels(hxgep);
1031 hxge_init_fail4:
1032 hxge_uninit_txdma_channels(hxgep);
1033 hxge_init_fail3:
1034 hxge_free_mem_pool(hxgep);
1035 hxge_init_fail1:
1036 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1037 "<== hxge_init status (failed) = 0x%08x", status));
1038 return (status);
1040 hxge_init_exit:
1042 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1043 status));
1045 return (status);
1048 timeout_id_t
1049 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1051 if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1052 return (timeout(func, (caddr_t)hxgep,
1053 drv_usectohz(1000 * msec)));
1055 return (NULL);
1058 /*ARGSUSED*/
1059 void
1060 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1062 if (timerid) {
1063 (void) untimeout(timerid);
1067 void
1068 hxge_uninit(p_hxge_t hxgep)
1070 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1072 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1073 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1074 "==> hxge_uninit: not initialized"));
1075 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1076 return;
1079 /* Stop timer */
1080 if (hxgep->hxge_timerid) {
1081 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1082 hxgep->hxge_timerid = 0;
1085 (void) hxge_intr_hw_disable(hxgep);
1087 /* Reset the receive VMAC side. */
1088 (void) hxge_rx_vmac_disable(hxgep);
1090 /* Free classification resources */
1091 (void) hxge_classify_uninit(hxgep);
1093 /* Reset the transmit/receive DMA side. */
1094 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1095 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1097 hxge_uninit_txdma_channels(hxgep);
1098 hxge_uninit_rxdma_channels(hxgep);
1100 /* Reset the transmit VMAC side. */
1101 (void) hxge_tx_vmac_disable(hxgep);
1103 hxge_free_mem_pool(hxgep);
1105 hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1107 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
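/*
 * hxge_debug_msg - common debug/notice/warning message routine; the
 * message level is matched against the per-instance (or global) debug
 * level before the formatted message is passed to cmn_err().
 */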
1110 /*ARGSUSED*/
1111 /*VARARGS*/
1112 void
1113 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1115 char msg_buffer[1048];
1116 char prefix_buffer[32];
1117 int instance;
1118 uint64_t debug_level;
1119 int cmn_level = CE_CONT;
1120 va_list ap;
1122 debug_level = (hxgep == NULL) ? hxge_debug_level :
1123 hxgep->hxge_debug_level;
1125 if ((level & debug_level) || (level == HXGE_NOTE) ||
1126 (level == HXGE_ERR_CTL)) {
1127 /* do the msg processing */
1128 if (hxge_debug_init == 0) {
1129 MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1130 hxge_debug_init = 1;
1133 MUTEX_ENTER(&hxgedebuglock);
1135 if ((level & HXGE_NOTE)) {
1136 cmn_level = CE_NOTE;
1139 if (level & HXGE_ERR_CTL) {
1140 cmn_level = CE_WARN;
1143 va_start(ap, fmt);
1144 (void) vsprintf(msg_buffer, fmt, ap);
1145 va_end(ap);
1147 if (hxgep == NULL) {
1148 instance = -1;
1149 (void) sprintf(prefix_buffer, "%s :", "hxge");
1150 } else {
1151 instance = hxgep->instance;
1152 (void) sprintf(prefix_buffer,
1153 "%s%d :", "hxge", instance);
1156 MUTEX_EXIT(&hxgedebuglock);
1157 cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
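/*
 * hxge_dump_packet - format up to MAX_DUMP_SZ bytes of a packet as a
 * colon-separated hex string for debug output.
 */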
1161 char *
1162 hxge_dump_packet(char *addr, int size)
1164 uchar_t *ap = (uchar_t *)addr;
1165 int i;
1166 static char etherbuf[1024];
1167 char *cp = etherbuf;
1168 char digits[] = "0123456789abcdef";
1170 if (!size)
1171 size = 60;
1173 if (size > MAX_DUMP_SZ) {
1174 /* Dump the leading bytes */
1175 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1176 if (*ap > 0x0f)
1177 *cp++ = digits[*ap >> 4];
1178 *cp++ = digits[*ap++ & 0xf];
1179 *cp++ = ':';
1181 for (i = 0; i < 20; i++)
1182 *cp++ = '.';
1183 /* Dump the last MAX_DUMP_SZ/2 bytes */
1184 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1185 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1186 if (*ap > 0x0f)
1187 *cp++ = digits[*ap >> 4];
1188 *cp++ = digits[*ap++ & 0xf];
1189 *cp++ = ':';
1191 } else {
1192 for (i = 0; i < size; i++) {
1193 if (*ap > 0x0f)
1194 *cp++ = digits[*ap >> 4];
1195 *cp++ = digits[*ap++ & 0xf];
1196 *cp++ = ':';
1199 *--cp = 0;
1200 return (etherbuf);
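/*
 * hxge_suspend - stop the link status timer, disable interrupts and
 * quiesce the device for DDI_SUSPEND/DDI_PM_SUSPEND.
 */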
1203 static void
1204 hxge_suspend(p_hxge_t hxgep)
1206 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1209 * Stop the link status timer before hxge_intrs_disable() to avoid
1210 * accessing the MSIX table simultaneously. Note that the timer
1211 * routine polls for MSIX parity errors.
1213 MUTEX_ENTER(&hxgep->timeout.lock);
1214 if (hxgep->timeout.id)
1215 (void) untimeout(hxgep->timeout.id);
1216 MUTEX_EXIT(&hxgep->timeout.lock);
1218 hxge_intrs_disable(hxgep);
1219 hxge_destroy_dev(hxgep);
1221 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
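/*
 * hxge_resume - restart the RX/TX DMA channels and the VMAC, re-enable
 * interrupts and restart the link status timer after a suspend.
 */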
1224 static hxge_status_t
1225 hxge_resume(p_hxge_t hxgep)
1227 hxge_status_t status = HXGE_OK;
1229 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1230 hxgep->suspended = DDI_RESUME;
1232 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1233 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1235 (void) hxge_rx_vmac_enable(hxgep);
1236 (void) hxge_tx_vmac_enable(hxgep);
1238 hxge_intrs_enable(hxgep);
1240 hxgep->suspended = 0;
1243 * Resume the link status timer after hxge_intrs_enable to avoid
1244 * accessing the MSIX table simultaneously.
1246 MUTEX_ENTER(&hxgep->timeout.lock);
1247 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1248 hxgep->timeout.ticks);
1249 MUTEX_EXIT(&hxgep->timeout.lock);
1251 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1252 "<== hxge_resume status = 0x%x", status));
1254 return (status);
1257 static hxge_status_t
1258 hxge_setup_dev(p_hxge_t hxgep)
1260 hxge_status_t status = HXGE_OK;
1262 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1264 status = hxge_link_init(hxgep);
1265 if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1266 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1267 "Bad register acc handle"));
1268 status = HXGE_ERROR;
1271 if (status != HXGE_OK) {
1272 HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1273 " hxge_setup_dev status (link init 0x%08x)", status));
1274 goto hxge_setup_dev_exit;
1277 hxge_setup_dev_exit:
1278 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1279 "<== hxge_setup_dev status = 0x%08x", status));
1281 return (status);
1284 static void
1285 hxge_destroy_dev(p_hxge_t hxgep)
1287 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1289 (void) hxge_hw_stop(hxgep);
1291 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
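/*
 * hxge_setup_system_dma_pages - determine the system/IOMMU page size,
 * derive the receive block size and block size code (Hydra supports 4K
 * and 8K blocks), and probe the system DMA burst size with a spare
 * DMA handle.
 */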
1294 static hxge_status_t
1295 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1297 int ddi_status = DDI_SUCCESS;
1298 uint_t count;
1299 ddi_dma_cookie_t cookie;
1300 uint_t iommu_pagesize;
1301 hxge_status_t status = HXGE_OK;
1303 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1305 hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1306 iommu_pagesize = dvma_pagesize(hxgep->dip);
1308 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1309 " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1310 " default_block_size %d iommu_pagesize %d",
1311 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1312 hxgep->rx_default_block_size, iommu_pagesize));
1314 if (iommu_pagesize != 0) {
1315 if (hxgep->sys_page_sz == iommu_pagesize) {
1316 /* Hydra supports page sizes up to 8K */
1317 if (iommu_pagesize > 0x2000)
1318 hxgep->sys_page_sz = 0x2000;
1319 } else {
1320 if (hxgep->sys_page_sz > iommu_pagesize)
1321 hxgep->sys_page_sz = iommu_pagesize;
1325 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1327 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1328 "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1329 "default_block_size %d page mask %d",
1330 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1331 hxgep->rx_default_block_size, hxgep->sys_page_mask));
1333 switch (hxgep->sys_page_sz) {
1334 default:
1335 hxgep->sys_page_sz = 0x1000;
1336 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1337 hxgep->rx_default_block_size = 0x1000;
1338 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1339 break;
1340 case 0x1000:
1341 hxgep->rx_default_block_size = 0x1000;
1342 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1343 break;
1344 case 0x2000:
1345 hxgep->rx_default_block_size = 0x2000;
1346 hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1347 break;
1350 hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1351 hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1354 * Get the system DMA burst size.
1356 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1357 DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1358 if (ddi_status != DDI_SUCCESS) {
1359 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1360 "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1361 goto hxge_get_soft_properties_exit;
1364 ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1365 (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1366 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1367 &cookie, &count);
1368 if (ddi_status != DDI_DMA_MAPPED) {
1369 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1370 "Binding spare handle to find system burstsize failed."));
1371 ddi_status = DDI_FAILURE;
1372 goto hxge_get_soft_properties_fail1;
1375 hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1376 (void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1378 hxge_get_soft_properties_fail1:
1379 ddi_dma_free_handle(&hxgep->dmasparehandle);
1381 hxge_get_soft_properties_exit:
1383 if (ddi_status != DDI_SUCCESS)
1384 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1386 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1387 "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1389 return (status);
1392 static hxge_status_t
1393 hxge_alloc_mem_pool(p_hxge_t hxgep)
1395 hxge_status_t status = HXGE_OK;
1397 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1399 status = hxge_alloc_rx_mem_pool(hxgep);
1400 if (status != HXGE_OK) {
1401 return (HXGE_ERROR);
1404 status = hxge_alloc_tx_mem_pool(hxgep);
1405 if (status != HXGE_OK) {
1406 hxge_free_rx_mem_pool(hxgep);
1407 return (HXGE_ERROR);
1410 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1411 return (HXGE_OK);
1414 static void
1415 hxge_free_mem_pool(p_hxge_t hxgep)
1417 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1419 hxge_free_rx_mem_pool(hxgep);
1420 hxge_free_tx_mem_pool(hxgep);
1422 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
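/*
 * hxge_alloc_rx_mem_pool - allocate, per receive DMA channel, the
 * receive buffer chunks plus the RBR, RCR and mailbox control areas,
 * and attach the resulting pools to the instance.
 */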
1425 static hxge_status_t
1426 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1428 int i, j;
1429 uint32_t ndmas, st_rdc;
1430 p_hxge_dma_pt_cfg_t p_all_cfgp;
1431 p_hxge_hw_pt_cfg_t p_cfgp;
1432 p_hxge_dma_pool_t dma_poolp;
1433 p_hxge_dma_common_t *dma_buf_p;
1434 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1435 p_hxge_dma_common_t *dma_rbr_cntl_p;
1436 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1437 p_hxge_dma_common_t *dma_rcr_cntl_p;
1438 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1439 p_hxge_dma_common_t *dma_mbox_cntl_p;
1440 size_t rx_buf_alloc_size;
1441 size_t rx_rbr_cntl_alloc_size;
1442 size_t rx_rcr_cntl_alloc_size;
1443 size_t rx_mbox_cntl_alloc_size;
1444 uint32_t *num_chunks; /* per dma */
1445 hxge_status_t status = HXGE_OK;
1447 uint32_t hxge_port_rbr_size;
1448 uint32_t hxge_port_rbr_spare_size;
1449 uint32_t hxge_port_rcr_size;
1451 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1453 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1454 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1455 st_rdc = p_cfgp->start_rdc;
1456 ndmas = p_cfgp->max_rdcs;
1458 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1459 " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1462 * Allocate memory for each receive DMA channel.
1464 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1465 KM_SLEEP);
1466 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1467 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1469 dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1470 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1471 dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1472 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1473 dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1474 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1475 dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1476 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1477 dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1478 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1479 dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1480 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1482 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1483 KM_SLEEP);
1486 * Assume that each DMA channel will be configured with default block
1487 * size. RBR block counts are a multiple of the batch count (16).
1489 hxge_port_rbr_size = p_all_cfgp->rbr_size;
1490 hxge_port_rcr_size = p_all_cfgp->rcr_size;
1492 if (!hxge_port_rbr_size) {
1493 hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1496 if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1497 hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1498 (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1501 p_all_cfgp->rbr_size = hxge_port_rbr_size;
1502 hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1504 if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1505 hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1506 (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1509 rx_buf_alloc_size = (hxgep->rx_default_block_size *
1510 (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1513 * Addresses of receive block ring, receive completion ring and the
1514 * mailbox must all be cache-aligned (64 bytes).
1516 rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1517 rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1518 rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1519 rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1521 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1522 "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1523 "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
1524 hxge_port_rbr_size, hxge_port_rbr_spare_size,
1525 hxge_port_rcr_size, rx_cntl_alloc_size));
1527 hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1528 hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1531 * Allocate memory for receive buffers and descriptor rings. Replace
1532 * allocation functions with interface functions provided by the
1533 * partition manager when it is available.
1536 * Allocate memory for the receive buffer blocks.
1538 for (i = 0; i < ndmas; i++) {
1539 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1540 " hxge_alloc_rx_mem_pool to alloc mem: "
1541 " dma %d dma_buf_p %llx &dma_buf_p %llx",
1542 i, dma_buf_p[i], &dma_buf_p[i]));
1544 num_chunks[i] = 0;
1546 status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1547 rx_buf_alloc_size, hxgep->rx_default_block_size,
1548 &num_chunks[i]);
1549 if (status != HXGE_OK) {
1550 break;
1553 st_rdc++;
1554 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1555 " hxge_alloc_rx_mem_pool DONE alloc mem: "
1556 "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1557 dma_buf_p[i], &dma_buf_p[i]));
1560 if (i < ndmas) {
1561 goto hxge_alloc_rx_mem_fail1;
1565 * Allocate memory for descriptor rings and mailbox.
1567 st_rdc = p_cfgp->start_rdc;
1568 for (j = 0; j < ndmas; j++) {
1569 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1570 &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1571 rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1572 break;
1575 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1576 &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1577 rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1578 break;
1581 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1582 &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1583 rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1584 break;
1586 st_rdc++;
1589 if (j < ndmas) {
1590 goto hxge_alloc_rx_mem_fail2;
1593 dma_poolp->ndmas = ndmas;
1594 dma_poolp->num_chunks = num_chunks;
1595 dma_poolp->buf_allocated = B_TRUE;
1596 hxgep->rx_buf_pool_p = dma_poolp;
1597 dma_poolp->dma_buf_pool_p = dma_buf_p;
1599 dma_rbr_cntl_poolp->ndmas = ndmas;
1600 dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1601 hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1602 dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1604 dma_rcr_cntl_poolp->ndmas = ndmas;
1605 dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1606 hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1607 dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1609 dma_mbox_cntl_poolp->ndmas = ndmas;
1610 dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1611 hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1612 dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1614 goto hxge_alloc_rx_mem_pool_exit;
1616 hxge_alloc_rx_mem_fail2:
1617 /* Free control buffers */
1618 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1619 "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1620 for (; j >= 0; j--) {
1621 hxge_free_rx_cntl_dma(hxgep,
1622 (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1623 hxge_free_rx_cntl_dma(hxgep,
1624 (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1625 hxge_free_rx_cntl_dma(hxgep,
1626 (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1627 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1628 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1630 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1631 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1633 hxge_alloc_rx_mem_fail1:
1634 /* Free data buffers */
1635 i--;
1636 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1637 "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1638 for (; i >= 0; i--) {
1639 hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1640 num_chunks[i]);
1642 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1643 "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1645 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1646 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1647 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1648 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1649 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1650 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1651 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1652 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1653 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1655 hxge_alloc_rx_mem_pool_exit:
1656 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1657 "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1659 return (status);
1662 static void
1663 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1665 uint32_t i, ndmas;
1666 p_hxge_dma_pool_t dma_poolp;
1667 p_hxge_dma_common_t *dma_buf_p;
1668 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1669 p_hxge_dma_common_t *dma_rbr_cntl_p;
1670 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1671 p_hxge_dma_common_t *dma_rcr_cntl_p;
1672 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1673 p_hxge_dma_common_t *dma_mbox_cntl_p;
1674 uint32_t *num_chunks;
1676 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1678 dma_poolp = hxgep->rx_buf_pool_p;
1679 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1680 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1681 "(null rx buf pool or buf not allocated"));
1682 return;
1685 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1686 if (dma_rbr_cntl_poolp == NULL ||
1687 (!dma_rbr_cntl_poolp->buf_allocated)) {
1688 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1689 "<== hxge_free_rx_mem_pool "
1690 "(null rbr cntl buf pool or rbr cntl buf not allocated"));
1691 return;
1694 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1695 if (dma_rcr_cntl_poolp == NULL ||
1696 (!dma_rcr_cntl_poolp->buf_allocated)) {
1697 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1698 "<== hxge_free_rx_mem_pool "
1699 "(null rcr cntl buf pool or rcr cntl buf not allocated"));
1700 return;
1703 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1704 if (dma_mbox_cntl_poolp == NULL ||
1705 (!dma_mbox_cntl_poolp->buf_allocated)) {
1706 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1707 "<== hxge_free_rx_mem_pool "
1708 "(null mbox cntl buf pool or mbox cntl buf not allocated"));
1709 return;
1712 dma_buf_p = dma_poolp->dma_buf_pool_p;
1713 num_chunks = dma_poolp->num_chunks;
1715 dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1716 dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1717 dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1718 ndmas = dma_rbr_cntl_poolp->ndmas;
1720 for (i = 0; i < ndmas; i++) {
1721 hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1724 for (i = 0; i < ndmas; i++) {
1725 hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1726 hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1727 hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1730 for (i = 0; i < ndmas; i++) {
1731 KMEM_FREE(dma_buf_p[i],
1732 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1733 KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1734 KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1735 KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1738 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1739 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1740 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1741 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1742 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1743 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1744 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1745 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1746 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1748 hxgep->rx_buf_pool_p = NULL;
1749 hxgep->rx_rbr_cntl_pool_p = NULL;
1750 hxgep->rx_rcr_cntl_pool_p = NULL;
1751 hxgep->rx_mbox_cntl_pool_p = NULL;
1753 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
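/*
 * hxge_alloc_rx_buf_dma - satisfy a channel's receive buffer requirement
 * by allocating up to HXGE_DMA_BLOCK chunks, walking alloc_sizes[]
 * downward whenever an allocation of the current size fails.
 */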
1756 static hxge_status_t
1757 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1758 p_hxge_dma_common_t *dmap,
1759 size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1761 p_hxge_dma_common_t rx_dmap;
1762 hxge_status_t status = HXGE_OK;
1763 size_t total_alloc_size;
1764 size_t allocated = 0;
1765 int i, size_index, array_size;
1767 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1769 rx_dmap = (p_hxge_dma_common_t)
1770 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1772 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1773 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1774 dma_channel, alloc_size, block_size, dmap));
1776 total_alloc_size = alloc_size;
1778 i = 0;
1779 size_index = 0;
1780 array_size = sizeof (alloc_sizes) / sizeof (size_t);
1781 while ((size_index < array_size) &&
1782 (alloc_sizes[size_index] < alloc_size))
1783 size_index++;
1784 if (size_index >= array_size) {
1785 size_index = array_size - 1;
1788 while ((allocated < total_alloc_size) &&
1789 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1790 rx_dmap[i].dma_chunk_index = i;
1791 rx_dmap[i].block_size = block_size;
1792 rx_dmap[i].alength = alloc_sizes[size_index];
1793 rx_dmap[i].orig_alength = rx_dmap[i].alength;
1794 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1795 rx_dmap[i].dma_channel = dma_channel;
1796 rx_dmap[i].contig_alloc_type = B_FALSE;
1798 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1799 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1800 "i %d nblocks %d alength %d",
1801 dma_channel, i, &rx_dmap[i], block_size,
1802 i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1803 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1804 &hxge_rx_dma_attr, rx_dmap[i].alength,
1805 &hxge_dev_buf_dma_acc_attr,
1806 DDI_DMA_READ | DDI_DMA_STREAMING,
1807 (p_hxge_dma_common_t)(&rx_dmap[i]));
1808 if (status != HXGE_OK) {
1809 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1810 " hxge_alloc_rx_buf_dma: Alloc Failed: "
1811 " for size: %d", alloc_sizes[size_index]));
1812 size_index--;
1813 } else {
1814 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1815 " alloc_rx_buf_dma allocated rdc %d "
1816 "chunk %d size %x dvma %x bufp %llx ",
1817 dma_channel, i, rx_dmap[i].alength,
1818 rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1819 i++;
1820 allocated += alloc_sizes[size_index];
1824 if (allocated < total_alloc_size) {
1825 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1826 " hxge_alloc_rx_buf_dma failed due to"
1827 " allocated(%d) < required(%d)",
1828 allocated, total_alloc_size));
1829 goto hxge_alloc_rx_mem_fail1;
1832 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1833 " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1835 *num_chunks = i;
1836 *dmap = rx_dmap;
1838 goto hxge_alloc_rx_mem_exit;
1840 hxge_alloc_rx_mem_fail1:
1841 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1843 hxge_alloc_rx_mem_exit:
1844 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1845 "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1847 return (status);
1850 /*ARGSUSED*/
1851 static void
1852 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1853 uint32_t num_chunks)
1855 int i;
1857 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1858 "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1860 for (i = 0; i < num_chunks; i++) {
1861 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1862 "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1863 hxge_dma_mem_free(dmap++);
1866 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1869 /*ARGSUSED*/
1870 static hxge_status_t
1871 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1872 p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1874 p_hxge_dma_common_t rx_dmap;
1875 hxge_status_t status = HXGE_OK;
1877 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1879 rx_dmap = (p_hxge_dma_common_t)
1880 KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1882 rx_dmap->contig_alloc_type = B_FALSE;
1884 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1885 attr, size, &hxge_dev_desc_dma_acc_attr,
1886 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1887 if (status != HXGE_OK) {
1888 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1889 " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1890 " for size: %d", size));
1891 goto hxge_alloc_rx_cntl_dma_fail1;
1894 *dmap = rx_dmap;
1896 goto hxge_alloc_rx_cntl_dma_exit;
1898 hxge_alloc_rx_cntl_dma_fail1:
1899 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1901 hxge_alloc_rx_cntl_dma_exit:
1902 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1903 "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1905 return (status);
1908 /*ARGSUSED*/
1909 static void
1910 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1912 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1914 hxge_dma_mem_free(dmap);
1916 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
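/*
 * hxge_alloc_tx_mem_pool - allocate, per transmit DMA channel, the
 * transmit buffer chunks (sized for bcopy transmits) and the descriptor
 * ring plus mailbox control area.
 */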
1919 static hxge_status_t
1920 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1922 hxge_status_t status = HXGE_OK;
1923 int i, j;
1924 uint32_t ndmas, st_tdc;
1925 p_hxge_dma_pt_cfg_t p_all_cfgp;
1926 p_hxge_hw_pt_cfg_t p_cfgp;
1927 p_hxge_dma_pool_t dma_poolp;
1928 p_hxge_dma_common_t *dma_buf_p;
1929 p_hxge_dma_pool_t dma_cntl_poolp;
1930 p_hxge_dma_common_t *dma_cntl_p;
1931 size_t tx_buf_alloc_size;
1932 size_t tx_cntl_alloc_size;
1933 uint32_t *num_chunks; /* per dma */
1935 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1937 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1938 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1939 st_tdc = p_cfgp->start_tdc;
1940 ndmas = p_cfgp->max_tdcs;
1942 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1943 "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1944 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1946 * Allocate memory for each transmit DMA channel.
1948 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1949 KM_SLEEP);
1950 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1951 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1953 dma_cntl_poolp = (p_hxge_dma_pool_t)
1954 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1955 dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1956 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1958 hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1961 * Assume that each DMA channel will be configured with default
1962 * transmit buffer size for copying transmit data. (For packet payload
1963 * over this limit, packets will not be copied.)
1965 tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
1968 * Addresses of transmit descriptor ring and the mailbox must be all
1969 * cache-aligned (64 bytes).
1971 tx_cntl_alloc_size = hxge_tx_ring_size;
1972 tx_cntl_alloc_size *= (sizeof (tx_desc_t));
1973 tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
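/*
 * Descriptive note: the control area for each TDC holds the descriptor
 * ring followed by its mailbox.  As a rough illustration only (the real
 * sizeof values come from the hxge headers and are not assumed here),
 * a ring of hxge_tx_ring_size == 1024 descriptors of 8 bytes each would
 * need 1024 * 8 + sizeof (txdma_mailbox_t) bytes, i.e. a little over
 * 8 KB of cache-aligned control memory per channel.
 */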
1975 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1976 KM_SLEEP);
1979 * Allocate memory for transmit buffers and descriptor rings. Replace
1980 * allocation functions with interface functions provided by the
1981 * partition manager when it is available.
1983 * Allocate memory for the transmit buffer pool.
1985 for (i = 0; i < ndmas; i++) {
1986 num_chunks[i] = 0;
1987 status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
1988 tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
1989 if (status != HXGE_OK) {
1990 break;
1992 st_tdc++;
1995 if (i < ndmas) {
1996 goto hxge_alloc_tx_mem_pool_fail1;
1999 st_tdc = p_cfgp->start_tdc;
2002 * Allocate memory for descriptor rings and mailbox.
2004 for (j = 0; j < ndmas; j++) {
2005 status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2006 tx_cntl_alloc_size);
2007 if (status != HXGE_OK) {
2008 break;
2010 st_tdc++;
2013 if (j < ndmas) {
2014 goto hxge_alloc_tx_mem_pool_fail2;
2017 dma_poolp->ndmas = ndmas;
2018 dma_poolp->num_chunks = num_chunks;
2019 dma_poolp->buf_allocated = B_TRUE;
2020 dma_poolp->dma_buf_pool_p = dma_buf_p;
2021 hxgep->tx_buf_pool_p = dma_poolp;
2023 dma_cntl_poolp->ndmas = ndmas;
2024 dma_cntl_poolp->buf_allocated = B_TRUE;
2025 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2026 hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2028 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2029 "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2030 "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2032 goto hxge_alloc_tx_mem_pool_exit;
2034 hxge_alloc_tx_mem_pool_fail2:
2035 /* Free control buffers */
2036 j--;
2037 for (; j >= 0; j--) {
2038 hxge_free_tx_cntl_dma(hxgep,
2039 (p_hxge_dma_common_t)dma_cntl_p[j]);
2042 hxge_alloc_tx_mem_pool_fail1:
2043 /* Free data buffers */
2044 i--;
2045 for (; i >= 0; i--) {
2046 hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2047 num_chunks[i]);
2050 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2051 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2052 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2053 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2054 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2056 hxge_alloc_tx_mem_pool_exit:
2057 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2058 "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2060 return (status);
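/*
 * hxge_alloc_tx_buf_dma() below satisfies the per-channel buffer
 * allocation in chunks: it walks the alloc_sizes[] table to the first
 * entry that covers the request (or the largest entry available), then
 * keeps allocating chunks, stepping down to smaller sizes on failure,
 * until the request is met, the size table is exhausted, or
 * HXGE_DMA_BLOCK chunks have been used.
 */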
2063 static hxge_status_t
2064 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2065 p_hxge_dma_common_t *dmap, size_t alloc_size,
2066 size_t block_size, uint32_t *num_chunks)
2068 p_hxge_dma_common_t tx_dmap;
2069 hxge_status_t status = HXGE_OK;
2070 size_t total_alloc_size;
2071 size_t allocated = 0;
2072 int i, size_index, array_size;
2074 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2076 tx_dmap = (p_hxge_dma_common_t)
2077 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2079 total_alloc_size = alloc_size;
2080 i = 0;
2081 size_index = 0;
2082 array_size = sizeof (alloc_sizes) / sizeof (size_t);
2083 while ((size_index < array_size) &&
2084 (alloc_sizes[size_index] < alloc_size))
2085 size_index++;
2086 if (size_index >= array_size) {
2087 size_index = array_size - 1;
2090 while ((allocated < total_alloc_size) &&
2091 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2092 tx_dmap[i].dma_chunk_index = i;
2093 tx_dmap[i].block_size = block_size;
2094 tx_dmap[i].alength = alloc_sizes[size_index];
2095 tx_dmap[i].orig_alength = tx_dmap[i].alength;
2096 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2097 tx_dmap[i].dma_channel = dma_channel;
2098 tx_dmap[i].contig_alloc_type = B_FALSE;
2100 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2101 &hxge_tx_dma_attr, tx_dmap[i].alength,
2102 &hxge_dev_buf_dma_acc_attr,
2103 DDI_DMA_WRITE | DDI_DMA_STREAMING,
2104 (p_hxge_dma_common_t)(&tx_dmap[i]));
2105 if (status != HXGE_OK) {
2106 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2107 " hxge_alloc_tx_buf_dma: Alloc Failed: "
2108 " for size: %d", alloc_sizes[size_index]));
2109 size_index--;
2110 } else {
2111 i++;
2112 allocated += alloc_sizes[size_index];
2116 if (allocated < total_alloc_size) {
2117 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2118 " hxge_alloc_tx_buf_dma: failed due to"
2119 " allocated(%d) < required(%d)",
2120 allocated, total_alloc_size));
2121 goto hxge_alloc_tx_mem_fail1;
2124 *num_chunks = i;
2125 *dmap = tx_dmap;
2126 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2127 "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2128 *dmap, i));
2129 goto hxge_alloc_tx_mem_exit;
2131 hxge_alloc_tx_mem_fail1:
2132 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2134 hxge_alloc_tx_mem_exit:
2135 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2136 "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2138 return (status);
2141 /*ARGSUSED*/
2142 static void
2143 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2144 uint32_t num_chunks)
2146 int i;
2148 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2150 for (i = 0; i < num_chunks; i++) {
2151 hxge_dma_mem_free(dmap++);
2154 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2157 /*ARGSUSED*/
2158 static hxge_status_t
2159 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2160 p_hxge_dma_common_t *dmap, size_t size)
2162 p_hxge_dma_common_t tx_dmap;
2163 hxge_status_t status = HXGE_OK;
2165 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2167 tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2168 KM_SLEEP);
2170 tx_dmap->contig_alloc_type = B_FALSE;
2172 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2173 &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2174 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2175 if (status != HXGE_OK) {
2176 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2177 " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2178 " for size: %d", size));
2179 goto hxge_alloc_tx_cntl_dma_fail1;
2182 *dmap = tx_dmap;
2184 goto hxge_alloc_tx_cntl_dma_exit;
2186 hxge_alloc_tx_cntl_dma_fail1:
2187 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2189 hxge_alloc_tx_cntl_dma_exit:
2190 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2191 "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2193 return (status);
2196 /*ARGSUSED*/
2197 static void
2198 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2200 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2202 hxge_dma_mem_free(dmap);
2204 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2207 static void
2208 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2210 uint32_t i, ndmas;
2211 p_hxge_dma_pool_t dma_poolp;
2212 p_hxge_dma_common_t *dma_buf_p;
2213 p_hxge_dma_pool_t dma_cntl_poolp;
2214 p_hxge_dma_common_t *dma_cntl_p;
2215 uint32_t *num_chunks;
2217 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2219 dma_poolp = hxgep->tx_buf_pool_p;
2220 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2221 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2222 "<== hxge_free_tx_mem_pool "
2223 "(null rx buf pool or buf not allocated"));
2224 return;
2227 dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2228 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2229 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2230 "<== hxge_free_tx_mem_pool "
2231 "(null tx cntl buf pool or cntl buf not allocated"));
2232 return;
2235 dma_buf_p = dma_poolp->dma_buf_pool_p;
2236 num_chunks = dma_poolp->num_chunks;
2238 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2239 ndmas = dma_cntl_poolp->ndmas;
2241 for (i = 0; i < ndmas; i++) {
2242 hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2245 for (i = 0; i < ndmas; i++) {
2246 hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2249 for (i = 0; i < ndmas; i++) {
2250 KMEM_FREE(dma_buf_p[i],
2251 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2252 KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2255 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2256 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2257 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2258 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2259 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2261 hxgep->tx_buf_pool_p = NULL;
2262 hxgep->tx_cntl_pool_p = NULL;
2264 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
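/*
 * hxge_dma_mem_alloc() wraps the standard DDI DMA setup sequence:
 * ddi_dma_alloc_handle(), ddi_dma_mem_alloc() and
 * ddi_dma_addr_bind_handle().  A binding that yields more than one
 * cookie is treated as a failure, since each chunk is handed to the
 * hardware as a single physically contiguous region; all partially
 * completed steps are unwound on any error.
 */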
2267 /*ARGSUSED*/
2268 static hxge_status_t
2269 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2270 struct ddi_dma_attr *dma_attrp,
2271 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2272 p_hxge_dma_common_t dma_p)
2274 caddr_t kaddrp;
2275 int ddi_status = DDI_SUCCESS;
2277 dma_p->dma_handle = NULL;
2278 dma_p->acc_handle = NULL;
2279 dma_p->kaddrp = NULL;
2281 ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2282 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2283 if (ddi_status != DDI_SUCCESS) {
2284 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2285 "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2286 return (HXGE_ERROR | HXGE_DDI_FAILED);
2289 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2290 xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2291 &dma_p->acc_handle);
2292 if (ddi_status != DDI_SUCCESS) {
2293 /* The caller will decide whether it is fatal */
2294 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2295 "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2296 ddi_dma_free_handle(&dma_p->dma_handle);
2297 dma_p->dma_handle = NULL;
2298 return (HXGE_ERROR | HXGE_DDI_FAILED);
2301 if (dma_p->alength < length) {
2302 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2303 "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2304 ddi_dma_mem_free(&dma_p->acc_handle);
2305 ddi_dma_free_handle(&dma_p->dma_handle);
2306 dma_p->acc_handle = NULL;
2307 dma_p->dma_handle = NULL;
2308 return (HXGE_ERROR);
2311 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2312 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2313 &dma_p->dma_cookie, &dma_p->ncookies);
2314 if (ddi_status != DDI_DMA_MAPPED) {
2315 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2316 "hxge_dma_mem_alloc:di_dma_addr_bind failed "
2317 "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2318 if (dma_p->acc_handle) {
2319 ddi_dma_mem_free(&dma_p->acc_handle);
2320 dma_p->acc_handle = NULL;
2322 ddi_dma_free_handle(&dma_p->dma_handle);
2323 dma_p->dma_handle = NULL;
2324 return (HXGE_ERROR | HXGE_DDI_FAILED);
2327 if (dma_p->ncookies != 1) {
2328 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2329 "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
2330 "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2331 if (dma_p->acc_handle) {
2332 ddi_dma_mem_free(&dma_p->acc_handle);
2333 dma_p->acc_handle = NULL;
2335 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2336 ddi_dma_free_handle(&dma_p->dma_handle);
2337 dma_p->dma_handle = NULL;
2338 return (HXGE_ERROR);
2341 dma_p->kaddrp = kaddrp;
2342 #if defined(__i386)
2343 dma_p->ioaddr_pp =
2344 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2345 #else
2346 dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2347 #endif
2349 HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2351 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2352 "dma buffer allocated: dma_p $%p "
2353 "return dmac_ladress from cookie $%p dmac_size %d "
2354 "dma_p->ioaddr_p $%p "
2355 "dma_p->orig_ioaddr_p $%p "
2356 "orig_vatopa $%p "
2357 "alength %d (0x%x) "
2358 "kaddrp $%p "
2359 "length %d (0x%x)",
2360 dma_p,
2361 dma_p->dma_cookie.dmac_laddress,
2362 dma_p->dma_cookie.dmac_size,
2363 dma_p->ioaddr_pp,
2364 dma_p->orig_ioaddr_pp,
2365 dma_p->orig_vatopa,
2366 dma_p->alength, dma_p->alength,
2367 kaddrp,
2368 length, length));
2370 return (HXGE_OK);
2373 static void
2374 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2376 if (dma_p == NULL)
2377 return;
2379 if (dma_p->dma_handle != NULL) {
2380 if (dma_p->ncookies) {
2381 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2382 dma_p->ncookies = 0;
2384 ddi_dma_free_handle(&dma_p->dma_handle);
2385 dma_p->dma_handle = NULL;
2388 if (dma_p->acc_handle != NULL) {
2389 ddi_dma_mem_free(&dma_p->acc_handle);
2390 dma_p->acc_handle = NULL;
2391 HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2394 dma_p->kaddrp = NULL;
2395 dma_p->alength = 0;
2399 * hxge_m_start() -- start transmitting and receiving.
2401 * This function is called by the MAC layer when the first
2402 * stream is opened to prepare the hardware for transmitting
2403 * and receiving packets.
2405 static int
2406 hxge_m_start(void *arg)
2408 p_hxge_t hxgep = (p_hxge_t)arg;
2410 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2412 MUTEX_ENTER(hxgep->genlock);
2414 if (hxge_init(hxgep) != DDI_SUCCESS) {
2415 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2416 "<== hxge_m_start: initialization failed"));
2417 MUTEX_EXIT(hxgep->genlock);
2418 return (EIO);
2421 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2423 * Start a timer to check for system errors and tx hangs.
2425 hxgep->hxge_timerid = hxge_start_timer(hxgep,
2426 hxge_check_hw_state, HXGE_CHECK_TIMER);
2428 hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2430 hxgep->timeout.link_status = 0;
2431 hxgep->timeout.report_link_status = B_TRUE;
2432 hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2434 /* Start the link status timer to check the link status */
2435 MUTEX_ENTER(&hxgep->timeout.lock);
2436 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2437 hxgep->timeout.ticks);
2438 MUTEX_EXIT(&hxgep->timeout.lock);
2441 MUTEX_EXIT(hxgep->genlock);
2443 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2445 return (0);
2449 * hxge_m_stop(): stop transmitting and receiving.
2451 static void
2452 hxge_m_stop(void *arg)
2454 p_hxge_t hxgep = (p_hxge_t)arg;
2456 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2458 if (hxgep->hxge_timerid) {
2459 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2460 hxgep->hxge_timerid = 0;
2463 /* Stop the link status timer before unregistering */
2464 MUTEX_ENTER(&hxgep->timeout.lock);
2465 if (hxgep->timeout.id) {
2466 (void) untimeout(hxgep->timeout.id);
2467 hxgep->timeout.id = 0;
2469 hxge_link_update(hxgep, LINK_STATE_DOWN);
2470 MUTEX_EXIT(&hxgep->timeout.lock);
2472 MUTEX_ENTER(hxgep->genlock);
2474 hxge_uninit(hxgep);
2476 hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2478 MUTEX_EXIT(hxgep->genlock);
2480 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2483 static int
2484 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2486 p_hxge_t hxgep = (p_hxge_t)arg;
2487 struct ether_addr addrp;
2489 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2491 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2493 if (add) {
2494 if (hxge_add_mcast_addr(hxgep, &addrp)) {
2495 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2496 "<== hxge_m_multicst: add multicast failed"));
2497 return (EINVAL);
2499 } else {
2500 if (hxge_del_mcast_addr(hxgep, &addrp)) {
2501 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2502 "<== hxge_m_multicst: del multicast failed"));
2503 return (EINVAL);
2507 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2509 return (0);
2512 static int
2513 hxge_m_promisc(void *arg, boolean_t on)
2515 p_hxge_t hxgep = (p_hxge_t)arg;
2517 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2519 if (hxge_set_promisc(hxgep, on)) {
2520 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2521 "<== hxge_m_promisc: set promisc failed"));
2522 return (EINVAL);
2525 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2527 return (0);
2530 static void
2531 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2533 p_hxge_t hxgep = (p_hxge_t)arg;
2534 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
2535 boolean_t need_privilege;
2536 int err;
2537 int cmd;
2539 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2541 iocp = (struct iocblk *)mp->b_rptr;
2542 iocp->ioc_error = 0;
2543 need_privilege = B_TRUE;
2544 cmd = iocp->ioc_cmd;
2546 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2547 switch (cmd) {
2548 default:
2549 miocnak(wq, mp, 0, EINVAL);
2550 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2551 return;
2553 case LB_GET_INFO_SIZE:
2554 case LB_GET_INFO:
2555 case LB_GET_MODE:
2556 need_privilege = B_FALSE;
2557 break;
2559 case LB_SET_MODE:
2560 break;
2562 case ND_GET:
2563 need_privilege = B_FALSE;
2564 break;
2565 case ND_SET:
2566 break;
2568 case HXGE_GET_TX_RING_SZ:
2569 case HXGE_GET_TX_DESC:
2570 case HXGE_TX_SIDE_RESET:
2571 case HXGE_RX_SIDE_RESET:
2572 case HXGE_GLOBAL_RESET:
2573 case HXGE_RESET_MAC:
2574 case HXGE_PUT_TCAM:
2575 case HXGE_GET_TCAM:
2576 case HXGE_RTRACE:
2578 need_privilege = B_FALSE;
2579 break;
2582 if (need_privilege) {
2583 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2584 if (err != 0) {
2585 miocnak(wq, mp, 0, err);
2586 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2587 "<== hxge_m_ioctl: no priv"));
2588 return;
2592 switch (cmd) {
2593 case ND_GET:
2594 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
2595 case ND_SET:
2596 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2597 hxge_param_ioctl(hxgep, wq, mp, iocp);
2598 break;
2600 case LB_GET_MODE:
2601 case LB_SET_MODE:
2602 case LB_GET_INFO_SIZE:
2603 case LB_GET_INFO:
2604 hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2605 break;
2607 case HXGE_PUT_TCAM:
2608 case HXGE_GET_TCAM:
2609 case HXGE_GET_TX_RING_SZ:
2610 case HXGE_GET_TX_DESC:
2611 case HXGE_TX_SIDE_RESET:
2612 case HXGE_RX_SIDE_RESET:
2613 case HXGE_GLOBAL_RESET:
2614 case HXGE_RESET_MAC:
2615 HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2616 "==> hxge_m_ioctl: cmd 0x%x", cmd));
2617 hxge_hw_ioctl(hxgep, wq, mp, iocp);
2618 break;
2621 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
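/*
 * The ring start/stop entry points below are called by the GLDv3
 * framework for each exported ring.  Start records the framework's
 * mac_ring_handle_t in the driver ring structure (under the ring lock)
 * and marks the handle started; stop clears the handle and the polling
 * pointers again so the data paths stop calling back into the framework.
 */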
2624 /*ARGSUSED*/
2625 static int
2626 hxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2628 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2629 p_hxge_t hxgep;
2630 p_tx_ring_t ring;
2632 ASSERT(rhp != NULL);
2633 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2635 hxgep = rhp->hxgep;
2638 * Get the ring pointer.
2640 ring = hxgep->tx_rings->rings[rhp->index];
2643 * Fill in the handle for the transmit.
2645 MUTEX_ENTER(&ring->lock);
2646 rhp->started = B_TRUE;
2647 ring->ring_handle = rhp->ring_handle;
2648 MUTEX_EXIT(&ring->lock);
2650 return (0);
2653 static void
2654 hxge_tx_ring_stop(mac_ring_driver_t rdriver)
2656 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2657 p_hxge_t hxgep;
2658 p_tx_ring_t ring;
2660 ASSERT(rhp != NULL);
2661 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2663 hxgep = rhp->hxgep;
2664 ring = hxgep->tx_rings->rings[rhp->index];
2666 MUTEX_ENTER(&ring->lock);
2667 ring->ring_handle = (mac_ring_handle_t)NULL;
2668 rhp->started = B_FALSE;
2669 MUTEX_EXIT(&ring->lock);
2672 static int
2673 hxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2675 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2676 p_hxge_t hxgep;
2677 p_rx_rcr_ring_t ring;
2678 int i;
2680 ASSERT(rhp != NULL);
2681 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2683 hxgep = rhp->hxgep;
2686 * Get pointer to ring.
2688 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2690 MUTEX_ENTER(&ring->lock);
2692 if (rhp->started) {
2693 MUTEX_EXIT(&ring->lock);
2694 return (0);
2698 * Set the ldvp and ldgp pointers to enable/disable
2699 * polling.
2701 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2702 if ((hxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
2703 (hxgep->ldgvp->ldvp[i].channel == rhp->index)) {
2704 ring->ldvp = &hxgep->ldgvp->ldvp[i];
2705 ring->ldgp = hxgep->ldgvp->ldvp[i].ldgp;
2706 break;
2710 rhp->started = B_TRUE;
2711 ring->rcr_mac_handle = rhp->ring_handle;
2712 ring->rcr_gen_num = mr_gen_num;
2713 MUTEX_EXIT(&ring->lock);
2715 return (0);
2718 static void
2719 hxge_rx_ring_stop(mac_ring_driver_t rdriver)
2721 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2722 p_hxge_t hxgep;
2723 p_rx_rcr_ring_t ring;
2725 ASSERT(rhp != NULL);
2726 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2728 hxgep = rhp->hxgep;
2729 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2731 MUTEX_ENTER(&ring->lock);
2732 rhp->started = B_FALSE;
2733 ring->rcr_mac_handle = NULL;
2734 ring->ldvp = NULL;
2735 ring->ldgp = NULL;
2736 MUTEX_EXIT(&ring->lock);
2739 static int
2740 hxge_rx_group_start(mac_group_driver_t gdriver)
2742 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2744 ASSERT(group->hxgep != NULL);
2745 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2747 MUTEX_ENTER(group->hxgep->genlock);
2748 group->started = B_TRUE;
2749 MUTEX_EXIT(group->hxgep->genlock);
2751 return (0);
2754 static void
2755 hxge_rx_group_stop(mac_group_driver_t gdriver)
2757 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2759 ASSERT(group->hxgep != NULL);
2760 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2761 ASSERT(group->started == B_TRUE);
2763 MUTEX_ENTER(group->hxgep->genlock);
2764 group->started = B_FALSE;
2765 MUTEX_EXIT(group->hxgep->genlock);
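/*
 * The hxge_mmac_*() helpers below manage the PFC MAC address slots:
 * get_slot finds the first unused slot, set_addr programs a slot via
 * hxge_pfc_set_mac_address() and updates the set/available/primary
 * bookkeeping, find_addr maps an address back to its slot, and
 * unset_addr clears a slot and returns it to the available pool.
 */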
2768 static int
2769 hxge_mmac_get_slot(p_hxge_t hxgep, int *slot)
2771 int i;
2774 * Find an open slot.
2776 for (i = 0; i < hxgep->mmac.total; i++) {
2777 if (!hxgep->mmac.addrs[i].set) {
2778 *slot = i;
2779 return (0);
2783 return (ENXIO);
2786 static int
2787 hxge_mmac_set_addr(p_hxge_t hxgep, int slot, const uint8_t *addr)
2789 struct ether_addr eaddr;
2790 hxge_status_t status = HXGE_OK;
2792 bcopy(addr, (uint8_t *)&eaddr, ETHERADDRL);
2795 * Set new interface local address and re-init device.
2796 * This is destructive to any other streams attached
2797 * to this device.
2799 RW_ENTER_WRITER(&hxgep->filter_lock);
2800 status = hxge_pfc_set_mac_address(hxgep, slot, &eaddr);
2801 RW_EXIT(&hxgep->filter_lock);
2802 if (status != HXGE_OK)
2803 return (status);
2805 hxgep->mmac.addrs[slot].set = B_TRUE;
2806 bcopy(addr, hxgep->mmac.addrs[slot].addr, ETHERADDRL);
2807 hxgep->mmac.available--;
2808 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2809 hxgep->mmac.addrs[slot].primary = B_TRUE;
2811 return (0);
2814 static int
2815 hxge_mmac_find_addr(p_hxge_t hxgep, const uint8_t *addr, int *slot)
2817 int i, result;
2819 for (i = 0; i < hxgep->mmac.total; i++) {
2820 if (hxgep->mmac.addrs[i].set) {
2821 result = memcmp(hxgep->mmac.addrs[i].addr,
2822 addr, ETHERADDRL);
2823 if (result == 0) {
2824 *slot = i;
2825 return (0);
2830 return (EINVAL);
2833 static int
2834 hxge_mmac_unset_addr(p_hxge_t hxgep, int slot)
2836 hxge_status_t status;
2837 int i;
2839 status = hxge_pfc_clear_mac_address(hxgep, slot);
2840 if (status != HXGE_OK)
2841 return (status);
2843 for (i = 0; i < ETHERADDRL; i++)
2844 hxgep->mmac.addrs[slot].addr[i] = 0;
2846 hxgep->mmac.addrs[slot].set = B_FALSE;
2847 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2848 hxgep->mmac.addrs[slot].primary = B_FALSE;
2849 hxgep->mmac.available++;
2851 return (0);
2854 static int
2855 hxge_rx_group_add_mac(void *arg, const uint8_t *mac_addr)
2857 hxge_ring_group_t *group = arg;
2858 p_hxge_t hxgep = group->hxgep;
2859 int slot = 0;
2861 ASSERT(group->type == MAC_RING_TYPE_RX);
2863 MUTEX_ENTER(hxgep->genlock);
2866 * Find a slot for the address.
2868 if (hxge_mmac_get_slot(hxgep, &slot) != 0) {
2869 MUTEX_EXIT(hxgep->genlock);
2870 return (ENOSPC);
2874 * Program the MAC address.
2876 if (hxge_mmac_set_addr(hxgep, slot, mac_addr) != 0) {
2877 MUTEX_EXIT(hxgep->genlock);
2878 return (ENOSPC);
2881 MUTEX_EXIT(hxgep->genlock);
2882 return (0);
2885 static int
2886 hxge_rx_group_rem_mac(void *arg, const uint8_t *mac_addr)
2888 hxge_ring_group_t *group = arg;
2889 p_hxge_t hxgep = group->hxgep;
2890 int rv, slot;
2892 ASSERT(group->type == MAC_RING_TYPE_RX);
2894 MUTEX_ENTER(hxgep->genlock);
2896 if ((rv = hxge_mmac_find_addr(hxgep, mac_addr, &slot)) != 0) {
2897 MUTEX_EXIT(hxgep->genlock);
2898 return (rv);
2901 if ((rv = hxge_mmac_unset_addr(hxgep, slot)) != 0) {
2902 MUTEX_EXIT(hxgep->genlock);
2903 return (rv);
2906 MUTEX_EXIT(hxgep->genlock);
2907 return (0);
2910 static void
2911 hxge_group_get(void *arg, mac_ring_type_t type, int groupid,
2912 mac_group_info_t *infop, mac_group_handle_t gh)
2914 p_hxge_t hxgep = arg;
2915 hxge_ring_group_t *group;
2917 ASSERT(type == MAC_RING_TYPE_RX);
2919 switch (type) {
2920 case MAC_RING_TYPE_RX:
2921 group = &hxgep->rx_groups[groupid];
2922 group->hxgep = hxgep;
2923 group->ghandle = gh;
2924 group->index = groupid;
2925 group->type = type;
2927 infop->mgi_driver = (mac_group_driver_t)group;
2928 infop->mgi_start = hxge_rx_group_start;
2929 infop->mgi_stop = hxge_rx_group_stop;
2930 infop->mgi_addmac = hxge_rx_group_add_mac;
2931 infop->mgi_remmac = hxge_rx_group_rem_mac;
2932 infop->mgi_count = HXGE_MAX_RDCS;
2933 break;
2935 case MAC_RING_TYPE_TX:
2936 default:
2937 break;
2941 static int
2942 hxge_ring_get_htable_idx(p_hxge_t hxgep, mac_ring_type_t type, uint32_t channel)
2944 int i;
2946 ASSERT(hxgep->ldgvp != NULL);
2948 switch (type) {
2949 case MAC_RING_TYPE_RX:
2950 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2951 if ((hxgep->ldgvp->ldvp[i].is_rxdma) &&
2952 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2953 return ((int)
2954 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2957 break;
2959 case MAC_RING_TYPE_TX:
2960 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2961 if ((hxgep->ldgvp->ldvp[i].is_txdma) &&
2962 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2963 return ((int)
2964 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2967 break;
2969 default:
2970 break;
2973 return (-1);
2977 * Callback function for the GLDv3 layer to register all rings.
2979 /*ARGSUSED*/
2980 static void
2981 hxge_fill_ring(void *arg, mac_ring_type_t type, const int rg_index,
2982 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2984 p_hxge_t hxgep = arg;
2986 ASSERT(hxgep != NULL);
2987 ASSERT(infop != NULL);
2989 switch (type) {
2990 case MAC_RING_TYPE_TX: {
2991 p_hxge_ring_handle_t rhp;
2992 mac_intr_t *mintr = &infop->mri_intr;
2993 p_hxge_intr_t intrp;
2994 int htable_idx;
2996 ASSERT((index >= 0) && (index < HXGE_MAX_TDCS));
2997 rhp = &hxgep->tx_ring_handles[index];
2998 rhp->hxgep = hxgep;
2999 rhp->index = index;
3000 rhp->ring_handle = rh;
3001 infop->mri_driver = (mac_ring_driver_t)rhp;
3002 infop->mri_start = hxge_tx_ring_start;
3003 infop->mri_stop = hxge_tx_ring_stop;
3004 infop->mri_tx = hxge_tx_ring_send;
3005 infop->mri_stat = hxge_tx_ring_stat;
3007 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3008 htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
3009 if (htable_idx >= 0)
3010 mintr->mi_ddi_handle = intrp->htable[htable_idx];
3011 else
3012 mintr->mi_ddi_handle = NULL;
3013 break;
3016 case MAC_RING_TYPE_RX: {
3017 p_hxge_ring_handle_t rhp;
3018 mac_intr_t hxge_mac_intr;
3019 p_hxge_intr_t intrp;
3020 int htable_idx;
3022 ASSERT((index >= 0) && (index < HXGE_MAX_RDCS));
3023 rhp = &hxgep->rx_ring_handles[index];
3024 rhp->hxgep = hxgep;
3025 rhp->index = index;
3026 rhp->ring_handle = rh;
3029 * Entrypoint to enable interrupt (disable poll) and
3030 * disable interrupt (enable poll).
3032 hxge_mac_intr.mi_handle = (mac_intr_handle_t)rhp;
3033 hxge_mac_intr.mi_enable = (mac_intr_enable_t)hxge_disable_poll;
3034 hxge_mac_intr.mi_disable = (mac_intr_disable_t)hxge_enable_poll;
3036 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3037 htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
3038 if (htable_idx >= 0)
3039 hxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
3040 else
3041 hxge_mac_intr.mi_ddi_handle = NULL;
3043 infop->mri_driver = (mac_ring_driver_t)rhp;
3044 infop->mri_start = hxge_rx_ring_start;
3045 infop->mri_stop = hxge_rx_ring_stop;
3046 infop->mri_intr = hxge_mac_intr;
3047 infop->mri_poll = hxge_rx_poll;
3048 infop->mri_stat = hxge_rx_ring_stat;
3049 break;
3052 default:
3053 break;
3057 /*ARGSUSED*/
3058 boolean_t
3059 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3061 p_hxge_t hxgep = arg;
3063 switch (cap) {
3064 case MAC_CAPAB_HCKSUM: {
3065 uint32_t *txflags = cap_data;
3067 *txflags = HCKSUM_INET_PARTIAL;
3068 break;
3071 case MAC_CAPAB_RINGS: {
3072 mac_capab_rings_t *cap_rings = cap_data;
3074 MUTEX_ENTER(hxgep->genlock);
3075 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
3076 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3077 cap_rings->mr_rnum = HXGE_MAX_RDCS;
3078 cap_rings->mr_rget = hxge_fill_ring;
3079 cap_rings->mr_gnum = HXGE_MAX_RX_GROUPS;
3080 cap_rings->mr_gget = hxge_group_get;
3081 cap_rings->mr_gaddring = NULL;
3082 cap_rings->mr_gremring = NULL;
3083 } else {
3084 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3085 cap_rings->mr_rnum = HXGE_MAX_TDCS;
3086 cap_rings->mr_rget = hxge_fill_ring;
3087 cap_rings->mr_gnum = 0;
3088 cap_rings->mr_gget = NULL;
3089 cap_rings->mr_gaddring = NULL;
3090 cap_rings->mr_gremring = NULL;
3092 MUTEX_EXIT(hxgep->genlock);
3093 break;
3096 default:
3097 return (B_FALSE);
3099 return (B_TRUE);
3102 static boolean_t
3103 hxge_param_locked(mac_prop_id_t pr_num)
3106 * All adv_* parameters are locked (read-only) while
3107 * the device is in any sort of loopback mode ...
3109 switch (pr_num) {
3110 case MAC_PROP_ADV_1000FDX_CAP:
3111 case MAC_PROP_EN_1000FDX_CAP:
3112 case MAC_PROP_ADV_1000HDX_CAP:
3113 case MAC_PROP_EN_1000HDX_CAP:
3114 case MAC_PROP_ADV_100FDX_CAP:
3115 case MAC_PROP_EN_100FDX_CAP:
3116 case MAC_PROP_ADV_100HDX_CAP:
3117 case MAC_PROP_EN_100HDX_CAP:
3118 case MAC_PROP_ADV_10FDX_CAP:
3119 case MAC_PROP_EN_10FDX_CAP:
3120 case MAC_PROP_ADV_10HDX_CAP:
3121 case MAC_PROP_EN_10HDX_CAP:
3122 case MAC_PROP_AUTONEG:
3123 case MAC_PROP_FLOWCTRL:
3124 return (B_TRUE);
3126 return (B_FALSE);
3130 * callback functions for set/get of properties
3132 static int
3133 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3134 uint_t pr_valsize, const void *pr_val)
3136 hxge_t *hxgep = barg;
3137 p_hxge_stats_t statsp;
3138 int err = 0;
3139 uint32_t new_mtu, old_framesize, new_framesize;
3141 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3143 statsp = hxgep->statsp;
3144 MUTEX_ENTER(hxgep->genlock);
3145 if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3146 hxge_param_locked(pr_num)) {
3148 * All adv_* parameters are locked (read-only)
3149 * while the device is in any sort of loopback mode.
3151 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3152 "==> hxge_m_setprop: loopback mode: read only"));
3153 MUTEX_EXIT(hxgep->genlock);
3154 return (EBUSY);
3157 switch (pr_num) {
3159 * These properties either do not exist or are read-only.
3161 case MAC_PROP_EN_1000FDX_CAP:
3162 case MAC_PROP_EN_100FDX_CAP:
3163 case MAC_PROP_EN_10FDX_CAP:
3164 case MAC_PROP_EN_1000HDX_CAP:
3165 case MAC_PROP_EN_100HDX_CAP:
3166 case MAC_PROP_EN_10HDX_CAP:
3167 case MAC_PROP_ADV_1000FDX_CAP:
3168 case MAC_PROP_ADV_1000HDX_CAP:
3169 case MAC_PROP_ADV_100FDX_CAP:
3170 case MAC_PROP_ADV_100HDX_CAP:
3171 case MAC_PROP_ADV_10FDX_CAP:
3172 case MAC_PROP_ADV_10HDX_CAP:
3173 case MAC_PROP_STATUS:
3174 case MAC_PROP_SPEED:
3175 case MAC_PROP_DUPLEX:
3176 case MAC_PROP_AUTONEG:
3178 * Flow control is handled in the shared domain and
3179 * it is read-only here.
3181 case MAC_PROP_FLOWCTRL:
3182 err = EINVAL;
3183 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3184 "==> hxge_m_setprop: read only property %d",
3185 pr_num));
3186 break;
3188 case MAC_PROP_MTU:
3189 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3190 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3191 "==> hxge_m_setprop: set MTU: %d", new_mtu));
3193 new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3194 if (new_framesize == hxgep->vmac.maxframesize) {
3195 err = 0;
3196 break;
3199 if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3200 err = EBUSY;
3201 break;
3204 if (new_framesize < MIN_FRAME_SIZE ||
3205 new_framesize > MAX_FRAME_SIZE) {
3206 err = EINVAL;
3207 break;
3210 old_framesize = hxgep->vmac.maxframesize;
3211 hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3213 if (hxge_vmac_set_framesize(hxgep)) {
3214 hxgep->vmac.maxframesize =
3215 (uint16_t)old_framesize;
3216 err = EINVAL;
3217 break;
3220 err = mac_maxsdu_update(hxgep->mach, new_mtu);
3221 if (err) {
3222 hxgep->vmac.maxframesize =
3223 (uint16_t)old_framesize;
3224 (void) hxge_vmac_set_framesize(hxgep);
3227 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3228 "==> hxge_m_setprop: set MTU: %d maxframe %d",
3229 new_mtu, hxgep->vmac.maxframesize));
3230 break;
3232 case MAC_PROP_PRIVATE:
3233 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3234 "==> hxge_m_setprop: private property"));
3235 err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3236 pr_val);
3237 break;
3239 default:
3240 err = ENOTSUP;
3241 break;
3244 MUTEX_EXIT(hxgep->genlock);
3246 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3247 "<== hxge_m_setprop (return %d)", err));
3249 return (err);
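/*
 * Usage note (illustrative, not from this file): an MTU set through
 * hxge_m_setprop() is converted to a frame size by adding
 * MTU_TO_FRAME_SIZE, must land within [MIN_FRAME_SIZE, MAX_FRAME_SIZE],
 * and can only be changed while the MAC is stopped.  From userland this
 * would typically be driven by something like
 *	dladm set-linkprop -p mtu=9000 hxge0
 * where "hxge0" is just a hypothetical link name.
 */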
3252 static int
3253 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3254 uint_t pr_valsize, void *pr_val)
3256 hxge_t *hxgep = barg;
3257 p_hxge_stats_t statsp = hxgep->statsp;
3258 int err = 0;
3259 link_flowctrl_t fl;
3260 uint64_t tmp = 0;
3261 link_state_t ls;
3263 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3264 "==> hxge_m_getprop: pr_num %d", pr_num));
3266 switch (pr_num) {
3267 case MAC_PROP_DUPLEX:
3268 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3269 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3270 "==> hxge_m_getprop: duplex mode %d",
3271 *(uint8_t *)pr_val));
3272 break;
3274 case MAC_PROP_SPEED:
3275 ASSERT(pr_valsize >= sizeof (uint64_t));
3276 tmp = statsp->mac_stats.link_speed * 1000000ull;
3277 bcopy(&tmp, pr_val, sizeof (tmp));
3278 break;
3280 case MAC_PROP_STATUS:
3281 ASSERT(pr_valsize >= sizeof (link_state_t));
3282 if (!statsp->mac_stats.link_up)
3283 ls = LINK_STATE_DOWN;
3284 else
3285 ls = LINK_STATE_UP;
3286 bcopy(&ls, pr_val, sizeof (ls));
3287 break;
3289 case MAC_PROP_FLOWCTRL:
3291 * Flow control is supported by the shared domain and
3292 * it is currently transmit only
3294 ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3295 fl = LINK_FLOWCTRL_TX;
3296 bcopy(&fl, pr_val, sizeof (fl));
3297 break;
3298 case MAC_PROP_AUTONEG:
3299 /* 10G link only and it is not negotiable */
3300 *(uint8_t *)pr_val = 0;
3301 break;
3302 case MAC_PROP_ADV_1000FDX_CAP:
3303 case MAC_PROP_ADV_100FDX_CAP:
3304 case MAC_PROP_ADV_10FDX_CAP:
3305 case MAC_PROP_ADV_1000HDX_CAP:
3306 case MAC_PROP_ADV_100HDX_CAP:
3307 case MAC_PROP_ADV_10HDX_CAP:
3308 case MAC_PROP_EN_1000FDX_CAP:
3309 case MAC_PROP_EN_100FDX_CAP:
3310 case MAC_PROP_EN_10FDX_CAP:
3311 case MAC_PROP_EN_1000HDX_CAP:
3312 case MAC_PROP_EN_100HDX_CAP:
3313 case MAC_PROP_EN_10HDX_CAP:
3314 err = ENOTSUP;
3315 break;
3317 case MAC_PROP_PRIVATE:
3318 err = hxge_get_priv_prop(hxgep, pr_name, pr_valsize,
3319 pr_val);
3320 break;
3322 default:
3323 err = EINVAL;
3324 break;
3327 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3329 return (err);
3332 static void
3333 hxge_m_propinfo(void *arg, const char *pr_name,
3334 mac_prop_id_t pr_num, mac_prop_info_handle_t prh)
3336 _NOTE(ARGUNUSED(arg));
3337 switch (pr_num) {
3338 case MAC_PROP_DUPLEX:
3339 case MAC_PROP_SPEED:
3340 case MAC_PROP_STATUS:
3341 case MAC_PROP_AUTONEG:
3342 case MAC_PROP_FLOWCTRL:
3343 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3344 break;
3346 case MAC_PROP_MTU:
3347 mac_prop_info_set_range_uint32(prh,
3348 MIN_FRAME_SIZE - MTU_TO_FRAME_SIZE,
3349 MAX_FRAME_SIZE - MTU_TO_FRAME_SIZE);
3350 break;
3352 case MAC_PROP_PRIVATE: {
3353 char valstr[MAXNAMELEN];
3355 bzero(valstr, sizeof (valstr));
3357 /* Receive Interrupt Blanking Parameters */
3358 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3359 (void) snprintf(valstr, sizeof (valstr), "%d",
3360 RXDMA_RCR_TO_DEFAULT);
3361 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3362 (void) snprintf(valstr, sizeof (valstr), "%d",
3363 RXDMA_RCR_PTHRES_DEFAULT);
3365 /* Classification and Load Distribution Configuration */
3366 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3367 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3368 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3369 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3370 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3371 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3372 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3373 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3374 (void) snprintf(valstr, sizeof (valstr), "%d",
3375 HXGE_CLASS_TCAM_LOOKUP);
3378 if (strlen(valstr) > 0)
3379 mac_prop_info_set_default_str(prh, valstr);
3380 break;
3386 /* ARGSUSED */
3387 static int
3388 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3389 const void *pr_val)
3391 p_hxge_param_t param_arr = hxgep->param_arr;
3392 int err = 0;
3394 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3395 "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3397 if (pr_val == NULL) {
3398 return (EINVAL);
3401 /* Blanking */
3402 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3403 err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3404 (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3405 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3406 err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3407 (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3409 /* Classification */
3410 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3411 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3412 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3413 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3414 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3415 (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3416 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3417 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3418 (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3419 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3420 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3421 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3422 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3423 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3424 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3425 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3426 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3427 (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3428 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3429 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3430 (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3431 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3432 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3433 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3434 } else {
3435 err = EINVAL;
3438 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3439 "<== hxge_set_priv_prop: err %d", err));
3441 return (err);
3444 static int
3445 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3446 void *pr_val)
3448 p_hxge_param_t param_arr = hxgep->param_arr;
3449 char valstr[MAXNAMELEN];
3450 int err = 0;
3451 uint_t strsize;
3452 int value = 0;
3454 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3455 "==> hxge_get_priv_prop: property %s", pr_name));
3457 /* Receive Interrupt Blanking Parameters */
3458 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3459 value = hxgep->intr_timeout;
3460 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3461 value = hxgep->intr_threshold;
3463 /* Classification and Load Distribution Configuration */
3464 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3465 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3466 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3468 value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3469 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3470 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3471 (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3473 value = (int)param_arr[param_class_opt_ipv4_udp].value;
3474 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3475 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3476 (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3478 value = (int)param_arr[param_class_opt_ipv4_ah].value;
3479 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3480 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3481 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3483 value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3484 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3485 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3486 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3488 value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3489 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3490 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3491 (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3493 value = (int)param_arr[param_class_opt_ipv6_udp].value;
3494 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3495 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3496 (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3498 value = (int)param_arr[param_class_opt_ipv6_ah].value;
3499 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3500 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3501 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3503 value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3504 } else {
3505 err = EINVAL;
3508 if (err == 0) {
3509 (void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3511 strsize = (uint_t)strlen(valstr);
3512 if (pr_valsize < strsize) {
3513 err = ENOBUFS;
3514 } else {
3515 (void) strlcpy(pr_val, valstr, pr_valsize);
3519 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3520 "<== hxge_get_priv_prop: return %d", err));
3522 return (err);
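/*
 * Usage note (illustrative): the private properties handled above are
 * the leading-underscore names such as _rxdma_intr_time,
 * _rxdma_intr_pkts and the _class_opt_* tunables, and would normally
 * be inspected with something like
 *	dladm show-linkprop -p _rxdma_intr_time hxge0
 * ("hxge0" is a hypothetical link name); hxge_get_priv_prop() returns
 * the values as hex strings.
 */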
3525 * Module loading and removing entry points.
3527 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3528 nodev, NULL, D_MP, NULL, NULL);
3530 extern struct mod_ops mod_driverops;
3532 #define HXGE_DESC_VER "HXGE 10Gb Ethernet Driver"
3535 * Module linkage information for the kernel.
3537 static struct modldrv hxge_modldrv = {
3538 &mod_driverops,
3539 HXGE_DESC_VER,
3540 &hxge_dev_ops
3543 static struct modlinkage modlinkage = {
3544 MODREV_1, (void *) &hxge_modldrv, NULL
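/*
 * _init() prepares hxge_dev_ops with mac_init_ops() and sets up the
 * soft-state list before calling mod_install(); _fini() refuses to
 * unload while receive mblks are still outstanding (hxge_mblks_pending)
 * and otherwise undoes the above in reverse order.
 */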
3547 int
3548 _init(void)
3550 int status;
3552 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3553 mac_init_ops(&hxge_dev_ops, "hxge");
3554 status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3555 if (status != 0) {
3556 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3557 "failed to init device soft state"));
3558 mac_fini_ops(&hxge_dev_ops);
3559 goto _init_exit;
3562 status = mod_install(&modlinkage);
3563 if (status != 0) {
3564 ddi_soft_state_fini(&hxge_list);
3565 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3566 goto _init_exit;
3569 MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3571 _init_exit:
3572 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3574 return (status);
3577 int
3578 _fini(void)
3580 int status;
3582 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3584 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3586 if (hxge_mblks_pending)
3587 return (EBUSY);
3589 status = mod_remove(&modlinkage);
3590 if (status != DDI_SUCCESS) {
3591 HXGE_DEBUG_MSG((NULL, MOD_CTL,
3592 "Module removal failed 0x%08x", status));
3593 goto _fini_exit;
3596 mac_fini_ops(&hxge_dev_ops);
3598 ddi_soft_state_fini(&hxge_list);
3600 MUTEX_DESTROY(&hxge_common_lock);
3602 _fini_exit:
3603 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3605 return (status);
3608 int
3609 _info(struct modinfo *modinfop)
3611 int status;
3613 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3614 status = mod_info(&modlinkage, modinfop);
3615 HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3617 return (status);
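/*
 * Interrupt setup: hxge_add_intrs() queries the supported interrupt
 * types and picks one according to the hxge_msi_enable tunable
 * (2 prefers MSI-X, 1 prefers MSI, anything else uses fixed
 * interrupts), then, with hxge_msi_enable set, hands off to
 * hxge_add_intrs_adv(), which dispatches to the MSI/MSI-X or fixed
 * allocation path.
 */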
3620 /*ARGSUSED*/
3621 static hxge_status_t
3622 hxge_add_intrs(p_hxge_t hxgep)
3624 int intr_types;
3625 int type = 0;
3626 int ddi_status = DDI_SUCCESS;
3627 hxge_status_t status = HXGE_OK;
3629 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3631 hxgep->hxge_intr_type.intr_registered = B_FALSE;
3632 hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3633 hxgep->hxge_intr_type.msi_intx_cnt = 0;
3634 hxgep->hxge_intr_type.intr_added = 0;
3635 hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3636 hxgep->hxge_intr_type.intr_type = 0;
3638 if (hxge_msi_enable) {
3639 hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3642 /* Get the supported interrupt types */
3643 if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3644 != DDI_SUCCESS) {
3645 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3646 "ddi_intr_get_supported_types failed: status 0x%08x",
3647 ddi_status));
3648 return (HXGE_ERROR | HXGE_DDI_FAILED);
3651 hxgep->hxge_intr_type.intr_types = intr_types;
3653 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3654 "ddi_intr_get_supported_types: 0x%08x", intr_types));
3657 * Pick the interrupt type to use, based on hxge_msi_enable:
3658 * 1 - prefer MSI
3659 * 2 - prefer MSI-X
3660 * others - fixed (INTx emulation)
3662 switch (hxge_msi_enable) {
3663 default:
3664 type = DDI_INTR_TYPE_FIXED;
3665 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3666 "use fixed (intx emulation) type %08x", type));
3667 break;
3669 case 2:
3670 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3671 "ddi_intr_get_supported_types: 0x%08x", intr_types));
3672 if (intr_types & DDI_INTR_TYPE_MSIX) {
3673 type = DDI_INTR_TYPE_MSIX;
3674 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3675 "==> hxge_add_intrs: "
3676 "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3677 } else if (intr_types & DDI_INTR_TYPE_MSI) {
3678 type = DDI_INTR_TYPE_MSI;
3679 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3680 "==> hxge_add_intrs: "
3681 "ddi_intr_get_supported_types: MSI 0x%08x", type));
3682 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3683 type = DDI_INTR_TYPE_FIXED;
3684 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3685 "ddi_intr_get_supported_types: MSXED0x%08x", type));
3687 break;
3689 case 1:
3690 if (intr_types & DDI_INTR_TYPE_MSI) {
3691 type = DDI_INTR_TYPE_MSI;
3692 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3693 "==> hxge_add_intrs: "
3694 "ddi_intr_get_supported_types: MSI 0x%08x", type));
3695 } else if (intr_types & DDI_INTR_TYPE_MSIX) {
3696 type = DDI_INTR_TYPE_MSIX;
3697 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3698 "==> hxge_add_intrs: "
3699 "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3700 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3701 type = DDI_INTR_TYPE_FIXED;
3702 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3703 "==> hxge_add_intrs: "
3704 "ddi_intr_get_supported_types: MSXED0x%08x", type));
3708 hxgep->hxge_intr_type.intr_type = type;
3709 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3710 type == DDI_INTR_TYPE_FIXED) &&
3711 hxgep->hxge_intr_type.niu_msi_enable) {
3712 if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
3713 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3714 " hxge_add_intrs: "
3715 " hxge_add_intrs_adv failed: status 0x%08x",
3716 status));
3717 return (status);
3718 } else {
3719 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3720 "interrupts registered : type %d", type));
3721 hxgep->hxge_intr_type.intr_registered = B_TRUE;
3723 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3724 "\nAdded advanced hxge add_intr_adv "
3725 "intr type 0x%x\n", type));
3727 return (status);
3731 if (!hxgep->hxge_intr_type.intr_registered) {
3732 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3733 "==> hxge_add_intrs: failed to register interrupts"));
3734 return (HXGE_ERROR | HXGE_DDI_FAILED);
3737 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3739 return (status);
3742 /*ARGSUSED*/
3743 static hxge_status_t
3744 hxge_add_intrs_adv(p_hxge_t hxgep)
3746 int intr_type;
3747 p_hxge_intr_t intrp;
3748 hxge_status_t status;
3750 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3752 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3753 intr_type = intrp->intr_type;
3755 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3756 intr_type));
3758 switch (intr_type) {
3759 case DDI_INTR_TYPE_MSI: /* 0x2 */
3760 case DDI_INTR_TYPE_MSIX: /* 0x4 */
3761 status = hxge_add_intrs_adv_type(hxgep, intr_type);
3762 break;
3764 case DDI_INTR_TYPE_FIXED: /* 0x1 */
3765 status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3766 break;
3768 default:
3769 status = HXGE_ERROR;
3770 break;
3773 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3775 return (status);
3778 /*ARGSUSED*/
3779 static hxge_status_t
3780 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3782 dev_info_t *dip = hxgep->dip;
3783 p_hxge_ldg_t ldgp;
3784 p_hxge_intr_t intrp;
3785 uint_t *inthandler;
3786 void *arg1, *arg2;
3787 int behavior;
3788 int nintrs, navail;
3789 int nactual, nrequired, nrequest;
3790 int inum = 0;
3791 int loop = 0;
3792 int x, y;
3793 int ddi_status = DDI_SUCCESS;
3794 hxge_status_t status = HXGE_OK;
3796 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3798 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3800 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3801 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3802 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3803 "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3804 "nintrs: %d", ddi_status, nintrs));
3805 return (HXGE_ERROR | HXGE_DDI_FAILED);
3808 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3809 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3810 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3811 "ddi_intr_get_navail() failed, status: 0x%x%, "
3812 "nintrs: %d", ddi_status, navail));
3813 return (HXGE_ERROR | HXGE_DDI_FAILED);
3816 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3817 "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3818 int_type, nintrs, navail));
3820 /* PSARC/2007/453 MSI-X interrupt limit override */
3821 if (int_type == DDI_INTR_TYPE_MSIX) {
3822 nrequest = hxge_create_msi_property(hxgep);
3823 if (nrequest < navail) {
3824 navail = nrequest;
3825 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3826 "hxge_add_intrs_adv_type: nintrs %d "
3827 "navail %d (nrequest %d)",
3828 nintrs, navail, nrequest));
3832 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3833 /* MSI must be power of 2 */
3834 if ((navail & 16) == 16) {
3835 navail = 16;
3836 } else if ((navail & 8) == 8) {
3837 navail = 8;
3838 } else if ((navail & 4) == 4) {
3839 navail = 4;
3840 } else if ((navail & 2) == 2) {
3841 navail = 2;
3842 } else {
3843 navail = 1;
3845 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3846 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3847 "navail %d", nintrs, navail));
3850 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3851 "requesting: intr type %d nintrs %d, navail %d",
3852 int_type, nintrs, navail));
3854 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3855 DDI_INTR_ALLOC_NORMAL);
3856 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3857 intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3859 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3860 navail, &nactual, behavior);
3861 if (ddi_status != DDI_SUCCESS || nactual == 0) {
3862 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3863 " ddi_intr_alloc() failed: %d", ddi_status));
3864 kmem_free(intrp->htable, intrp->intr_size);
3865 return (HXGE_ERROR | HXGE_DDI_FAILED);
3868 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3869 "ddi_intr_alloc() returned: navail %d nactual %d",
3870 navail, nactual));
3872 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3873 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3874 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3875 " ddi_intr_get_pri() failed: %d", ddi_status));
3876 /* Free already allocated interrupts */
3877 for (y = 0; y < nactual; y++) {
3878 (void) ddi_intr_free(intrp->htable[y]);
3881 kmem_free(intrp->htable, intrp->intr_size);
3882 return (HXGE_ERROR | HXGE_DDI_FAILED);
3885 nrequired = 0;
3886 status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3887 if (status != HXGE_OK) {
3888 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3889 "hxge_add_intrs_adv_typ:hxge_ldgv_init "
3890 "failed: 0x%x", status));
3891 /* Free already allocated interrupts */
3892 for (y = 0; y < nactual; y++) {
3893 (void) ddi_intr_free(intrp->htable[y]);
3896 kmem_free(intrp->htable, intrp->intr_size);
3897 return (status);
3900 ldgp = hxgep->ldgvp->ldgp;
3901 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3902 "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3904 if (nactual < nrequired)
3905 loop = nactual;
3906 else
3907 loop = nrequired;
3909 for (x = 0; x < loop; x++, ldgp++) {
3910 ldgp->vector = (uint8_t)x;
3911 arg1 = ldgp->ldvp;
3912 arg2 = hxgep;
3913 if (ldgp->nldvs == 1) {
3914 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3915 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3916 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3917 "1-1 int handler (entry %d)\n",
3918 arg1, arg2, x));
3919 } else if (ldgp->nldvs > 1) {
3920 inthandler = (uint_t *)ldgp->sys_intr_handler;
3921 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3922 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3923 "nldevs %d int handler (entry %d)\n",
3924 arg1, arg2, ldgp->nldvs, x));
3926 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3927 "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3928 "htable 0x%llx", x, intrp->htable[x]));
3930 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3931 (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3932 DDI_SUCCESS) {
3933 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3934 "==> hxge_add_intrs_adv_type: failed #%d "
3935 "status 0x%x", x, ddi_status));
3936 for (y = 0; y < intrp->intr_added; y++) {
3937 (void) ddi_intr_remove_handler(
3938 intrp->htable[y]);
3941 /* Free already allocated intr */
3942 for (y = 0; y < nactual; y++) {
3943 (void) ddi_intr_free(intrp->htable[y]);
3945 kmem_free(intrp->htable, intrp->intr_size);
3947 (void) hxge_ldgv_uninit(hxgep);
3949 return (HXGE_ERROR | HXGE_DDI_FAILED);
3952 ldgp->htable_idx = x;
3953 intrp->intr_added++;
3955 intrp->msi_intx_cnt = nactual;
3957 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3958 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3959 navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3961 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3962 (void) hxge_intr_ldgv_init(hxgep);
3964 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3966 return (status);
3969 /*ARGSUSED*/
3970 static hxge_status_t
3971 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3973 dev_info_t *dip = hxgep->dip;
3974 p_hxge_ldg_t ldgp;
3975 p_hxge_intr_t intrp;
3976 uint_t *inthandler;
3977 void *arg1, *arg2;
3978 int behavior;
3979 int nintrs, navail;
3980 int nactual, nrequired;
3981 int inum = 0;
3982 int x, y;
3983 int ddi_status = DDI_SUCCESS;
3984 hxge_status_t status = HXGE_OK;
3986 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3987 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3989 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3990 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3991 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3992 "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3993 "nintrs: %d", status, nintrs));
3994 return (HXGE_ERROR | HXGE_DDI_FAILED);
3997 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3998 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3999 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4000 "ddi_intr_get_navail() failed, status: 0x%x%, "
4001 "nintrs: %d", ddi_status, navail));
4002 return (HXGE_ERROR | HXGE_DDI_FAILED);
4005 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4006 "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
4007 nintrs, navail));
4009 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4010 DDI_INTR_ALLOC_NORMAL);
4011 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4012 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4013 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4014 navail, &nactual, behavior);
4015 if (ddi_status != DDI_SUCCESS || nactual == 0) {
4016 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4017 " ddi_intr_alloc() failed: %d", ddi_status));
4018 kmem_free(intrp->htable, intrp->intr_size);
4019 return (HXGE_ERROR | HXGE_DDI_FAILED);
4022 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4023 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4024 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4025 " ddi_intr_get_pri() failed: %d", ddi_status));
4026 /* Free already allocated interrupts */
4027 for (y = 0; y < nactual; y++) {
4028 (void) ddi_intr_free(intrp->htable[y]);
4031 kmem_free(intrp->htable, intrp->intr_size);
4032 return (HXGE_ERROR | HXGE_DDI_FAILED);
4035 nrequired = 0;
4036 status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
4037 if (status != HXGE_OK) {
4038 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4039 "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
4040 "failed: 0x%x", status));
4041 /* Free already allocated interrupts */
4042 for (y = 0; y < nactual; y++) {
4043 (void) ddi_intr_free(intrp->htable[y]);
4044 }
4046 kmem_free(intrp->htable, intrp->intr_size);
4047 return (status);
4048 }
4050 ldgp = hxgep->ldgvp->ldgp;
4051 for (x = 0; x < nrequired; x++, ldgp++) {
4052 ldgp->vector = (uint8_t)x;
4053 arg1 = ldgp->ldvp;
4054 arg2 = hxgep;
4055 if (ldgp->nldvs == 1) {
4056 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4057 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4058 "hxge_add_intrs_adv_type_fix: "
4059 "1-1 int handler(%d) ldg %d ldv %d "
4060 "arg1 $%p arg2 $%p\n",
4061 x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
4062 } else if (ldgp->nldvs > 1) {
4063 inthandler = (uint_t *)ldgp->sys_intr_handler;
4064 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4065 "hxge_add_intrs_adv_type_fix: "
4066 "shared ldv %d int handler(%d) ldv %d ldg %d"
4067 "arg1 0x%016llx arg2 0x%016llx\n",
4068 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4069 arg1, arg2));
4072 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4073 (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
4074 DDI_SUCCESS) {
4075 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4076 "==> hxge_add_intrs_adv_type_fix: failed #%d "
4077 "status 0x%x", x, ddi_status));
4078 for (y = 0; y < intrp->intr_added; y++) {
4079 (void) ddi_intr_remove_handler(
4080 intrp->htable[y]);
4081 }
4082 for (y = 0; y < nactual; y++) {
4083 (void) ddi_intr_free(intrp->htable[y]);
4084 }
4085 /* Free already allocated intr */
4086 kmem_free(intrp->htable, intrp->intr_size);
4088 (void) hxge_ldgv_uninit(hxgep);
4090 return (HXGE_ERROR | HXGE_DDI_FAILED);
4091 }
4092 intrp->intr_added++;
4093 }
4095 intrp->msi_intx_cnt = nactual;
4097 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4099 status = hxge_intr_ldgv_init(hxgep);
4101 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
4103 return (status);
4104 }
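/*
 * hxge_remove_intrs():
 * Tear down what hxge_add_intrs_adv_type*() set up: disable the vectors
 * (block disable when DDI_INTR_FLAG_BLOCK is supported, otherwise one at
 * a time), remove the registered handlers, free the allocated vectors and
 * the handle table, reset the bookkeeping counters, and finally release
 * the logical device groups via hxge_ldgv_uninit().
 */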
4106 /*ARGSUSED*/
4107 static void
4108 hxge_remove_intrs(p_hxge_t hxgep)
4109 {
4110 int i, inum;
4111 p_hxge_intr_t intrp;
4113 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
4114 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4115 if (!intrp->intr_registered) {
4116 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4117 "<== hxge_remove_intrs: interrupts not registered"));
4118 return;
4119 }
4121 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
4123 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4124 (void) ddi_intr_block_disable(intrp->htable,
4125 intrp->intr_added);
4126 } else {
4127 for (i = 0; i < intrp->intr_added; i++) {
4128 (void) ddi_intr_disable(intrp->htable[i]);
4129 }
4130 }
4132 for (inum = 0; inum < intrp->intr_added; inum++) {
4133 if (intrp->htable[inum]) {
4134 (void) ddi_intr_remove_handler(intrp->htable[inum]);
4135 }
4136 }
4138 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
4139 if (intrp->htable[inum]) {
4140 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4141 "hxge_remove_intrs: ddi_intr_free inum %d "
4142 "msi_intx_cnt %d intr_added %d",
4143 inum, intrp->msi_intx_cnt, intrp->intr_added));
4145 (void) ddi_intr_free(intrp->htable[inum]);
4146 }
4147 }
4149 kmem_free(intrp->htable, intrp->intr_size);
4150 intrp->intr_registered = B_FALSE;
4151 intrp->intr_enabled = B_FALSE;
4152 intrp->msi_intx_cnt = 0;
4153 intrp->intr_added = 0;
4155 (void) hxge_ldgv_uninit(hxgep);
4157 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
4158 }
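/*
 * hxge_intrs_enable():
 * Enable previously registered interrupts.  Uses ddi_intr_block_enable()
 * when the DDI_INTR_FLAG_BLOCK capability is present, otherwise enables
 * each vector individually with ddi_intr_enable().  A no-op if interrupts
 * are not registered or are already enabled.
 */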
4160 /*ARGSUSED*/
4161 static void
4162 hxge_intrs_enable(p_hxge_t hxgep)
4163 {
4164 p_hxge_intr_t intrp;
4165 int i;
4166 int status;
4168 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4170 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4172 if (!intrp->intr_registered) {
4173 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4174 "interrupts are not registered"));
4175 return;
4176 }
4178 if (intrp->intr_enabled) {
4179 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4180 "<== hxge_intrs_enable: already enabled"));
4181 return;
4182 }
4184 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4185 status = ddi_intr_block_enable(intrp->htable,
4186 intrp->intr_added);
4187 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4188 "block enable - status 0x%x total inums #%d\n",
4189 status, intrp->intr_added));
4190 } else {
4191 for (i = 0; i < intrp->intr_added; i++) {
4192 status = ddi_intr_enable(intrp->htable[i]);
4193 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4194 "ddi_intr_enable:enable - status 0x%x "
4195 "total inums %d enable inum #%d\n",
4196 status, intrp->intr_added, i));
4197 if (status == DDI_SUCCESS) {
4198 intrp->intr_enabled = B_TRUE;
4199 }
4200 }
4201 }
4203 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4204 }
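/*
 * hxge_intrs_disable():
 * Mirror image of hxge_intrs_enable(): block-disable when the capability
 * is supported, otherwise disable each vector individually, then mark
 * interrupts as disabled.
 */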
4206 /*ARGSUSED*/
4207 static void
4208 hxge_intrs_disable(p_hxge_t hxgep)
4209 {
4210 p_hxge_intr_t intrp;
4211 int i;
4213 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4215 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4217 if (!intrp->intr_registered) {
4218 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4219 "interrupts are not registered"));
4220 return;
4221 }
4223 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4224 (void) ddi_intr_block_disable(intrp->htable,
4225 intrp->intr_added);
4226 } else {
4227 for (i = 0; i < intrp->intr_added; i++) {
4228 (void) ddi_intr_disable(intrp->htable[i]);
4229 }
4230 }
4232 intrp->intr_enabled = B_FALSE;
4233 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4234 }
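/*
 * hxge_mac_register():
 * Register the instance with the GLDv3 MAC layer.  A mac_register_t is
 * allocated, populated (driver handle, dev_info, current MAC address,
 * callback vector, SDU range, VLAN margin, private properties, and
 * virtualization level), passed to mac_register(), and then freed
 * regardless of the outcome.
 */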
4236 static hxge_status_t
4237 hxge_mac_register(p_hxge_t hxgep)
4238 {
4239 mac_register_t *macp;
4240 int status;
4242 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4244 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4245 return (HXGE_ERROR);
4247 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4248 macp->m_driver = hxgep;
4249 macp->m_dip = hxgep->dip;
4250 macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4251 macp->m_callbacks = &hxge_m_callbacks;
4252 macp->m_min_sdu = 0;
4253 macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4254 macp->m_margin = VLAN_TAGSZ;
4255 macp->m_priv_props = hxge_priv_props;
4256 macp->m_v12n = MAC_VIRT_LEVEL1;
4258 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4259 "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4260 macp->m_src_addr[0],
4261 macp->m_src_addr[1],
4262 macp->m_src_addr[2],
4263 macp->m_src_addr[3],
4264 macp->m_src_addr[4],
4265 macp->m_src_addr[5]));
4267 status = mac_register(macp, &hxgep->mach);
4268 mac_free(macp);
4270 if (status != 0) {
4271 cmn_err(CE_WARN,
4272 "hxge_mac_register failed (status %d instance %d)",
4273 status, hxgep->instance);
4274 return (HXGE_ERROR);
4275 }
4277 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4278 "(instance %d)", hxgep->instance));
4280 return (HXGE_OK);
4281 }
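/*
 * hxge_init_common_dev():
 * Attach this instance to the shared per-Hydra hardware list.  The list
 * is keyed by the parent dev_info node; if no entry exists for this
 * parent, a new hxge_hw_list_t is allocated, its cfg/TCAM/VLAN locks are
 * initialized, and it is linked at the head of hxge_hw_list.  The ndevs
 * count tracks how many instances reference the entry.
 */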
4283 static int
4284 hxge_init_common_dev(p_hxge_t hxgep)
4285 {
4286 p_hxge_hw_list_t hw_p;
4287 dev_info_t *p_dip;
4289 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
4291 p_dip = hxgep->p_dip;
4292 MUTEX_ENTER(&hxge_common_lock);
4294 /*
4295 * Loop through existing per Hydra hardware list.
4296 */
4297 for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4298 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4299 "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
4300 hw_p, p_dip));
4301 if (hw_p->parent_devp == p_dip) {
4302 hxgep->hxge_hw_p = hw_p;
4303 hw_p->ndevs++;
4304 hw_p->hxge_p = hxgep;
4305 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4306 "==> hxge_init_common_device: "
4307 "hw_p $%p parent dip $%p ndevs %d (found)",
4308 hw_p, p_dip, hw_p->ndevs));
4309 break;
4310 }
4311 }
4313 if (hw_p == NULL) {
4314 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4315 "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4316 hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4317 hw_p->parent_devp = p_dip;
4318 hw_p->magic = HXGE_MAGIC;
4319 hxgep->hxge_hw_p = hw_p;
4320 hw_p->ndevs++;
4321 hw_p->hxge_p = hxgep;
4322 hw_p->next = hxge_hw_list;
4324 MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4325 MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4326 MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4328 hxge_hw_list = hw_p;
4329 }
4330 MUTEX_EXIT(&hxge_common_lock);
4331 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4332 "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4333 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4335 return (HXGE_OK);
4336 }
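/*
 * hxge_uninit_common_dev():
 * Drop this instance's reference on the shared hardware list entry.  When
 * the last reference goes away, the entry's mutexes are destroyed and the
 * entry is unlinked (from the head or the middle of the list) and freed.
 */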
4338 static void
4339 hxge_uninit_common_dev(p_hxge_t hxgep)
4340 {
4341 p_hxge_hw_list_t hw_p, h_hw_p;
4342 dev_info_t *p_dip;
4344 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4345 if (hxgep->hxge_hw_p == NULL) {
4346 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4347 "<== hxge_uninit_common_dev (no common)"));
4348 return;
4349 }
4351 MUTEX_ENTER(&hxge_common_lock);
4352 h_hw_p = hxge_hw_list;
4353 for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4354 p_dip = hw_p->parent_devp;
4355 if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4356 hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4357 hw_p->magic == HXGE_MAGIC) {
4358 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4359 "==> hxge_uninit_common_dev: "
4360 "hw_p $%p parent dip $%p ndevs %d (found)",
4361 hw_p, p_dip, hw_p->ndevs));
4363 hxgep->hxge_hw_p = NULL;
4364 if (hw_p->ndevs) {
4365 hw_p->ndevs--;
4366 }
4367 hw_p->hxge_p = NULL;
4368 if (!hw_p->ndevs) {
4369 MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4370 MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4371 MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4372 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4373 "==> hxge_uninit_common_dev: "
4374 "hw_p $%p parent dip $%p ndevs %d (last)",
4375 hw_p, p_dip, hw_p->ndevs));
4377 if (hw_p == hxge_hw_list) {
4378 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4379 "==> hxge_uninit_common_dev:"
4380 "remove head "
4381 "hw_p $%p parent dip $%p "
4382 "ndevs %d (head)",
4383 hw_p, p_dip, hw_p->ndevs));
4384 hxge_hw_list = hw_p->next;
4385 } else {
4386 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4387 "==> hxge_uninit_common_dev:"
4388 "remove middle "
4389 "hw_p $%p parent dip $%p "
4390 "ndevs %d (middle)",
4391 hw_p, p_dip, hw_p->ndevs));
4392 h_hw_p->next = hw_p->next;
4393 }
4395 KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4396 }
4397 break;
4398 } else {
4399 h_hw_p = hw_p;
4400 }
4401 }
4403 MUTEX_EXIT(&hxge_common_lock);
4404 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4405 "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4407 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
4408 }
4410 #define HXGE_MSIX_ENTRIES 32
4411 #define HXGE_MSIX_WAIT_COUNT 10
4412 #define HXGE_MSIX_PARITY_CHECK_COUNT 30
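/*
 * hxge_link_poll():
 * timeout(9F) callback that samples the CIP_LINK_STAT register, reports a
 * link-state transition (or a pending forced report) through
 * hxge_link_update(), and then re-arms itself.
 */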
4414 static void
4415 hxge_link_poll(void *arg)
4416 {
4417 p_hxge_t hxgep = (p_hxge_t)arg;
4418 hpi_handle_t handle;
4419 cip_link_stat_t link_stat;
4420 hxge_timeout *to = &hxgep->timeout;
4422 handle = HXGE_DEV_HPI_HANDLE(hxgep);
4423 HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4425 if (to->report_link_status ||
4426 (to->link_status != link_stat.bits.xpcs0_link_up)) {
4427 to->link_status = link_stat.bits.xpcs0_link_up;
4428 to->report_link_status = B_FALSE;
4430 if (link_stat.bits.xpcs0_link_up) {
4431 hxge_link_update(hxgep, LINK_STATE_UP);
4432 } else {
4433 hxge_link_update(hxgep, LINK_STATE_DOWN);
4434 }
4435 }
4437 /* Restart the link status timer to check the link status */
4438 MUTEX_ENTER(&to->lock);
4439 to->id = timeout(hxge_link_poll, arg, to->ticks);
4440 MUTEX_EXIT(&to->lock);
4441 }
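/*
 * hxge_link_update():
 * Push the new link state to the MAC layer and update the driver's link
 * statistics; when the link is up it is reported as 10000 Mb/s with
 * link_duplex set to 2 (full duplex).
 */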
4443 static void
4444 hxge_link_update(p_hxge_t hxgep, link_state_t state)
4445 {
4446 p_hxge_stats_t statsp = (p_hxge_stats_t)hxgep->statsp;
4448 mac_link_update(hxgep->mach, state);
4449 if (state == LINK_STATE_UP) {
4450 statsp->mac_stats.link_speed = 10000;
4451 statsp->mac_stats.link_duplex = 2;
4452 statsp->mac_stats.link_up = 1;
4453 } else {
4454 statsp->mac_stats.link_speed = 0;
4455 statsp->mac_stats.link_duplex = 0;
4456 statsp->mac_stats.link_up = 0;
4457 }
4458 }
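/*
 * hxge_msix_init():
 * Touch all HXGE_MSIX_ENTRIES entries of the MSI-X table through the
 * MSI-X BAR: each 16-byte entry is written with a data pattern and its
 * last word cleared, then every entry is read back.  The read-back loop
 * appears to exist only to flush/verify the writes; the values read are
 * not used.
 */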
4460 static void
4461 hxge_msix_init(p_hxge_t hxgep)
4462 {
4463 uint32_t data0;
4464 uint32_t data1;
4465 uint32_t data2;
4466 int i;
4467 uint32_t msix_entry0;
4468 uint32_t msix_entry1;
4469 uint32_t msix_entry2;
4470 uint32_t msix_entry3;
4472 /* Change to use MSIx bar instead of indirect access */
4473 for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4474 data0 = 0xffffffff - i;
4475 data1 = 0xffffffff - i - 1;
4476 data2 = 0xffffffff - i - 2;
4478 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
4479 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
4480 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
4481 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 12, 0);
4482 }
4484 /* Initialize ram data out buffer. */
4485 for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4486 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4487 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
4488 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
4489 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12, &msix_entry3);
4490 }
4491 }
4493 /*
4494 * The following function is to support
4495 * PSARC/2007/453 MSI-X interrupt limit override.
4496 */
4497 static int
4498 hxge_create_msi_property(p_hxge_t hxgep)
4499 {
4500 int nmsi;
4501 extern int ncpus;
4503 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==>hxge_create_msi_property"));
4505 (void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
4506 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
4507 /*
4508 * The maximum MSI-X requested will be 8.
4509 * If the # of CPUs is less than 8, we will request
4510 * # MSI-X based on the # of CPUs.
4511 */
4512 if (ncpus >= HXGE_MSIX_REQUEST_10G) {
4513 nmsi = HXGE_MSIX_REQUEST_10G;
4514 } else {
4515 nmsi = ncpus;
4516 }
4518 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4519 "==>hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
4520 ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
4521 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4523 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<==hxge_create_msi_property"));
4524 return (nmsi);
4525 }