/* usr/src/uts/common/io/hxge/hxge_virtual.c */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <hxge_impl.h>
#include <hxge_vmac.h>
#include <hxge_pfc.h>
#include <hpi_pfc.h>

static hxge_status_t hxge_get_mac_addr_properties(p_hxge_t);
static void hxge_use_cfg_hydra_properties(p_hxge_t);
static void hxge_use_cfg_dma_config(p_hxge_t);
static void hxge_use_cfg_class_config(p_hxge_t);
static void hxge_set_hw_dma_config(p_hxge_t);
static void hxge_set_hw_class_config(p_hxge_t);
static void hxge_ldgv_setup(p_hxge_ldg_t *ldgp, p_hxge_ldv_t *ldvp, uint8_t ldv,
	uint8_t endldg, int *ngrps);

extern uint16_t hxge_rcr_timeout;
extern uint16_t hxge_rcr_threshold;

extern uint32_t hxge_rbr_size;
extern uint32_t hxge_rcr_size;

extern uint_t hxge_rx_intr();
extern uint_t hxge_tx_intr();
extern uint_t hxge_vmac_intr();
extern uint_t hxge_syserr_intr();
extern uint_t hxge_pfc_intr();
/*
 * Entry point to populate configuration parameters into the master hxge
 * data structure and to update the NDD parameter list.
 */
hxge_status_t
hxge_get_config_properties(p_hxge_t hxgep)
{
	hxge_status_t		status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, VPD_CTL, " ==> hxge_get_config_properties"));

	if (hxgep->hxge_hw_p == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_get_config_properties: common hardware not set"));
		return (HXGE_ERROR);
	}

	hxgep->classifier.tcam_size = TCAM_HXGE_TCAM_MAX_ENTRY;

	status = hxge_get_mac_addr_properties(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_get_config_properties: mac addr properties failed"));
		return (status);
	}

	HXGE_DEBUG_MSG((hxgep, VPD_CTL,
	    " ==> hxge_get_config_properties: Hydra"));

	hxge_use_cfg_hydra_properties(hxgep);

	HXGE_DEBUG_MSG((hxgep, VPD_CTL, " <== hxge_get_config_properties"));
	return (HXGE_OK);
}
static void
hxge_set_hw_vlan_class_config(p_hxge_t hxgep)
{
	int			i;
	p_hxge_param_t		param_arr;
	uint_t			vlan_cnt;
	int			*vlan_cfg_val;
	hxge_param_map_t	*vmap;
	char			*prop;
	p_hxge_class_pt_cfg_t	p_class_cfgp;
	uint32_t		good_cfg[32];
	int			good_count = 0;
	hxge_mv_cfg_t		*vlan_tbl;

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_set_hw_vlan_config"));
	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;

	param_arr = hxgep->param_arr;
	prop = param_arr[param_vlan_ids].fcode_name;

	/*
	 * uint32_t array, each array entry specifying a VLAN id
	 */
	for (i = 0; i <= VLAN_ID_MAX; i++) {
		p_class_cfgp->vlan_tbl[i].flag = 0;
	}

	vlan_tbl = (hxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
	    &vlan_cfg_val, &vlan_cnt) != DDI_PROP_SUCCESS) {
		return;
	}

	for (i = 0; i < vlan_cnt; i++) {
		vmap = (hxge_param_map_t *)&vlan_cfg_val[i];
		if ((vmap->param_id) && (vmap->param_id <= VLAN_ID_MAX)) {
			HXGE_DEBUG_MSG((hxgep, CFG2_CTL,
			    " hxge_vlan_config vlan id %d", vmap->param_id));

			good_cfg[good_count] = vlan_cfg_val[i];
			if (vlan_tbl[vmap->param_id].flag == 0)
				good_count++;

			vlan_tbl[vmap->param_id].flag = 1;
		}
	}

	ddi_prop_free(vlan_cfg_val);
	if (good_count != vlan_cnt) {
		(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
		    hxgep->dip, prop, (int *)good_cfg, good_count);
	}

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_vlan_config"));
}
/*
 * Read param_vlan_ids and param_implicit_vlan_id properties from either
 * hxge.conf or OBP. Update the soft properties. Populate these
 * properties into the hxge data structure.
 */
static void
hxge_use_cfg_vlan_class_config(p_hxge_t hxgep)
{
	uint_t		vlan_cnt;
	int		*vlan_cfg_val;
	int		status;
	p_hxge_param_t	param_arr;
	char		*prop;
	uint32_t	implicit_vlan_id = 0;
	int		*int_prop_val;
	uint_t		prop_len;
	p_hxge_param_t	pa;

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_vlan_config"));
	param_arr = hxgep->param_arr;
	prop = param_arr[param_vlan_ids].fcode_name;

	status = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
	    &vlan_cfg_val, &vlan_cnt);
	if (status == DDI_PROP_SUCCESS) {
		status = ddi_prop_update_int_array(DDI_DEV_T_NONE,
		    hxgep->dip, prop, vlan_cfg_val, vlan_cnt);
		ddi_prop_free(vlan_cfg_val);
	}

	pa = &param_arr[param_implicit_vlan_id];
	prop = pa->fcode_name;
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
	    &int_prop_val, &prop_len) == DDI_PROP_SUCCESS) {
		implicit_vlan_id = (uint32_t)*int_prop_val;
		/* Only accept an implicit VLAN id that is within range. */
		if ((implicit_vlan_id >= pa->minimum) &&
		    (implicit_vlan_id <= pa->maximum)) {
			status = ddi_prop_update_int(DDI_DEV_T_NONE, hxgep->dip,
			    prop, (int)implicit_vlan_id);
		}
		ddi_prop_free(int_prop_val);
	}

	hxge_set_hw_vlan_class_config(hxgep);

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_use_cfg_vlan_config"));
}
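
/*
 * For illustration only: a sketch of how the VLAN properties consumed
 * above might be supplied from a driver.conf file.  The property names
 * below are hypothetical stand-ins for the fcode_name strings of
 * param_vlan_ids and param_implicit_vlan_id and may differ from the
 * names actually registered by the driver:
 *
 *	vlan-ids=10,20,30;
 *	implicit-vlan-id=10;
 *
 * Each vlan-ids entry is decoded through hxge_param_map_t, so only ids
 * in the range 1..VLAN_ID_MAX are kept.
 */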
/*
 * Read in the configuration parameters from either hxge.conf or OBP and
 * populate the master data structure hxge.
 * Use these parameters to update the soft properties and the ndd array.
 */
static void
hxge_use_cfg_hydra_properties(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_hydra_properties"));

	(void) hxge_use_cfg_dma_config(hxgep);
	(void) hxge_use_cfg_vlan_class_config(hxgep);
	(void) hxge_use_cfg_class_config(hxgep);

	/*
	 * Read in the hardware (fcode) properties and use these properties
	 * to update the ndd array.
	 */
	(void) hxge_get_param_soft_properties(hxgep);
	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_use_cfg_hydra_properties"));
}
/*
 * Read param_accept_jumbo, param_rxdma_intr_time, and param_rxdma_intr_pkts
 * from either hxge.conf or OBP.
 * Update the soft properties.
 * Populate these properties into the hxge data structure for later use.
 */
static void
hxge_use_cfg_dma_config(p_hxge_t hxgep)
{
	int			tx_ndmas, rx_ndmas;
	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;
	dev_info_t		*dip;
	p_hxge_param_t		param_arr;
	char			*prop;
	int			*prop_val;
	uint_t			prop_len;

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_dma_config"));
	param_arr = hxgep->param_arr;

	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
	dip = hxgep->dip;

	tx_ndmas = 4;
	p_cfgp->start_tdc = 0;
	p_cfgp->max_tdcs = hxgep->max_tdcs = tx_ndmas;
	hxgep->tdc_mask = (tx_ndmas - 1);
	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_cfg_dma_config: "
	    "p_cfgp 0x%llx max_tdcs %d hxgep->max_tdcs %d",
	    p_cfgp, p_cfgp->max_tdcs, hxgep->max_tdcs));

	rx_ndmas = 4;
	p_cfgp->start_rdc = 0;
	p_cfgp->max_rdcs = hxgep->max_rdcs = rx_ndmas;

	p_cfgp->start_ldg = 0;
	p_cfgp->max_ldgs = HXGE_INT_MAX_LDG;

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_default_dma_config: "
	    "p_cfgp 0x%llx max_rdcs %d hxgep->max_rdcs %d",
	    p_cfgp, p_cfgp->max_rdcs, hxgep->max_rdcs));

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_cfg_dma_config: "
	    "p_cfgp 0x%016llx start_ldg %d hxgep->max_ldgs %d ",
	    p_cfgp, p_cfgp->start_ldg, p_cfgp->max_ldgs));

	/*
	 * add code for individual rdc properties
	 */
	prop = param_arr[param_accept_jumbo].fcode_name;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
	    &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
		if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
			    hxgep->dip, prop, prop_val, prop_len);
		}
		ddi_prop_free(prop_val);
	}

	prop = param_arr[param_rxdma_intr_time].fcode_name;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
	    &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
		if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
			    hxgep->dip, prop, prop_val, prop_len);
		}
		ddi_prop_free(prop_val);
	}

	prop = param_arr[param_rxdma_intr_pkts].fcode_name;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
	    &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
		if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
			    hxgep->dip, prop, prop_val, prop_len);
		}
		ddi_prop_free(prop_val);
	}

	hxge_set_hw_dma_config(hxgep);
	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "<== hxge_use_cfg_dma_config"));
}
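
/*
 * For illustration only: a sketch of how the per-RDC properties looked
 * up above might appear in a driver.conf file.  The names are
 * hypothetical stand-ins for the fcode_name strings of
 * param_accept_jumbo, param_rxdma_intr_time and param_rxdma_intr_pkts;
 * each array carries at most max_rdcs (4) entries, one per receive DMA
 * channel, or the lookup result is ignored.
 *
 *	accept-jumbo=1,1,1,1;
 *	rxdma-intr-time=8,8,8,8;
 *	rxdma-intr-pkts=16,16,16,16;
 */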
static void
hxge_use_cfg_class_config(p_hxge_t hxgep)
{
	hxge_set_hw_class_config(hxgep);
}

static void
hxge_set_hw_dma_config(p_hxge_t hxgep)
{
	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_set_hw_dma_config"));

	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

	/* Transmit DMA Channels */
	hxgep->ntdc = p_cfgp->max_tdcs;

	/* Receive DMA Channels */
	hxgep->nrdc = p_cfgp->max_rdcs;

	p_dma_cfgp->rbr_size = hxge_rbr_size;
	if (hxge_rcr_size > HXGE_RCR_MAX)
		hxge_rcr_size = HXGE_RCR_MAX;
	p_dma_cfgp->rcr_size = hxge_rcr_size;

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_dma_config"));
}
boolean_t
hxge_check_rxdma_port_member(p_hxge_t hxgep, uint8_t rdc)
{
	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;
	int			status = B_FALSE;

	HXGE_DEBUG_MSG((hxgep, CFG2_CTL, "==> hxge_check_rxdma_port_member"));

	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

	/* Receive DMA Channels */
	if (rdc < p_cfgp->max_rdcs)
		status = B_TRUE;
	HXGE_DEBUG_MSG((hxgep, CFG2_CTL, " <== hxge_check_rxdma_port_member"));

	return (status);
}

boolean_t
hxge_check_txdma_port_member(p_hxge_t hxgep, uint8_t tdc)
{
	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;
	int			status = B_FALSE;

	HXGE_DEBUG_MSG((hxgep, CFG2_CTL, "==> hxge_check_txdma_port_member"));

	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

	/* Transmit DMA Channels */
	if (tdc < p_cfgp->max_tdcs)
		status = B_TRUE;
	HXGE_DEBUG_MSG((hxgep, CFG2_CTL, " <== hxge_check_txdma_port_member"));

	return (status);
}
/*
 * Read the L2 classes, L3 classes, and initial hash from either hxge.conf
 * or OBP. Populate these properties into the hxge data structure for later
 * use. Note that we are not updating these soft properties.
 */
static void
hxge_set_hw_class_config(p_hxge_t hxgep)
{
	int			i, j;
	p_hxge_param_t		param_arr;
	int			*int_prop_val;
	uint32_t		cfg_value;
	char			*prop;
	p_hxge_class_pt_cfg_t	p_class_cfgp;
	int			start_prop, end_prop;
	uint_t			prop_cnt;

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_set_hw_class_config"));

	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;

	param_arr = hxgep->param_arr;

	/*
	 * L2 class configuration. User configurable ether types
	 */
	start_prop = param_class_cfg_ether_usr1;
	end_prop = param_class_cfg_ether_usr2;

	for (i = start_prop; i <= end_prop; i++) {
		prop = param_arr[i].fcode_name;
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip,
		    0, prop, &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
			cfg_value = (uint32_t)*int_prop_val;
			ddi_prop_free(int_prop_val);
		} else {
			cfg_value = (uint32_t)param_arr[i].value;
		}

		j = (i - start_prop) + TCAM_CLASS_ETYPE_1;
		p_class_cfgp->class_cfg[j] = cfg_value;
	}

	/*
	 * Use properties from either .conf or the NDD param array. Only bits
	 * 2 and 3 are significant
	 */
	start_prop = param_class_opt_ipv4_tcp;
	end_prop = param_class_opt_ipv6_sctp;

	for (i = start_prop; i <= end_prop; i++) {
		prop = param_arr[i].fcode_name;
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip,
		    0, prop, &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
			cfg_value = (uint32_t)*int_prop_val;
			ddi_prop_free(int_prop_val);
		} else {
			cfg_value = (uint32_t)param_arr[i].value;
		}

		j = (i - start_prop) + TCAM_CLASS_TCP_IPV4;
		p_class_cfgp->class_cfg[j] = cfg_value;
	}

	prop = param_arr[param_hash_init_value].fcode_name;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
	    &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
		cfg_value = (uint32_t)*int_prop_val;
		ddi_prop_free(int_prop_val);
	} else {
		cfg_value = (uint32_t)param_arr[param_hash_init_value].value;
	}

	p_class_cfgp->init_hash = (uint32_t)cfg_value;

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_class_config"));
}
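
/*
 * For illustration only: a sketch of how the class properties resolved
 * above might be overridden from a driver.conf file.  The names and
 * values are hypothetical stand-ins for the fcode_name strings of the
 * param_class_cfg_ether_usr* and param_class_opt_* entries; when a
 * property is absent, the default from the parameter array is used.
 *
 *	class-cfg-ether-usr1=0x8906;
 *	class-opt-ipv4-tcp=0x8;
 *	hash-init-value=0xffffffff;
 */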
/*
 * Interrupt-related interface functions.
 */
hxge_status_t
hxge_ldgv_init(p_hxge_t hxgep, int *navail_p, int *nrequired_p)
{
	uint8_t			ldv, i, maxldvs, maxldgs, start, end, nldvs;
	int			ldg, endldg, ngrps;
	uint8_t			channel;
	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;
	p_hxge_ldgv_t		ldgvp;
	p_hxge_ldg_t		ldgp, ptr;
	p_hxge_ldv_t		ldvp;
	hxge_status_t		status = HXGE_OK;
	peu_intr_mask_t		parity_err_mask;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_ldgv_init"));
	if (!*navail_p) {
		*nrequired_p = 0;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_ldgv_init:no avail"));
		return (HXGE_ERROR);
	}
	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

	/* each DMA channel */
	nldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;

	/* vmac */
	nldvs++;

	/* pfc */
	nldvs++;

	/* system error interrupts. */
	nldvs++;
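
	/*
	 * At this point nldvs covers every logical device bound below:
	 * one per TDC, one per RDC, plus VMAC, PFC and the system error
	 * device (max_tdcs + max_rdcs + 3 in total).
	 */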
	maxldvs = nldvs;
	maxldgs = p_cfgp->max_ldgs;

	if (!maxldvs || !maxldgs) {
		/* No devices configured. */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_ldgv_init: "
		    "no logical devices or groups configured."));
		return (HXGE_ERROR);
	}
	ldgvp = hxgep->ldgvp;
	if (ldgvp == NULL) {
		ldgvp = KMEM_ZALLOC(sizeof (hxge_ldgv_t), KM_SLEEP);
		hxgep->ldgvp = ldgvp;
		ldgvp->maxldgs = maxldgs;
		ldgvp->maxldvs = maxldvs;
		ldgp = ldgvp->ldgp =
		    KMEM_ZALLOC(sizeof (hxge_ldg_t) * maxldgs, KM_SLEEP);
		ldvp = ldgvp->ldvp =
		    KMEM_ZALLOC(sizeof (hxge_ldv_t) * maxldvs, KM_SLEEP);
	}

	ldgvp->ndma_ldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;
	ldgvp->tmres = HXGE_TIMER_RESO;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: maxldvs %d maxldgs %d nldvs %d",
	    maxldvs, maxldgs, nldvs));

	ldg = p_cfgp->start_ldg;
	ptr = ldgp;
	for (i = 0; i < maxldgs; i++) {
		ptr->arm = B_TRUE;
		ptr->vldg_index = i;
		ptr->ldg_timer = HXGE_TIMER_LDG;
		ptr->ldg = ldg++;
		ptr->sys_intr_handler = hxge_intr;
		ptr->nldvs = 0;
		ptr->hxgep = hxgep;
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_ldgv_init: maxldvs %d maxldgs %d ldg %d",
		    maxldvs, maxldgs, ptr->ldg));
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_ldv_init: timer %d", ptr->ldg_timer));
		ptr++;
	}
	ldg = p_cfgp->start_ldg;
	if (maxldgs > *navail_p) {
		ngrps = *navail_p;
	} else {
		ngrps = maxldgs;
	}
	endldg = ldg + ngrps;

	/*
	 * Receive DMA channels.
	 */
	channel = p_cfgp->start_rdc;
	start = p_cfgp->start_rdc + HXGE_RDMA_LD_START;
	end = start + p_cfgp->max_rdcs;
	nldvs = 0;
	ldgvp->nldvs = 0;
	ldgp->ldvp = NULL;
	*nrequired_p = 0;
	ptr = ldgp;

	/*
	 * Start with RDC to configure logical devices for each group.
	 */
	for (i = 0, ldv = start; ldv < end; i++, ldv++) {
		ldvp->is_rxdma = B_TRUE;
		ldvp->ldv = ldv;

		/*
		 * If non-seq needs to change the following code
		 */
		ldvp->channel = channel++;
		ldvp->vdma_index = i;
		ldvp->ldv_intr_handler = hxge_rx_intr;
		ldvp->ldv_ldf_masks = 0;
		ldvp->use_timer = B_FALSE;
		ldvp->hxgep = hxgep;
		hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
		nldvs++;
	}
	/*
	 * Transmit DMA channels.
	 */
	channel = p_cfgp->start_tdc;
	start = p_cfgp->start_tdc + HXGE_TDMA_LD_START;
	end = start + p_cfgp->max_tdcs;
	for (i = 0, ldv = start; ldv < end; i++, ldv++) {
		ldvp->is_txdma = B_TRUE;
		ldvp->ldv = ldv;
		ldvp->channel = channel++;
		ldvp->vdma_index = i;
		ldvp->ldv_intr_handler = hxge_tx_intr;
		ldvp->ldv_ldf_masks = 0;
		ldvp->use_timer = B_FALSE;
		ldvp->hxgep = hxgep;
		hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
		nldvs++;
	}

	/*
	 * VMAC
	 */
	ldvp->is_vmac = B_TRUE;
	ldvp->ldv_intr_handler = hxge_vmac_intr;
	ldvp->ldv_ldf_masks = 0;
	ldv = HXGE_VMAC_LD;
	ldvp->ldv = ldv;
	ldvp->use_timer = B_FALSE;
	ldvp->hxgep = hxgep;
	hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
	nldvs++;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
	    nldvs, *navail_p, *nrequired_p));
	/*
	 * PFC
	 */
	ldvp->is_pfc = B_TRUE;
	ldvp->ldv_intr_handler = hxge_pfc_intr;
	ldvp->ldv_ldf_masks = 0;
	ldv = HXGE_PFC_LD;
	ldvp->ldv = ldv;
	ldvp->use_timer = B_FALSE;
	ldvp->hxgep = hxgep;
	hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
	nldvs++;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
	    nldvs, *navail_p, *nrequired_p));

	/*
	 * System error interrupts.
	 */
	ldv = HXGE_SYS_ERROR_LD;
	ldvp->ldv = ldv;
	ldvp->is_syserr = B_TRUE;
	ldvp->ldv_intr_handler = hxge_syserr_intr;
	ldvp->ldv_ldf_masks = 0;
	ldvp->hxgep = hxgep;
	ldvp->use_timer = B_FALSE;
	ldgvp->ldvp_syserr = ldvp;

	/* Reset PEU error mask to allow PEU error interrupts */
	/*
	 * Keep the msix parity error mask here and remove it
	 * after ddi_intr_enable call to avoid a msix par err
	 */
	parity_err_mask.value = 0;
	parity_err_mask.bits.eic_msix_parerr_mask = 1;
	HXGE_REG_WR32(hxgep->hpi_handle, PEU_INTR_MASK, parity_err_mask.value);

	/*
	 * Unmask the system interrupt states.
	 */
	(void) hxge_fzc_sys_err_mask_set(hxgep, B_FALSE);
	(void) hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
	nldvs++;

	ldgvp->ldg_intrs = *nrequired_p;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
	    nldvs, *navail_p, *nrequired_p));
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_ldgv_init"));
	return (status);
}
hxge_status_t
hxge_ldgv_uninit(p_hxge_t hxgep)
{
	p_hxge_ldgv_t	ldgvp;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_ldgv_uninit"));
	ldgvp = hxgep->ldgvp;
	if (ldgvp == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_ldgv_uninit: no logical group configured."));
		return (HXGE_OK);
	}

	if (ldgvp->ldgp) {
		KMEM_FREE(ldgvp->ldgp, sizeof (hxge_ldg_t) * ldgvp->maxldgs);
	}
	if (ldgvp->ldvp) {
		KMEM_FREE(ldgvp->ldvp, sizeof (hxge_ldv_t) * ldgvp->maxldvs);
	}

	KMEM_FREE(ldgvp, sizeof (hxge_ldgv_t));
	hxgep->ldgvp = NULL;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_ldgv_uninit"));
	return (HXGE_OK);
}

hxge_status_t
hxge_intr_ldgv_init(p_hxge_t hxgep)
{
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_ldgv_init"));
	/*
	 * Configure the logical device group numbers, state vectors
	 * and interrupt masks for each logical device.
	 */
	status = hxge_fzc_intr_init(hxgep);

	/*
	 * Configure logical device masks and timers.
	 */
	status = hxge_intr_mask_mgmt(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_ldgv_init"));
	return (status);
}
hxge_status_t
hxge_intr_mask_mgmt(p_hxge_t hxgep)
{
	p_hxge_ldgv_t	ldgvp;
	p_hxge_ldg_t	ldgp;
	p_hxge_ldv_t	ldvp;
	hpi_handle_t	handle;
	int		i, j;
	hpi_status_t	rs = HPI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_mask_mgmt"));

	if ((ldgvp = hxgep->ldgvp) == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_intr_mask_mgmt: Null ldgvp"));
		return (HXGE_ERROR);
	}
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	ldgp = ldgvp->ldgp;
	ldvp = ldgvp->ldvp;
	if (ldgp == NULL || ldvp == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_intr_mask_mgmt: Null ldgp or ldvp"));
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_intr_mask_mgmt: # of intrs %d ", ldgvp->ldg_intrs));
	/* Initialize masks. */
	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_intr_mask_mgmt(Hydra): # intrs %d ", ldgvp->ldg_intrs));
	for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr_mask_mgmt(Hydra): # ldv %d in group %d",
		    ldgp->nldvs, ldgp->ldg));
		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr_mask_mgmt: set ldv # %d "
			    "for ldg %d", ldvp->ldv, ldgp->ldg));
			rs = hpi_intr_mask_set(handle, ldvp->ldv,
			    ldvp->ldv_ldf_masks);
			if (rs != HPI_SUCCESS) {
				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
				    "<== hxge_intr_mask_mgmt: set mask failed "
				    " rs 0x%x ldv %d mask 0x%x",
				    rs, ldvp->ldv, ldvp->ldv_ldf_masks));
				return (HXGE_ERROR | rs);
			}
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr_mask_mgmt: set mask OK "
			    " rs 0x%x ldv %d mask 0x%x",
			    rs, ldvp->ldv, ldvp->ldv_ldf_masks));
		}
	}

	ldgp = ldgvp->ldgp;
	/* Configure timer and arm bit */
	for (i = 0; i < hxgep->ldgvp->ldg_intrs; i++, ldgp++) {
		rs = hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
		    ldgp->arm, ldgp->ldg_timer);
		if (rs != HPI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "<== hxge_intr_mask_mgmt: set timer failed "
			    " rs 0x%x ldg %d timer 0x%x",
			    rs, ldgp->ldg, ldgp->ldg_timer));
			return (HXGE_ERROR | rs);
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr_mask_mgmt: set timer OK "
		    " rs 0x%x ldg %d timer 0x%x",
		    rs, ldgp->ldg, ldgp->ldg_timer));
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_mask_mgmt"));
	return (HXGE_OK);
}
hxge_status_t
hxge_intr_mask_mgmt_set(p_hxge_t hxgep, boolean_t on)
{
	p_hxge_ldgv_t	ldgvp;
	p_hxge_ldg_t	ldgp;
	p_hxge_ldv_t	ldvp;
	hpi_handle_t	handle;
	int		i, j;
	hpi_status_t	rs = HPI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_intr_mask_mgmt_set (%d)", on));

	if ((ldgvp = hxgep->ldgvp) == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_intr_mask_mgmt_set: Null ldgvp"));
		return (HXGE_ERROR);
	}
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	ldgp = ldgvp->ldgp;
	ldvp = ldgvp->ldvp;
	if (ldgp == NULL || ldvp == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_intr_mask_mgmt_set: Null ldgp or ldvp"));
		return (HXGE_ERROR);
	}

	/* set masks. */
	for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr_mask_mgmt_set: flag %d ldg %d "
		    "set mask nldvs %d", on, ldgp->ldg, ldgp->nldvs));
		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr_mask_mgmt_set: "
			    "for %d %d flag %d", i, j, on));
			if (on) {
				ldvp->ldv_ldf_masks = 0;
				HXGE_DEBUG_MSG((hxgep, INT_CTL,
				    "==> hxge_intr_mask_mgmt_set: "
				    "ON mask off"));
			} else {
				ldvp->ldv_ldf_masks = (uint8_t)LD_IM_MASK;
				HXGE_DEBUG_MSG((hxgep, INT_CTL,
				    "==> hxge_intr_mask_mgmt_set: mask on"));
			}

			rs = hpi_intr_mask_set(handle, ldvp->ldv,
			    ldvp->ldv_ldf_masks);
			if (rs != HPI_SUCCESS) {
				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
				    "==> hxge_intr_mask_mgmt_set: "
				    "set mask failed rs 0x%x ldv %d mask 0x%x",
				    rs, ldvp->ldv, ldvp->ldv_ldf_masks));
				return (HXGE_ERROR | rs);
			}
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr_mask_mgmt_set: flag %d "
			    "set mask OK ldv %d mask 0x%x",
			    on, ldvp->ldv, ldvp->ldv_ldf_masks));
		}
	}

	ldgp = ldgvp->ldgp;
	/* set the arm bit */
	for (i = 0; i < hxgep->ldgvp->ldg_intrs; i++, ldgp++) {
		if (on && !ldgp->arm) {
			ldgp->arm = B_TRUE;
		} else if (!on && ldgp->arm) {
			ldgp->arm = B_FALSE;
		}
		rs = hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
		    ldgp->arm, ldgp->ldg_timer);
		if (rs != HPI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "<== hxge_intr_mask_mgmt_set: "
			    "set timer failed rs 0x%x ldg %d timer 0x%x",
			    rs, ldgp->ldg, ldgp->ldg_timer));
			return (HXGE_ERROR | rs);
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr_mask_mgmt_set: OK (flag %d) "
		    "set timer ldg %d timer 0x%x",
		    on, ldgp->ldg, ldgp->ldg_timer));
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_mask_mgmt_set"));
	return (HXGE_OK);
}
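
/*
 * Summary of the on/off semantics above: when "on" is B_TRUE the LDF
 * masks are cleared and each group is armed, so interrupts can fire;
 * when "on" is B_FALSE the masks are set to LD_IM_MASK and the groups
 * are disarmed, quiescing the device's interrupts.
 */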
/*
 * For Big Endian systems, the mac address will be from OBP. For Little
 * Endian (x64) systems, it will be retrieved from the card since it cannot
 * be programmed into PXE.
 * This function also populates the MMAC parameters.
 */
static hxge_status_t
hxge_get_mac_addr_properties(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_get_mac_addr_properties "));

	(void) hxge_pfc_mac_addrs_get(hxgep);
	hxgep->ouraddr = hxgep->factaddr;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_get_mac_addr_properties "));
	return (HXGE_OK);
}
static void
hxge_ldgv_setup(p_hxge_ldg_t *ldgp, p_hxge_ldv_t *ldvp, uint8_t ldv,
    uint8_t endldg, int *ngrps)
{
	HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup"));
	/* Assign the group number for each device. */
	(*ldvp)->ldg_assigned = (*ldgp)->ldg;
	(*ldvp)->ldgp = *ldgp;
	(*ldvp)->ldv = ldv;

	HXGE_DEBUG_MSG((NULL, INT_CTL,
	    "==> hxge_ldgv_setup: ldv %d endldg %d ldg %d, ldvp $%p",
	    ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));

	(*ldgp)->nldvs++;
	if ((*ldgp)->ldg == (endldg - 1)) {
		if ((*ldgp)->ldvp == NULL) {
			(*ldgp)->ldvp = *ldvp;
			*ngrps += 1;
			HXGE_DEBUG_MSG((NULL, INT_CTL,
			    "==> hxge_ldgv_setup: ngrps %d", *ngrps));
		}
		HXGE_DEBUG_MSG((NULL, INT_CTL,
		    "==> hxge_ldgv_setup: ldvp $%p ngrps %d",
		    *ldvp, *ngrps));
		++*ldvp;
	} else {
		(*ldgp)->ldvp = *ldvp;
		*ngrps += 1;
		HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup(done): "
		    "ldv %d endldg %d ldg %d, ldvp $%p",
		    ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));
		++*ldvp;
		++*ldgp;
		HXGE_DEBUG_MSG((NULL, INT_CTL,
		    "==> hxge_ldgv_setup: new ngrps %d", *ngrps));
	}

	HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup: "
	    "ldg %d nldvs %d ldv %d ldvp $%p endldg %d ngrps %d",
	    (*ldgp)->ldg, (*ldgp)->nldvs, ldv, ldvp, endldg, *ngrps));

	HXGE_DEBUG_MSG((NULL, INT_CTL, "<== hxge_ldgv_setup"));
}
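
/*
 * Note on the group assignment above: hxge_ldgv_init hands out one
 * logical device group per logical device until the last available
 * group (endldg - 1) is reached; every remaining logical device is
 * then chained onto that last group, which is why the group pointer
 * only advances in the else branch.
 */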