/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_hio.h>

#if !defined(_BIG_ENDIAN)
#include <npi_rx_rd32.h>
#endif
#include <npi_rx_rd64.h>
#include <npi_rx_wr64.h>
#define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \
    (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
#define NXGE_ACTUAL_RDC(nxgep, rdc) \
    (rdc + nxgep->pt_config.hw_config.start_rdc)
/*
 * Globals: tunable parameters (/etc/system or adb)
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;
extern uint16_t nxge_rdc_buf_offset;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_bcopy_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
 */
extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;

extern uint32_t nxge_cksum_offload;
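
/*
 * Added note (not in the original source): these tunables are normally
 * overridden from /etc/system and take effect at the next boot, e.g.:
 *
 *	set nxge:nxge_max_rx_pkts = 512
 *
 * The value shown is purely illustrative; the variables are defined,
 * with their defaults, elsewhere in the driver.
 */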
static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
static void nxge_unmap_rxdma(p_nxge_t, int);

static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
static void nxge_rxdma_hw_stop(p_nxge_t, int);

static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    uint32_t,
    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
    p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    p_rx_rcr_ring_t *, p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
    p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *,
    p_rx_rbr_ring_t *, uint32_t);
static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);

static mblk_t *
nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);

static void nxge_receive_packet(p_nxge_t,
    p_rx_rcr_ring_t, p_rcr_entry_t,
    boolean_t *,
    mblk_t **, mblk_t **);

nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);

static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
static void nxge_freeb(p_rx_msg_t);
static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);

static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
    uint32_t, uint32_t);

static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t);

static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int i, count, channel;
    nxge_grp_t *group;
    dc_map_t map;
    int dev_gindex;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));

    if (!isLDOMguest(nxgep)) {
        if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
            cmn_err(CE_NOTE, "hw_start_common");
            return (NXGE_ERROR);
        }
    }

    /*
     * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
     * We only have 8 hardware RDC tables, but we may have
     * up to 16 logical (software-defined) groups of RDCS,
     * if we make use of layer 3 & 4 hardware classification.
     */
    for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if ((1 << i) & set->lg.map) {
            group = set->group[i];
            dev_gindex =
                nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
            map = nxgep->pt_config.rdc_grps[dev_gindex].map;
            for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
                if ((1 << channel) & map) {
                    if ((nxge_grp_dc_add(nxgep,
                        group, VP_BOUND_RX, channel)))
                        goto init_rxdma_channels_exit;
                }
            }
        }
        if (++count == set->lg.count)
            break;
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
    return (NXGE_OK);

init_rxdma_channels_exit:
    for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if ((1 << i) & set->lg.map) {
            group = set->group[i];
            dev_gindex =
                nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
            map = nxgep->pt_config.rdc_grps[dev_gindex].map;
            for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
                if ((1 << channel) & map) {
                    nxge_grp_dc_remove(nxgep,
                        VP_BOUND_RX, channel);
                }
            }
        }
        if (++count == set->lg.count)
            break;
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
    return (NXGE_ERROR);
}
nxge_status_t
nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
{
    nxge_status_t status;

    NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));

    status = nxge_map_rxdma(nxge, channel);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "<== nxge_init_rxdma: status 0x%x", status));
        return (status);
    }

    if (isLDOMguest(nxge)) {
        /* Bind the receive interrupt for this guest channel. */
        p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel];

        status = nxge_hio_rxdma_bind_intr(nxge, ring, channel);
        if (status != NXGE_OK) {
            nxge_unmap_rxdma(nxge, channel);
            return (status);
        }
    }

    status = nxge_rxdma_hw_start(nxge, channel);
    if (status != NXGE_OK) {
        nxge_unmap_rxdma(nxge, channel);
    }

    if (!nxge->statsp->rdc_ksp[channel])
        nxge_setup_rdc_kstats(nxge, channel);

    NXGE_DEBUG_MSG((nxge, MEM2_CTL,
        "<== nxge_init_rxdma_channel: status 0x%x", status));

    return (status);
}
void
nxge_uninit_rxdma_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "nxge_uninit_rxdma_channels: no channels"));
        return;
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc);
        }
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels"));
}
void
nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel)
{
    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel"));

    if (nxgep->statsp->rdc_ksp[channel]) {
        kstat_delete(nxgep->statsp->rdc_ksp[channel]);
        nxgep->statsp->rdc_ksp[channel] = 0;
    }

    nxge_rxdma_hw_stop(nxgep, channel);
    nxge_unmap_rxdma(nxgep, channel);

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channel"));
}
nxge_status_t
nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_rxdma_cfg_rdc_reset(handle, channel);

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));

    return (status);
}
void
nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));

    if (!isLDOMguest(nxgep)) {
        npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
        (void) npi_rxdma_dump_fzc_regs(handle);
    }

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "nxge_rxdma_regs_dump_channels: "
            "NULL ring pointer(s)"));
        return;
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "nxge_rxdma_regs_dump_channels: no channels"));
        return;
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            if (ring) {
                (void) nxge_dump_rxdma_channel(nxgep, rdc);
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));
}
nxge_status_t
nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_rxdma_dump_rdc_regs(handle, channel);

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }
    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));

    return (status);
}
nxge_status_t
nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ent_msk_t mask_p)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "<== nxge_init_rxdma_channel_event_mask"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    return (status);
}
nxge_status_t
nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ctl_stat_t cs_p)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "<== nxge_init_rxdma_channel_cntl_stat"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    return (status);
}
/*
 * nxge_rxdma_cfg_rdcgrp_default_rdc
 *
 *	Set the default RDC for an RDC Group (Table)
 *
 * Arguments:
 *	rdcgrp	The group to modify
 *	rdc	The new default RDC.
 *
 * NPI/NXGE function calls:
 *	npi_rxdma_cfg_rdc_table_default_rdc()
 *
 * Registers accessed:
 *	RDC_TBL_REG: FZC_ZCP + 0x10000
 */
nxge_status_t
nxge_rxdma_cfg_rdcgrp_default_rdc(
    p_nxge_t nxgep,
    uint8_t rdcgrp,
    uint8_t rdc)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    p_nxge_dma_pt_cfg_t p_dma_cfgp;
    p_nxge_rdc_grp_t rdc_grp_p;
    uint8_t actual_rdcgrp, actual_rdc;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
    p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;

    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    /*
     * This has to be rewritten.  Do we even allow this anymore?
     */
    rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
    RDC_MAP_IN(rdc_grp_p->map, rdc);
    rdc_grp_p->def_rdc = rdc;

    actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
    actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);

    rs = npi_rxdma_cfg_rdc_table_default_rdc(
        handle, actual_rdcgrp, actual_rdc);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
    return (NXGE_OK);
}
nxge_status_t
nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
{
    npi_handle_t handle;
    uint8_t actual_rdc;
    npi_status_t rs = NPI_SUCCESS;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " ==> nxge_rxdma_cfg_port_default_rdc"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    actual_rdc = rdc;	/* XXX Hack! */
    rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " <== nxge_rxdma_cfg_port_default_rdc"));

    return (NXGE_OK);
}
nxge_status_t
nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
    uint16_t pkts)
{
    npi_status_t rs = NPI_SUCCESS;
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " ==> nxge_rxdma_cfg_rcr_threshold"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
    NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
    return (NXGE_OK);
}
nxge_status_t
nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
    uint16_t tout, uint8_t enable)
{
    npi_status_t rs = NPI_SUCCESS;
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    if (enable == 0) {
        rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
    } else {
        rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
            tout);
    }

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
    NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
    return (NXGE_OK);
}
nxge_status_t
nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
    npi_handle_t handle;
    rdc_desc_cfg_t rdc_desc;
    p_rcrcfig_b_t cfgb_p;
    npi_status_t rs = NPI_SUCCESS;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    /*
     * Use configuration data composed at init time.
     * Write to hardware the receive ring configurations.
     */
    rdc_desc.mbox_enable = 1;
    rdc_desc.mbox_addr = mbox_p->mbox_addr;
    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
        mbox_p->mbox_addr, rdc_desc.mbox_addr));

    rdc_desc.rbr_len = rbr_p->rbb_max;
    rdc_desc.rbr_addr = rbr_p->rbr_addr;

    switch (nxgep->rx_bksize_code) {
    case RBR_BKSIZE_4K:
        rdc_desc.page_size = SIZE_4KB;
        break;
    case RBR_BKSIZE_8K:
        rdc_desc.page_size = SIZE_8KB;
        break;
    case RBR_BKSIZE_16K:
        rdc_desc.page_size = SIZE_16KB;
        break;
    case RBR_BKSIZE_32K:
        rdc_desc.page_size = SIZE_32KB;
        break;
    }

    rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
    rdc_desc.valid0 = 1;

    rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
    rdc_desc.valid1 = 1;

    rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
    rdc_desc.valid2 = 1;

    rdc_desc.full_hdr = rcr_p->full_hdr_flag;
    rdc_desc.offset = rcr_p->sw_priv_hdr_len;

    rdc_desc.rcr_len = rcr_p->comp_size;
    rdc_desc.rcr_addr = rcr_p->rcr_addr;

    cfgb_p = &(rcr_p->rcr_cfgb);
    rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
    /* For now, disable this timeout in a guest domain. */
    if (isLDOMguest(nxgep)) {
        rdc_desc.rcr_timeout = 0;
        rdc_desc.rcr_timeout_enable = 0;
    } else {
        rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
        rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
    }

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
        "rbr_len qlen %d pagesize code %d rcr_len %d",
        rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
        "size 0 %d size 1 %d size 2 %d",
        rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
        rbr_p->npi_pkt_buf_size2));

    if (nxgep->niu_hw_type == NIU_HW_TYPE_RF)
        rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
            &rdc_desc, B_TRUE);
    else
        rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
            &rdc_desc, B_FALSE);
    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    /*
     * Enable the timeout and threshold.
     */
    rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
        rdc_desc.rcr_threshold);
    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
        rdc_desc.rcr_timeout);
    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    if (!isLDOMguest(nxgep)) {
        /* Enable the DMA */
        rs = npi_rxdma_cfg_rdc_enable(handle, channel);
        if (rs != NPI_SUCCESS) {
            return (NXGE_ERROR | rs);
        }
    }

    /* Kick the DMA engine. */
    npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);

    if (!isLDOMguest(nxgep)) {
        /* Clear the rbr empty bit */
        (void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
    }

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));

    return (NXGE_OK);
}
nxge_status_t
nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    /* disable the DMA */
    rs = npi_rxdma_cfg_rdc_disable(handle, channel);
    if (rs != NPI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_disable_rxdma_channel:failed (0x%x)",
            rs));
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
    return (NXGE_OK);
}
nxge_status_t
nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
{
    npi_handle_t handle;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_init_rxdma_channel_rcrflush"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    npi_rxdma_rdc_rcr_flush(handle, channel);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "<== nxge_init_rxdma_channel_rcrflush"));
    return (status);
}
#define MID_INDEX(l, r) ((r + l + 1) >> 1)

#define TO_LEFT -1
#define TO_RIGHT 1
#define BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
#define BOTH_LEFT (TO_LEFT + TO_LEFT)
#define IN_MIDDLE (TO_RIGHT + TO_LEFT)

#define NO_HINT 0xffffffff
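
/*
 * Added worked example (assumes the TO_LEFT/TO_RIGHT values above): each
 * probe of the binary search in nxge_rxbuf_pp_to_vp() sets base_side to
 * TO_RIGHT when the target address is at or above the probed chunk's
 * base, and end_side to TO_LEFT when it is below the chunk's end.  The
 * sum then classifies the probe in a single switch:
 *
 *	base_side + end_side == IN_MIDDLE   (0): inside this chunk -- found
 *	base_side + end_side == BOTH_RIGHT  (2): search the right half
 *	base_side + end_side == BOTH_LEFT  (-2): search the left half
 */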
static nxge_status_t
nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
    uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
    uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
{
    int bufsize;
    uint64_t pktbuf_pp;
    uint64_t dvma_addr;
    rxring_info_t *ring_info;
    int base_side, end_side;
    int r_index, l_index, anchor_index;
    int found, search_done;
    uint32_t offset, chunk_size, block_size, page_size_mask;
    uint32_t chunk_index, block_index, total_index;
    int max_iterations, iteration;
    rxbuf_index_info_t *bufinfo;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
        pkt_buf_addr_pp,
        pktbufsz_type));
#if defined(__i386)
    pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
#else
    pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
#endif

    switch (pktbufsz_type) {
    case 0:
        bufsize = rbr_p->pkt_buf_size0;
        break;
    case 1:
        bufsize = rbr_p->pkt_buf_size1;
        break;
    case 2:
        bufsize = rbr_p->pkt_buf_size2;
        break;
    case RCR_SINGLE_BLOCK:
        bufsize = 0;
        anchor_index = 0;
        break;
    default:
        return (NXGE_ERROR);
    }

    if (rbr_p->num_blocks == 1) {
        anchor_index = 0;
        ring_info = rbr_p->ring_info;
        bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
            "buf_pp $%p btype %d anchor_index %d "
            "bufinfo $%p",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index,
            bufinfo));

        goto found_index;
    }

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: "
        "buf_pp $%p btype %d anchor_index %d",
        pkt_buf_addr_pp,
        pktbufsz_type,
        anchor_index));

    ring_info = rbr_p->ring_info;
    found = B_FALSE;
    bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
    iteration = 0;
    max_iterations = ring_info->max_iterations;

    /*
     * First check if this block has been seen
     * recently. This is indicated by a hint which
     * is initialized when the first buffer of the block
     * is seen. The hint is reset when the last buffer of
     * the block has been processed.
     * As three block sizes are supported, three hints
     * are kept. The idea behind the hints is that once
     * the hardware uses a block for a buffer of that
     * size, it will use it exclusively for that size
     * and will use it until it is exhausted. It is assumed
     * that there would be a single block being used for the same
     * buffer sizes at any given time.
     */
    if (ring_info->hint[pktbufsz_type] != NO_HINT) {
        anchor_index = ring_info->hint[pktbufsz_type];
        dvma_addr = bufinfo[anchor_index].dvma_addr;
        chunk_size = bufinfo[anchor_index].buf_size;
        if ((pktbuf_pp >= dvma_addr) &&
            (pktbuf_pp < (dvma_addr + chunk_size))) {
            found = B_TRUE;
            /*
             * check if this is the last buffer in the block
             * If so, then reset the hint for the size;
             */

            if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
                ring_info->hint[pktbufsz_type] = NO_HINT;
        }
    }

    if (found == B_FALSE) {
        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (!found)"
            "buf_pp $%p btype %d anchor_index %d",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index));

        /*
         * This is the first buffer of the block of this
         * size. Need to search the whole information
         * array.
         * the search algorithm uses a binary tree search
         * algorithm. It assumes that the information is
         * already sorted with increasing order
         * info[0] < info[1] < info[2]  .... < info[n-1]
         * where n is the size of the information array
         */
        r_index = rbr_p->num_blocks - 1;
        l_index = 0;
        search_done = B_FALSE;
        anchor_index = MID_INDEX(r_index, l_index);
        while (search_done == B_FALSE) {
            if ((r_index == l_index) ||
                (iteration >= max_iterations))
                search_done = B_TRUE;
            end_side = TO_RIGHT; /* to the right */
            base_side = TO_LEFT; /* to the left */
            /* read the DVMA address information and sort it */
            dvma_addr = bufinfo[anchor_index].dvma_addr;
            chunk_size = bufinfo[anchor_index].buf_size;
            NXGE_DEBUG_MSG((nxgep, RX2_CTL,
                "==> nxge_rxbuf_pp_to_vp: (searching)"
                "buf_pp $%p btype %d "
                "anchor_index %d chunk_size %d dvmaaddr $%p",
                pkt_buf_addr_pp,
                pktbufsz_type,
                anchor_index,
                chunk_size,
                dvma_addr));

            if (pktbuf_pp >= dvma_addr)
                base_side = TO_RIGHT; /* to the right */
            if (pktbuf_pp < (dvma_addr + chunk_size))
                end_side = TO_LEFT; /* to the left */

            switch (base_side + end_side) {
            case IN_MIDDLE:
                /* found */
                found = B_TRUE;
                search_done = B_TRUE;
                if ((pktbuf_pp + bufsize) <
                    (dvma_addr + chunk_size))
                    ring_info->hint[pktbufsz_type] =
                        bufinfo[anchor_index].buf_index;
                break;
            case BOTH_RIGHT:
                /* not found: go to the right */
                l_index = anchor_index + 1;
                anchor_index = MID_INDEX(r_index, l_index);
                break;
            case BOTH_LEFT:
                /* not found: go to the left */
                r_index = anchor_index - 1;
                anchor_index = MID_INDEX(r_index, l_index);
                break;
            default: /* should not come here */
                return (NXGE_ERROR);
            }
            iteration++;
        }

        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (search done)"
            "buf_pp $%p btype %d anchor_index %d",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index));
    }

    if (found == B_FALSE) {
        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (search failed)"
            "buf_pp $%p btype %d anchor_index %d",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index));
        return (NXGE_ERROR);
    }

found_index:
    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
        "buf_pp $%p btype %d bufsize %d anchor_index %d",
        pkt_buf_addr_pp,
        pktbufsz_type,
        bufsize,
        anchor_index));

    /* index of the first block in this chunk */
    chunk_index = bufinfo[anchor_index].start_index;
    dvma_addr = bufinfo[anchor_index].dvma_addr;
    page_size_mask = ring_info->block_size_mask;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
        "buf_pp $%p btype %d bufsize %d "
        "anchor_index %d chunk_index %d dvma $%p",
        pkt_buf_addr_pp,
        pktbufsz_type,
        bufsize,
        anchor_index,
        chunk_index,
        dvma_addr));

    offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
    block_size = rbr_p->block_size; /* System block(page) size */

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
        "buf_pp $%p btype %d bufsize %d "
        "anchor_index %d chunk_index %d dvma $%p "
        "offset %d block_size %d",
        pkt_buf_addr_pp,
        pktbufsz_type,
        bufsize,
        anchor_index,
        chunk_index,
        dvma_addr,
        offset,
        block_size));

    NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));

    block_index = (offset / block_size); /* index within chunk */
    total_index = chunk_index + block_index;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: "
        "total_index %d dvma_addr $%p "
        "offset %d block_size %d "
        "block_index %d ",
        total_index, dvma_addr,
        offset, block_size,
        block_index));
#if defined(__i386)
    *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
        (uint32_t)offset);
#else
    *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
        (uint64_t)offset);
#endif

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: "
        "total_index %d dvma_addr $%p "
        "offset %d block_size %d "
        "block_index %d "
        "*pkt_buf_addr_p $%p",
        total_index, dvma_addr,
        offset, block_size,
        block_index,
        *pkt_buf_addr_p));

    *msg_index = total_index;
    *bufoffset = (offset & page_size_mask);

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: get msg index: "
        "msg_index %d bufoffset_index %d",
        *msg_index,
        *bufoffset));

    NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));

    return (NXGE_OK);
}
/*
 * used by quick sort (qsort) function
 * to perform comparison
 */
static int
nxge_sort_compare(const void *p1, const void *p2)
{
    rxbuf_index_info_t *a, *b;

    a = (rxbuf_index_info_t *)p1;
    b = (rxbuf_index_info_t *)p2;

    if (a->dvma_addr > b->dvma_addr)
        return (1);
    if (a->dvma_addr < b->dvma_addr)
        return (-1);
    return (0);
}
/*
 * grabbed this sort implementation from common/syscall/avl.c
 */
/*
 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
 * v = Ptr to array/vector of objs
 * n = # objs in the array
 * s = size of each obj (must be multiples of a word size)
 * f = ptr to function to compare two objs
 *	returns -1 = less than, 0 = equal, 1 = greater than
 */
void
nxge_ksort(caddr_t v, int n, int s, int (*f)())
{
    int g, i, j, ii;
    unsigned int *p1, *p2;
    unsigned int tmp;

    /* No work to do */
    if (v == NULL || n <= 1)
        return;
    /* Sanity check on arguments */
    ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);

    for (g = n / 2; g > 0; g /= 2) {
        for (i = g; i < n; i++) {
            for (j = i - g; j >= 0 &&
                (*f)(v + j * s, v + (j + g) * s) == 1;
                j -= g) {
                p1 = (unsigned *)(v + j * s);
                p2 = (unsigned *)(v + (j + g) * s);
                /* swap the two objects word by word */
                for (ii = 0; ii < s / 4; ii++) {
                    tmp = *p1;
                    *p1++ = *p2;
                    *p2++ = tmp;
                }
            }
        }
    }
}
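
/*
 * Added usage note: nxge_ksort() is generic -- any word-aligned array and
 * qsort()-style comparator will do.  The only caller in this file is
 * nxge_rxbuf_index_info_init() below, which sorts the per-chunk DVMA
 * records:
 *
 *	nxge_ksort((void *)ring_info->buffer, max_index,
 *	    sizeof (rxbuf_index_info_t), nxge_sort_compare);
 */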
/*
 * Initialize data structures required for rxdma
 * buffer dvma->vmem address lookup
 */
static nxge_status_t
nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
{
    int index;
    rxring_info_t *ring_info;
    int max_iteration = 0, max_index = 0;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));

    ring_info = rbrp->ring_info;
    ring_info->hint[0] = NO_HINT;
    ring_info->hint[1] = NO_HINT;
    ring_info->hint[2] = NO_HINT;
    max_index = rbrp->num_blocks;

    /* read the DVMA address information and sort it */
    /* do init of the information array */

    NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
        " nxge_rxbuf_index_info_init Sort ptrs"));

    /* sort the array */
    nxge_ksort((void *)ring_info->buffer, max_index,
        sizeof (rxbuf_index_info_t), nxge_sort_compare);

    for (index = 0; index < max_index; index++) {
        NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
            " nxge_rxbuf_index_info_init: sorted chunk %d "
            " ioaddr $%p kaddr $%p size %x",
            index, ring_info->buffer[index].dvma_addr,
            ring_info->buffer[index].kaddr,
            ring_info->buffer[index].buf_size));
    }

    max_iteration = 0;
    while (max_index >= (1ULL << max_iteration))
        max_iteration++;
    ring_info->max_iterations = max_iteration + 1;
    NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
        " nxge_rxbuf_index_info_init Find max iter %d",
        ring_info->max_iterations));

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
    return (NXGE_OK);
}
/* ARGSUSED */
void
nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
{
#ifdef NXGE_DEBUG
    uint32_t bptr;
    uint64_t pp;

    bptr = entry_p->bits.hdw.pkt_buf_addr;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "\trcr entry $%p "
        "\trcr entry 0x%0llx "
        "\trcr entry 0x%08x "
        "\trcr entry 0x%08x "
        "\tvalue 0x%0llx\n"
        "\tmulti = %d\n"
        "\tpkt_type = 0x%x\n"
        "\tzero_copy = %d\n"
        "\tnoport = %d\n"
        "\tpromis = %d\n"
        "\terror = 0x%04x\n"
        "\tdcf_err = 0x%01x\n"
        "\tl2_len = %d\n"
        "\tpktbufsize = %d\n"
        "\tpkt_buf_addr = $%p\n"
        "\tpkt_buf_addr (<< 6) = $%p\n",
        entry_p,
        *(int64_t *)entry_p,
        *(int32_t *)entry_p,
        *(int32_t *)((char *)entry_p + 32),
        entry_p->value,
        entry_p->bits.hdw.multi,
        entry_p->bits.hdw.pkt_type,
        entry_p->bits.hdw.zero_copy,
        entry_p->bits.hdw.noport,
        entry_p->bits.hdw.promis,
        entry_p->bits.hdw.error,
        entry_p->bits.hdw.dcf_err,
        entry_p->bits.hdw.l2_len,
        entry_p->bits.hdw.pktbufsz,
        bptr,
        entry_p->bits.ldw.pkt_buf_addr));

    pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
        RCR_PKT_BUF_ADDR_SHIFT;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
        pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
#endif
}
void
nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
{
    npi_handle_t handle;
    rbr_stat_t rbr_stat;
    addr44_t hd_addr;
    addr44_t tail_addr;
    uint16_t qlen;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_regs_dump: rdc channel %d", rdc));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    /* RBR head */
    hd_addr.addr = 0;
    (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
#if defined(__i386)
    printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
        (void *)(uint32_t)hd_addr.addr);
#else
    printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
        (void *)hd_addr.addr);
#endif

    /* RBR stats */
    (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
    printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);

    /* RCR tail */
    tail_addr.addr = 0;
    (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
#if defined(__i386)
    printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
        (void *)(uint32_t)tail_addr.addr);
#else
    printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
        (void *)tail_addr.addr);
#endif

    /* RCR qlen */
    (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
    printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
}
nxge_status_t
nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    nxge_status_t status;
    npi_status_t rs = NPI_SUCCESS;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_rxdma_hw_mode: mode %d", enable));

    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_mode: not initialized"));
        return (NXGE_ERROR);
    }

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_tx_port_fatal_err_recover: "
            "NULL ring pointer(s)"));
        return (NXGE_ERROR);
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "nxge_rxdma_regs_dump_channels: no channels"));
        return (NXGE_OK);
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
            if (ring) {
                if (enable) {
                    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                        "==> nxge_rxdma_hw_mode: "
                        "channel %d (enable)", rdc));
                    rs = npi_rxdma_cfg_rdc_enable
                        (handle, rdc);
                } else {
                    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                        "==> nxge_rxdma_hw_mode: "
                        "channel %d (disable)", rdc));
                    rs = npi_rxdma_cfg_rdc_disable
                        (handle, rdc);
                }
            }
        }
    }

    status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "<== nxge_rxdma_hw_mode: status 0x%x", status));

    return (status);
}
void
nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_rxdma_enable_channel: channel %d", channel));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    (void) npi_rxdma_cfg_rdc_enable(handle, channel);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
}

void
nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_rxdma_disable_channel: channel %d", channel));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    (void) npi_rxdma_cfg_rdc_disable(handle, channel);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
}

void
nxge_hw_start_rx(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));

    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
    (void) nxge_rx_mac_enable(nxgep);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
}
/*ARGSUSED*/
void
nxge_fixup_rxdma_rings(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_tx_port_fatal_err_recover: "
            "NULL ring pointer(s)"));
        return;
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "nxge_rxdma_regs_dump_channels: no channels"));
        return;
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            if (ring) {
                nxge_rxdma_hw_stop(nxgep, rdc);
                NXGE_DEBUG_MSG((nxgep, RX_CTL,
                    "==> nxge_fixup_rxdma_rings: "
                    "channel %d ring $%p",
                    rdc, ring));
                (void) nxge_rxdma_fix_channel(nxgep, rdc);
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
}
void
nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
    int ndmas;
    p_rx_rbr_rings_t rx_rbr_rings;
    p_rx_rbr_ring_t *rbr_rings;
    p_rx_rcr_rings_t rx_rcr_rings;
    p_rx_rcr_ring_t *rcr_rings;
    p_rx_mbox_areas_t rx_mbox_areas_p;
    p_rx_mbox_t *rx_mbox_p;
    p_nxge_dma_pool_t dma_buf_poolp;
    p_nxge_dma_pool_t dma_cntl_poolp;
    p_rx_rbr_ring_t rbrp;
    p_rx_rcr_ring_t rcrp;
    p_rx_mbox_t mboxp;
    p_nxge_dma_common_t dmap;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));

    (void) nxge_rxdma_stop_channel(nxgep, channel);

    dma_buf_poolp = nxgep->rx_buf_pool_p;
    dma_cntl_poolp = nxgep->rx_cntl_pool_p;

    if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_rxdma_fix_channel: buf not allocated"));
        return;
    }

    ndmas = dma_buf_poolp->ndmas;
    if (!ndmas) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_rxdma_fix_channel: no dma allocated"));
        return;
    }

    rx_rbr_rings = nxgep->rx_rbr_rings;
    rx_rcr_rings = nxgep->rx_rcr_rings;
    rbr_rings = rx_rbr_rings->rbr_rings;
    rcr_rings = rx_rcr_rings->rcr_rings;
    rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
    rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;

    /* Reinitialize the receive block and completion rings */
    rbrp = (p_rx_rbr_ring_t)rbr_rings[channel];
    rcrp = (p_rx_rcr_ring_t)rcr_rings[channel];
    mboxp = (p_rx_mbox_t)rx_mbox_p[channel];

    rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
    rbrp->rbr_rd_index = 0;
    rcrp->comp_rd_index = 0;
    rcrp->comp_wt_index = 0;

    dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
    bzero((caddr_t)dmap->kaddrp, dmap->alength);

    status = nxge_rxdma_start_channel(nxgep, channel,
        rbrp, rcrp, mboxp);
    if (status != NXGE_OK) {
        goto nxge_rxdma_fix_channel_fail;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "<== nxge_rxdma_fix_channel: success (0x%08x)", status));
    return;

nxge_rxdma_fix_channel_fail:
    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "<== nxge_rxdma_fix_channel: failed (0x%08x)", status));
}
p_rx_rbr_ring_t
nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_get_rbr_ring: channel %d", channel));

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_rxdma_get_rbr_ring: "
            "NULL ring pointer(s)"));
        return (NULL);
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_get_rbr_ring: no channels"));
        return (NULL);
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            if (ring) {
                if (channel == ring->rdc) {
                    NXGE_DEBUG_MSG((nxgep, RX_CTL,
                        "==> nxge_rxdma_get_rbr_ring: "
                        "channel %d ring $%p", rdc, ring));
                    return (ring);
                }
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "<== nxge_rxdma_get_rbr_ring: not found"));
    return (NULL);
}
p_rx_rcr_ring_t
nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_get_rcr_ring: channel %d", channel));

    if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_rxdma_get_rcr_ring: "
            "NULL ring pointer(s)"));
        return (NULL);
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_get_rcr_ring: no channels"));
        return (NULL);
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rcr_ring_t *ring =
                nxgep->rx_rcr_rings->rcr_rings[rdc];
            if (ring) {
                if (channel == ring->rdc) {
                    NXGE_DEBUG_MSG((nxgep, RX_CTL,
                        "==> nxge_rxdma_get_rcr_ring: "
                        "channel %d ring $%p", rdc, ring));
                    return (ring);
                }
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "<== nxge_rxdma_get_rcr_ring: not found"));
    return (NULL);
}
/*
 * Static functions start here.
 */
static p_rx_msg_t
nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
{
    p_rx_msg_t nxge_mp = NULL;
    p_nxge_dma_common_t dmamsg_p;
    uchar_t *buffer;

    nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
    if (nxge_mp == NULL) {
        NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
            "Allocation of a rx msg failed."));
        goto nxge_allocb_exit;
    }

    nxge_mp->use_buf_pool = B_FALSE;
    if (dmabuf_p) {
        nxge_mp->use_buf_pool = B_TRUE;
        dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
        *dmamsg_p = *dmabuf_p;
        dmamsg_p->nblocks = 1;
        dmamsg_p->block_size = size;
        dmamsg_p->alength = size;
        buffer = (uchar_t *)dmabuf_p->kaddrp;

        dmabuf_p->kaddrp = (void *)
            ((char *)dmabuf_p->kaddrp + size);
        dmabuf_p->ioaddr_pp = (void *)
            ((char *)dmabuf_p->ioaddr_pp + size);
        dmabuf_p->alength -= size;
        dmabuf_p->offset += size;
        dmabuf_p->dma_cookie.dmac_laddress += size;
        dmabuf_p->dma_cookie.dmac_size -= size;
    } else {
        buffer = KMEM_ALLOC(size, KM_NOSLEEP);
        if (buffer == NULL) {
            NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
                "Allocation of a receive page failed."));
            goto nxge_allocb_fail1;
        }
    }

    nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
    if (nxge_mp->rx_mblk_p == NULL) {
        NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
        goto nxge_allocb_fail2;
    }

    nxge_mp->buffer = buffer;
    nxge_mp->block_size = size;
    nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
    nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
    nxge_mp->ref_cnt = 1;
    nxge_mp->free = B_TRUE;
    nxge_mp->rx_use_bcopy = B_FALSE;

    atomic_inc_32(&nxge_mblks_pending);

    goto nxge_allocb_exit;

nxge_allocb_fail2:
    if (!nxge_mp->use_buf_pool) {
        KMEM_FREE(buffer, size);
    }

nxge_allocb_fail1:
    KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
    nxge_mp = NULL;

nxge_allocb_exit:
    return (nxge_mp);
}
p_mblk_t
nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
{
    p_mblk_t mp;

    NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
    NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
        "offset = 0x%08X "
        "size = 0x%08X",
        nxge_mp, offset, size));

    mp = desballoc(&nxge_mp->buffer[offset], size,
        0, &nxge_mp->freeb);
    if (mp == NULL) {
        NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
        goto nxge_dupb_exit;
    }
    atomic_inc_32(&nxge_mp->ref_cnt);

nxge_dupb_exit:
    NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
        nxge_mp));
    return (mp);
}
p_mblk_t
nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
{
    p_mblk_t mp;
    uchar_t *dp;

    mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
    if (mp == NULL) {
        NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
        goto nxge_dupb_bcopy_exit;
    }
    dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
    bcopy((void *)&nxge_mp->buffer[offset], dp, size);
    mp->b_wptr = dp + size;

nxge_dupb_bcopy_exit:
    NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb_bcopy mp = $%p",
        nxge_mp));
    return (mp);
}
void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
    p_rx_msg_t rx_msg_p);

void
nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
{
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));

    /* Reuse this buffer */
    rx_msg_p->free = B_FALSE;
    rx_msg_p->cur_usage_cnt = 0;
    rx_msg_p->max_usage_cnt = 0;
    rx_msg_p->pkt_buf_size = 0;

    if (rx_rbr_p->rbr_use_bcopy) {
        rx_msg_p->rx_use_bcopy = B_FALSE;
        atomic_dec_32(&rx_rbr_p->rbr_consumed);
    }

    /*
     * Get the rbr header pointer and its offset index.
     */
    MUTEX_ENTER(&rx_rbr_p->post_lock);
    rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
        rx_rbr_p->rbr_wrap_mask);
    rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
    MUTEX_EXIT(&rx_rbr_p->post_lock);
    npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
        rx_rbr_p->rdc, 1);

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "<== nxge_post_page (channel %d post_next_index %d)",
        rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
}
void
nxge_freeb(p_rx_msg_t rx_msg_p)
{
    size_t size;
    uchar_t *buffer = NULL;
    int ref_cnt;
    boolean_t free_state = B_FALSE;

    rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;

    NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
    NXGE_DEBUG_MSG((NULL, MEM2_CTL,
        "nxge_freeb:rx_msg_p = $%p (block pending %d)",
        rx_msg_p, nxge_mblks_pending));

    /*
     * First we need to get the free state, then
     * atomic decrement the reference count to prevent
     * the race condition with the interrupt thread that
     * is processing a loaned up buffer block.
     */
    free_state = rx_msg_p->free;
    ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt);
    if (!ref_cnt) {
        atomic_dec_32(&nxge_mblks_pending);
        buffer = rx_msg_p->buffer;
        size = rx_msg_p->block_size;
        NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
            "will free: rx_msg_p = $%p (block pending %d)",
            rx_msg_p, nxge_mblks_pending));

        if (!rx_msg_p->use_buf_pool) {
            KMEM_FREE(buffer, size);
        }

        KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));

        if (ring) {
            /*
             * Decrement the receive buffer ring's reference
             * count, too.
             */
            atomic_dec_32(&ring->rbr_ref_cnt);

            /*
             * Free the receive buffer ring, if
             * 1. all the receive buffers have been freed
             * 2. and we are in the proper state (that is,
             *    we are not UNMAPPING).
             */
            if (ring->rbr_ref_cnt == 0 &&
                ring->rbr_state == RBR_UNMAPPED) {
                /*
                 * Free receive data buffers,
                 * buffer index information
                 * (rxring_info) and
                 * the message block ring.
                 */
                NXGE_DEBUG_MSG((NULL, RX_CTL,
                    "nxge_freeb:rx_msg_p = $%p "
                    "(block pending %d) free buffers",
                    rx_msg_p, nxge_mblks_pending));
                nxge_rxdma_databuf_free(ring);
                if (ring->ring_info) {
                    KMEM_FREE(ring->ring_info,
                        sizeof (rxring_info_t));
                }

                if (ring->rx_msg_ring) {
                    KMEM_FREE(ring->rx_msg_ring,
                        ring->tnblocks *
                        sizeof (p_rx_msg_t));
                }
                KMEM_FREE(ring, sizeof (*ring));
            }
        }
        return;
    }

    /*
     * Repost buffer.
     */
    if (free_state && (ref_cnt == 1) && ring) {
        NXGE_DEBUG_MSG((NULL, RX_CTL,
            "nxge_freeb: post page $%p:", rx_msg_p));
        if (ring->rbr_state == RBR_POSTING)
            nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
    }

    NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
}
uint_t
nxge_rx_intr(void *arg1, void *arg2)
{
    p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
    p_nxge_t nxgep = (p_nxge_t)arg2;
    p_nxge_ldg_t ldgp;
    uint8_t channel;
    npi_handle_t handle;
    rx_dma_ctl_stat_t cs;
    p_rx_rcr_ring_t rcrp;
    mblk_t *mp = NULL;

    if (ldvp == NULL) {
        NXGE_DEBUG_MSG((NULL, INT_CTL,
            "<== nxge_rx_intr: arg2 $%p arg1 $%p",
            nxgep, ldvp));
        return (DDI_INTR_CLAIMED);
    }

    if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
        nxgep = ldvp->nxgep;
    }

    if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
        (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "<== nxge_rx_intr: interface not started or initialized"));
        return (DDI_INTR_CLAIMED);
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rx_intr: arg2 $%p arg1 $%p",
        nxgep, ldvp));

    /*
     * Get the PIO handle.
     */
    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    /*
     * Get the ring to enable us to process packets.
     */
    rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];

    /*
     * The RCR ring lock must be held when packets
     * are being processed and the hardware registers are
     * being read or written to prevent race condition
     * among the interrupt thread, the polling thread
     * (will cause fatal errors such as rcrincon bit set)
     * and the setting of the poll_flag.
     */
    MUTEX_ENTER(&rcrp->lock);

    /*
     * Get the control and status for this channel.
     */
    channel = ldvp->channel;
    ldgp = ldvp->ldgp;

    if (!isLDOMguest(nxgep) && (!rcrp->started)) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "<== nxge_rx_intr: channel is not started"));

        /*
         * We received an interrupt before the ring is started.
         */
        RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
            &cs.value);
        cs.value &= RX_DMA_CTL_STAT_WR1C;
        cs.bits.hdw.mex = 1;
        RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
            cs.value);

        /*
         * Rearm this logical group if this is a single device
         * group.
         */
        if (ldgp->nldvs == 1) {
            if (isLDOMguest(nxgep)) {
                nxge_hio_ldgimgn(nxgep, ldgp);
            } else {
                ldgimgm_t mgm;

                mgm.value = 0;
                mgm.bits.ldw.arm = 1;
                mgm.bits.ldw.timer = ldgp->ldg_timer;

                NXGE_REG_WR64(handle,
                    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
                    mgm.value);
            }
        }
        MUTEX_EXIT(&rcrp->lock);
        return (DDI_INTR_CLAIMED);
    }

    ASSERT(rcrp->ldgp == ldgp);
    ASSERT(rcrp->ldvp == ldvp);

    RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
        "cs 0x%016llx rcrto 0x%x rcrthres %x",
        channel,
        cs.value,
        cs.bits.hdw.rcrto,
        cs.bits.hdw.rcrthres));

    if (!rcrp->poll_flag) {
        mp = nxge_rx_pkts(nxgep, rcrp, cs, -1);
    }

    /* error events. */
    if (cs.value & RX_DMA_CTL_STAT_ERROR) {
        (void) nxge_rx_err_evnts(nxgep, channel, cs);
    }

    /*
     * Enable the mailbox update interrupt if we want
     * to use mailbox. We probably don't need to use
     * mailbox as it only saves us one pio read.
     * Also write 1 to rcrthres and rcrto to clear
     * these two edge triggered bits.
     */
    cs.value &= RX_DMA_CTL_STAT_WR1C;
    cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1;
    RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
        cs.value);

    /*
     * If the polling mode is enabled, disable the interrupt.
     */
    if (rcrp->poll_flag) {
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
            "(disabling interrupts)", channel, ldgp, ldvp));

        /*
         * Disarm this logical group if this is a single device
         * group.
         */
        if (ldgp->nldvs == 1) {
            if (isLDOMguest(nxgep)) {
                ldgp->arm = B_FALSE;
                nxge_hio_ldgimgn(nxgep, ldgp);
            } else {
                ldgimgm_t mgm;

                mgm.value = 0;
                mgm.bits.ldw.arm = 0;
                NXGE_REG_WR64(handle,
                    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
                    mgm.value);
            }
        }
    } else {
        /*
         * Rearm this logical group if this is a single device
         * group.
         */
        if (ldgp->nldvs == 1) {
            if (isLDOMguest(nxgep)) {
                nxge_hio_ldgimgn(nxgep, ldgp);
            } else {
                ldgimgm_t mgm;

                mgm.value = 0;
                mgm.bits.ldw.arm = 1;
                mgm.bits.ldw.timer = ldgp->ldg_timer;

                NXGE_REG_WR64(handle,
                    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
                    mgm.value);
            }
        }

        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_rx_intr: rdc %d ldgp $%p "
            "exiting ISR (and call mac_rx_ring)", channel, ldgp));
    }
    MUTEX_EXIT(&rcrp->lock);

    if (mp != NULL) {
        mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp,
            rcrp->rcr_gen_num);
    }
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
    return (DDI_INTR_CLAIMED);
}
/*
 * This routine is the main packet receive processing function.
 * It gets the packet type, error code, and buffer related
 * information from the receive completion entry.
 * How many completion entries to process is based on the number of packets
 * queued by the hardware, a hardware maintained tail pointer
 * and a configurable receive packet count.
 *
 * A chain of message blocks will be created as a result of processing
 * the completion entries. This chain of message blocks will be returned and
 * a hardware control status register will be updated with the number of
 * packets that were removed from the hardware queue.
 *
 * The RCR ring lock is held when entering this function.
 */
static mblk_t *
nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
    int bytes_to_pickup)
{
    npi_handle_t handle;
    uint8_t channel;
    uint32_t comp_rd_index;
    p_rcr_entry_t rcr_desc_rd_head_p;
    p_rcr_entry_t rcr_desc_rd_head_pp;
    p_mblk_t nmp, mp_cont, head_mp, *tail_mp;
    uint16_t qlen, nrcr_read, npkt_read;
    uint32_t qlen_hw;
    boolean_t multi;
    rcrcfig_b_t rcr_cfg_b;
    int totallen = 0;
#if defined(_BIG_ENDIAN)
    npi_status_t rs = NPI_SUCCESS;
#endif

    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
        "channel %d", rcr_p->rdc));

    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        return (NULL);
    }
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    channel = rcr_p->rdc;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rx_pkts: START: rcr channel %d "
        "head_p $%p head_pp $%p index %d ",
        channel, rcr_p->rcr_desc_rd_head_p,
        rcr_p->rcr_desc_rd_head_pp,
        rcr_p->comp_rd_index));

#if !defined(_BIG_ENDIAN)
    qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
#else
    rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
    if (rs != NPI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
            "channel %d, get qlen failed 0x%08x",
            channel, rs));
        return (NULL);
    }
#endif
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
        "qlen %d", channel, qlen));

    if (!qlen) {
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_rx_pkts:rcr channel %d "
            "qlen %d (no pkts)", channel, qlen));
        return (NULL);
    }

    comp_rd_index = rcr_p->comp_rd_index;

    rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
    rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
    nrcr_read = npkt_read = 0;

    /*
     * Number of packets queued
     * (The jumbo or multi packet will be counted as only one
     *  packet and it may take up more than one completion entry).
     */
    qlen_hw = (qlen < nxge_max_rx_pkts) ?
        qlen : nxge_max_rx_pkts;
    head_mp = NULL;
    tail_mp = &head_mp;
    nmp = mp_cont = NULL;
    multi = B_FALSE;

    while (qlen_hw) {
#ifdef NXGE_DEBUG
        nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
#endif
        /*
         * Process one completion ring entry.
         */
        nxge_receive_packet(nxgep,
            rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);

        /*
         * message chaining modes
         */
        if (nmp) {
            nmp->b_next = NULL;
            if (!multi && !mp_cont) { /* frame fits a partition */
                *tail_mp = nmp;
                tail_mp = &nmp->b_next;
                totallen += MBLKL(nmp);
                nmp = NULL;
            } else if (multi && !mp_cont) { /* first segment */
                *tail_mp = nmp;
                tail_mp = &nmp->b_cont;
                totallen += MBLKL(nmp);
            } else if (multi && mp_cont) { /* mid of multi segs */
                *tail_mp = mp_cont;
                tail_mp = &mp_cont->b_cont;
                totallen += MBLKL(mp_cont);
            } else if (!multi && mp_cont) { /* last segment */
                *tail_mp = mp_cont;
                tail_mp = &nmp->b_next;
                totallen += MBLKL(mp_cont);
                nmp = NULL;
            }
        }
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "==> nxge_rx_pkts: loop: rcr channel %d "
            "before updating: multi %d "
            "nrcr_read %d "
            "npk read %d "
            "head_pp $%p index %d ",
            channel,
            multi,
            nrcr_read, npkt_read, rcr_desc_rd_head_pp,
            comp_rd_index));

        if (!multi) {
            qlen_hw--;
            npkt_read++;
        }

        /*
         * Update the next read entry.
         */
        comp_rd_index = NEXT_ENTRY(comp_rd_index,
            rcr_p->comp_wrap_mask);

        rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
            rcr_p->rcr_desc_first_p,
            rcr_p->rcr_desc_last_p);

        nrcr_read++;

        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rx_pkts: (SAM, process one packet) "
            "nrcr_read %d", nrcr_read));
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "==> nxge_rx_pkts: loop: rcr channel %d "
            "multi %d "
            "nrcr_read %d "
            "npk read %d "
            "head_pp $%p index %d ",
            channel,
            multi,
            nrcr_read, npkt_read, rcr_desc_rd_head_pp,
            comp_rd_index));

        if ((bytes_to_pickup != -1) &&
            (totallen >= bytes_to_pickup)) {
            break;
        }
    }

    rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
    rcr_p->comp_rd_index = comp_rd_index;
    rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
    if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
        (nxgep->intr_threshold != rcr_p->intr_threshold)) {

        rcr_p->intr_timeout = (nxgep->intr_timeout <
            NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
            nxgep->intr_timeout;

        rcr_p->intr_threshold = (nxgep->intr_threshold <
            NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
            nxgep->intr_threshold;

        rcr_cfg_b.value = 0x0ULL;
        rcr_cfg_b.bits.ldw.entout = 1;
        rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
        rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;

        RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
            channel, rcr_cfg_b.value);
    }

    /*
     * Update RCR buffer pointer read and number of packets
     * read.
     */
    cs.bits.ldw.pktread = npkt_read;
    cs.bits.ldw.ptrread = nrcr_read;
    RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
        channel, cs.value);

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rx_pkts: EXIT: rcr channel %d "
        "head_pp $%p index %016llx ",
        channel,
        rcr_p->rcr_desc_rd_head_pp,
        rcr_p->comp_rd_index));

    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return "
        "channel %d", rcr_p->rdc));

    return (head_mp);
}
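
/*
 * Added illustration: the chaining in nxge_rx_pkts() above produces a
 * two-dimensional mblk structure.  For three frames where the second is a
 * two-segment (multi) frame, the result looks like:
 *
 *	frame1 --b_next--> frame2(seg1) --b_next--> frame3
 *	                        |
 *	                      b_cont
 *	                        |
 *	                   frame2(seg2)
 *
 * b_next links complete frames; b_cont links the segments of a single
 * multi-buffer frame.
 */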
2170 nxge_receive_packet(p_nxge_t nxgep
,
2171 p_rx_rcr_ring_t rcr_p
, p_rcr_entry_t rcr_desc_rd_head_p
,
2172 boolean_t
*multi_p
, mblk_t
**mp
, mblk_t
**mp_cont
)
2174 p_mblk_t nmp
= NULL
;
2179 boolean_t first_entry
= B_TRUE
;
2180 boolean_t is_tcp_udp
= B_FALSE
;
2181 boolean_t buffer_free
= B_FALSE
;
2182 boolean_t error_send_up
= B_FALSE
;
2186 uint8_t pktbufsz_type
;
2188 uint64_t *pkt_buf_addr_pp
;
2189 uint64_t *pkt_buf_addr_p
;
2190 uint32_t buf_offset
;
2192 uint32_t error_disp_cnt
;
2194 p_rx_rbr_ring_t rx_rbr_p
;
2195 p_rx_msg_t
*rx_msg_ring_p
;
2196 p_rx_msg_t rx_msg_p
;
2197 uint16_t sw_offset_bytes
= 0, hdr_size
= 0;
2198 nxge_status_t status
= NXGE_OK
;
2199 boolean_t is_valid
= B_FALSE
;
2200 p_nxge_rx_ring_stats_t rdc_stats
;
2201 uint32_t bytes_read
;
2204 boolean_t pkt_too_long_err
= B_FALSE
;
2208 NXGE_DEBUG_MSG((nxgep
, RX2_CTL
, "==> nxge_receive_packet"));
2209 first_entry
= (*mp
== NULL
) ? B_TRUE
: B_FALSE
;
2211 rcr_entry
= *((uint64_t *)rcr_desc_rd_head_p
);
2213 multi
= (rcr_entry
& RCR_MULTI_MASK
);
2214 dcf_err
= (rcr_entry
& RCR_DCF_ERROR_MASK
);
2215 pkt_type
= (rcr_entry
& RCR_PKT_TYPE_MASK
);
2217 error_type
= ((rcr_entry
& RCR_ERROR_MASK
) >> RCR_ERROR_SHIFT
);
2218 frag
= (rcr_entry
& RCR_FRAG_MASK
);
2220 l2_len
= ((rcr_entry
& RCR_L2_LEN_MASK
) >> RCR_L2_LEN_SHIFT
);
2222 pktbufsz_type
= ((rcr_entry
& RCR_PKTBUFSZ_MASK
) >>
2223 RCR_PKTBUFSZ_SHIFT
);
2225 pkt_buf_addr_pp
= (uint64_t *)(uint32_t)((rcr_entry
&
2226 RCR_PKT_BUF_ADDR_MASK
) << RCR_PKT_BUF_ADDR_SHIFT
);
2228 pkt_buf_addr_pp
= (uint64_t *)((rcr_entry
& RCR_PKT_BUF_ADDR_MASK
) <<
2229 RCR_PKT_BUF_ADDR_SHIFT
);
2232 channel
= rcr_p
->rdc
;
2234 NXGE_DEBUG_MSG((nxgep
, RX2_CTL
,
2235 "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2236 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2237 "error_type 0x%x pkt_type 0x%x "
2238 "pktbufsz_type %d ",
2240 rcr_entry
, pkt_buf_addr_pp
, l2_len
,
2246 NXGE_DEBUG_MSG((nxgep
, RX2_CTL
,
2247 "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2248 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2249 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p
,
2250 rcr_entry
, pkt_buf_addr_pp
, l2_len
,
2255 NXGE_DEBUG_MSG((nxgep
, RX2_CTL
,
2256 "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2257 "full pkt_buf_addr_pp $%p l2_len %d",
2258 rcr_entry
, pkt_buf_addr_pp
, l2_len
));
2260 /* get the stats ptr */
2261 rdc_stats
= rcr_p
->rdc_stats
;
2265 NXGE_DEBUG_MSG((nxgep
, RX_CTL
,
2266 "<== nxge_receive_packet: failed: l2 length is 0."));
2271 * Software workaround for BMAC hardware limitation that allows
2272 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406
2273 * instead of 0x2400 for jumbo.
2275 if (l2_len
> nxgep
->mac
.maxframesize
) {
2276 pkt_too_long_err
= B_TRUE
;
2279 /* Hardware sends us 4 bytes of CRC as no stripping is done. */
2280 l2_len
-= ETHERFCSL
;
2282 /* shift 6 bits to get the full io address */
2284 pkt_buf_addr_pp
= (uint64_t *)((uint32_t)pkt_buf_addr_pp
<<
2285 RCR_PKT_BUF_ADDR_SHIFT_FULL
);
2287 pkt_buf_addr_pp
= (uint64_t *)((uint64_t)pkt_buf_addr_pp
<<
2288 RCR_PKT_BUF_ADDR_SHIFT_FULL
);
2290 NXGE_DEBUG_MSG((nxgep
, RX2_CTL
,
2291 "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2292 "full pkt_buf_addr_pp $%p l2_len %d",
2293 rcr_entry
, pkt_buf_addr_pp
, l2_len
));
2295 rx_rbr_p
= rcr_p
->rx_rbr_p
;
2296 rx_msg_ring_p
= rx_rbr_p
->rx_msg_ring
;
2299 hdr_size
= (rcr_p
->full_hdr_flag
? RXDMA_HDR_SIZE_FULL
:
2300 RXDMA_HDR_SIZE_DEFAULT
);
2302 NXGE_DEBUG_MSG((nxgep
, RX_CTL
,
2303 "==> nxge_receive_packet: first entry 0x%016llx "
2304 "pkt_buf_addr_pp $%p l2_len %d hdr %d",
2305 rcr_entry
, pkt_buf_addr_pp
, l2_len
,
2309 MUTEX_ENTER(&rx_rbr_p
->lock
);
2311 NXGE_DEBUG_MSG((nxgep
, RX_CTL
,
2312 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
2313 "full pkt_buf_addr_pp $%p l2_len %d",
2314 rcr_entry
, pkt_buf_addr_pp
, l2_len
));
2317 * Packet buffer address in the completion entry points
2318 * to the starting buffer address (offset 0).
2319 * Use the starting buffer address to locate the corresponding
2322 status
= nxge_rxbuf_pp_to_vp(nxgep
, rx_rbr_p
,
2323 pktbufsz_type
, pkt_buf_addr_pp
, &pkt_buf_addr_p
,
2327 NXGE_DEBUG_MSG((nxgep
, RX_CTL
,
2328 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
2329 "full pkt_buf_addr_pp $%p l2_len %d",
2330 rcr_entry
, pkt_buf_addr_pp
, l2_len
));
2332 if (status
!= NXGE_OK
) {
2333 MUTEX_EXIT(&rx_rbr_p
->lock
);
2334 NXGE_DEBUG_MSG((nxgep
, RX_CTL
,
2335 "<== nxge_receive_packet: found vaddr failed %d",
2340 NXGE_DEBUG_MSG((nxgep
, RX2_CTL
,
2341 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
2342 "full pkt_buf_addr_pp $%p l2_len %d",
2343 rcr_entry
, pkt_buf_addr_pp
, l2_len
));
2345 NXGE_DEBUG_MSG((nxgep
, RX2_CTL
,
2346 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2347 "full pkt_buf_addr_pp $%p l2_len %d",
2348 msg_index
, rcr_entry
, pkt_buf_addr_pp
, l2_len
));
2350 rx_msg_p
= rx_msg_ring_p
[msg_index
];
2352 NXGE_DEBUG_MSG((nxgep
, RX2_CTL
,
2353 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2354 "full pkt_buf_addr_pp $%p l2_len %d",
2355 msg_index
, rcr_entry
, pkt_buf_addr_pp
, l2_len
));
	switch (pktbufsz_type) {
	case RCR_PKTBUFSZ_0:
		bsize = rx_rbr_p->pkt_buf_size0_bytes;
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_receive_packet: 0 buf %d", bsize));
		break;
	case RCR_PKTBUFSZ_1:
		bsize = rx_rbr_p->pkt_buf_size1_bytes;
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_receive_packet: 1 buf %d", bsize));
		break;
	case RCR_PKTBUFSZ_2:
		bsize = rx_rbr_p->pkt_buf_size2_bytes;
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_receive_packet: 2 buf %d", bsize));
		break;
	case RCR_SINGLE_BLOCK:
		bsize = rx_msg_p->block_size;
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_receive_packet: single %d", bsize));
		break;
	default:
		MUTEX_EXIT(&rx_rbr_p->lock);
		return;
	}
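
	/*
	 * Convert the software offset code (nxge_rdc_buf_offset) into a
	 * byte count so that the DMA sync below covers the software
	 * offset area, the packet header and the payload.
	 */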
	switch (nxge_rdc_buf_offset) {
	case SW_OFFSET_NO_OFFSET:
		sw_offset_bytes = 0;
		break;
	case SW_OFFSET_64:
		sw_offset_bytes = 64;
		break;
	case SW_OFFSET_128:
		sw_offset_bytes = 128;
		break;
	case SW_OFFSET_192:
		sw_offset_bytes = 192;
		break;
	case SW_OFFSET_256:
		sw_offset_bytes = 256;
		break;
	case SW_OFFSET_320:
		sw_offset_bytes = 320;
		break;
	case SW_OFFSET_384:
		sw_offset_bytes = 384;
		break;
	case SW_OFFSET_448:
		sw_offset_bytes = 448;
		break;
	default:
		sw_offset_bytes = 0;
		break;
	}
	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
	    (buf_offset + sw_offset_bytes),
	    (hdr_size + l2_len),
	    DDI_DMA_SYNC_FORCPU);

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_receive_packet: after first dump:usage count"));
	if (rx_msg_p->cur_usage_cnt == 0) {
		if (rx_rbr_p->rbr_use_bcopy) {
			atomic_inc_32(&rx_rbr_p->rbr_consumed);
			if (rx_rbr_p->rbr_consumed <
			    rx_rbr_p->rbr_threshold_hi) {
				if (rx_rbr_p->rbr_threshold_lo == 0 ||
				    ((rx_rbr_p->rbr_consumed >=
				    rx_rbr_p->rbr_threshold_lo) &&
				    (rx_rbr_p->rbr_bufsize_type >=
				    pktbufsz_type))) {
					rx_msg_p->rx_use_bcopy = B_TRUE;
				}
			} else {
				rx_msg_p->rx_use_bcopy = B_TRUE;
			}
		}
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_receive_packet: buf %d (new block) ",
		    bsize));

		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
		rx_msg_p->pkt_buf_size = bsize;
		rx_msg_p->cur_usage_cnt = 1;
		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			    "==> nxge_receive_packet: buf %d "
			    "(single block) ",
			    bsize));
			/*
			 * Buffer can be reused once the free function
			 * is called.
			 */
			rx_msg_p->max_usage_cnt = 1;
			buffer_free = B_TRUE;
		} else {
			rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
			if (rx_msg_p->max_usage_cnt == 1) {
				buffer_free = B_TRUE;
			}
		}
	} else {
		rx_msg_p->cur_usage_cnt++;
		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
			buffer_free = B_TRUE;
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
	    msg_index, l2_len,
	    rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
	if ((error_type) || (dcf_err) || (pkt_too_long_err)) {
		rdc_stats->ierrors++;
		if (dcf_err) {
			rdc_stats->dcf_err++;
#ifdef	NXGE_DEBUG
			if (!rdc_stats->dcf_err) {
				NXGE_DEBUG_MSG((nxgep, RX_CTL,
				    "nxge_receive_packet: channel %d dcf_err rcr"
				    " 0x%llx", channel, rcr_entry));
			}
#endif
			NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
			    NXGE_FM_EREPORT_RDMC_DCF_ERR);
		} else if (pkt_too_long_err) {
			rdc_stats->pkt_too_long_err++;
			NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:"
			    " channel %d packet length [%d] > "
			    "maxframesize [%d]", channel, l2_len + ETHERFCSL,
			    nxgep->mac.maxframesize));
		} else {
			/* Update error stats */
			error_disp_cnt = NXGE_ERROR_SHOW_MAX;
			rdc_stats->errlog.compl_err_type = error_type;

			switch (error_type) {
			/*
			 * Do not send FMA ereport for RCR_L2_ERROR and
			 * RCR_L4_CSUM_ERROR because most likely they indicate
			 * back pressure rather than HW failures.
			 */
			case RCR_L2_ERROR:
				rdc_stats->l2_err++;
				if (rdc_stats->l2_err <
				    error_disp_cnt) {
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    " nxge_receive_packet:"
					    " channel %d RCR L2_ERROR",
					    channel));
				}
				break;
			case RCR_L4_CSUM_ERROR:
				error_send_up = B_TRUE;
				rdc_stats->l4_cksum_err++;
				if (rdc_stats->l4_cksum_err <
				    error_disp_cnt) {
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    " nxge_receive_packet:"
					    " channel %d"
					    " RCR L4_CSUM_ERROR", channel));
				}
				break;
			/*
			 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
			 * RCR_ZCP_SOFT_ERROR because they reflect the same
			 * FFLP and ZCP errors that have been reported by
			 * nxge_fflp.c and nxge_zcp.c.
			 */
			case RCR_FFLP_SOFT_ERROR:
				error_send_up = B_TRUE;
				rdc_stats->fflp_soft_err++;
				if (rdc_stats->fflp_soft_err <
				    error_disp_cnt) {
					NXGE_ERROR_MSG((nxgep,
					    NXGE_ERR_CTL,
					    " nxge_receive_packet:"
					    " channel %d"
					    " RCR FFLP_SOFT_ERROR", channel));
				}
				break;
			case RCR_ZCP_SOFT_ERROR:
				error_send_up = B_TRUE;
				rdc_stats->zcp_soft_err++;
				if (rdc_stats->zcp_soft_err <
				    error_disp_cnt) {
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    " nxge_receive_packet: Channel %d"
					    " RCR ZCP_SOFT_ERROR", channel));
				}
				break;
			default:
				rdc_stats->rcr_unknown_err++;
				if (rdc_stats->rcr_unknown_err
				    < error_disp_cnt) {
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    " nxge_receive_packet: Channel %d"
					    " RCR entry 0x%llx error 0x%x",
					    rcr_entry, channel, error_type));
				}
				break;
			}
		}
		/*
		 * Update and repost buffer block if max usage
		 * count is reached.
		 */
		if (error_send_up == B_FALSE) {
			atomic_inc_32(&rx_msg_p->ref_cnt);
			if (buffer_free == B_TRUE) {
				rx_msg_p->free = B_TRUE;
			}

			MUTEX_EXIT(&rx_rbr_p->lock);
			nxge_freeb(rx_msg_p);
			return;
		}
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_receive_packet: DMA sync second "));
	bytes_read = rcr_p->rcvd_pkt_bytes;
	skip_len = sw_offset_bytes + hdr_size;
	if (!rx_msg_p->rx_use_bcopy) {
		/*
		 * For loaned up buffers, the driver reference count is
		 * incremented first, and then the free state is set.
		 */
		if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
			if (first_entry) {
				nmp->b_rptr = &nmp->b_rptr[skip_len];
				if (l2_len < bsize - skip_len) {
					nmp->b_wptr = &nmp->b_rptr[l2_len];
				} else {
					nmp->b_wptr = &nmp->b_rptr[bsize
					    - skip_len];
				}
			} else {
				if (l2_len - bytes_read < bsize) {
					nmp->b_wptr =
					    &nmp->b_rptr[l2_len - bytes_read];
				} else {
					nmp->b_wptr = &nmp->b_rptr[bsize];
				}
			}
		}
	} else {
		if (first_entry) {
			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
			    l2_len < bsize - skip_len ?
			    l2_len : bsize - skip_len);
		} else {
			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
			    l2_len - bytes_read < bsize ?
			    l2_len - bytes_read : bsize);
		}
	}
	if (nmp != NULL) {
		if (first_entry) {
			/*
			 * Jumbo packets may be received with more than one
			 * buffer, increment ipackets for the first entry
			 * only.
			 */
			rdc_stats->ipackets++;

			/* Update ibytes for kstat. */
			rdc_stats->ibytes += skip_len
			    + l2_len < bsize ? l2_len : bsize;
			/*
			 * Update the number of bytes read so far for the
			 * current frame.
			 */
			bytes_read = nmp->b_wptr - nmp->b_rptr;
		} else {
			rdc_stats->ibytes += l2_len - bytes_read < bsize ?
			    l2_len - bytes_read : bsize;
			bytes_read += nmp->b_wptr - nmp->b_rptr;
		}
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_receive_packet after dupb: "
		    "rbr consumed %d "
		    "pktbufsz_type %d "
		    "nmp $%p rptr $%p wptr $%p "
		    "buf_offset %d bsize %d l2_len %d skip_len %d",
		    rx_rbr_p->rbr_consumed,
		    pktbufsz_type,
		    nmp, nmp->b_rptr, nmp->b_wptr,
		    buf_offset, bsize, l2_len, skip_len));
	} else {
		cmn_err(CE_WARN, "!nxge_receive_packet: "
		    "update stats (error)");
		atomic_inc_32(&rx_msg_p->ref_cnt);
		if (buffer_free == B_TRUE) {
			rx_msg_p->free = B_TRUE;
		}
		MUTEX_EXIT(&rx_rbr_p->lock);
		nxge_freeb(rx_msg_p);
		return;
	}

	if (buffer_free == B_TRUE) {
		rx_msg_p->free = B_TRUE;
	}
	is_valid = (nmp != NULL);

	rcr_p->rcvd_pkt_bytes = bytes_read;

	MUTEX_EXIT(&rx_rbr_p->lock);
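
	/*
	 * A bcopied buffer is not loaned upstream, so it can be returned
	 * to the hardware immediately: take the extra reference that
	 * nxge_freeb() expects and let it repost the page.
	 */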
	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
		atomic_inc_32(&rx_msg_p->ref_cnt);
		nxge_freeb(rx_msg_p);
	}
	if (is_valid) {
		nmp->b_cont = NULL;
		if (first_entry) {
			*mp = nmp;
			*mp_cont = NULL;
		} else {
			*mp_cont = nmp;
		}
	}

	/*
	 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
	 * If a packet is not fragmented and no error bit is set, then
	 * the L4 checksum is OK.
	 */
	if (is_valid && !multi) {
		/*
		 * If the checksum flag nxge_cksum_offload is 1, TCP and
		 * UDP packets can be sent up with a good checksum. If the
		 * flag is 0, checksum reporting applies to TCP packets
		 * only (workaround for a hardware bug). If the flag is
		 * greater than 1, hardware checksum results are reported
		 * for neither TCP nor UDP packets.
		 */
		if (nxge_cksum_offload == 1) {
			is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
			    pkt_type == RCR_PKT_IS_UDP) ?
			    B_TRUE : B_FALSE);
		} else if (!nxge_cksum_offload) {
			/* TCP checksum only. */
			is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
			    B_TRUE : B_FALSE);
		}

		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
		    "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
		    is_valid, multi, is_tcp_udp, frag, error_type));

		if (is_tcp_udp && !frag && !error_type) {
			mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK);
			NXGE_DEBUG_MSG((nxgep, RX_CTL,
			    "==> nxge_receive_packet: Full tcp/udp cksum "
			    "is_valid 0x%x multi 0x%llx pkt %d frag %d "
			    "error %d",
			    is_valid, multi, is_tcp_udp, frag, error_type));
		}
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_receive_packet: *mp 0x%016llx", *mp));

	*multi_p = (multi == RCR_MULTI_MASK);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
	    "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
	    *multi_p, nmp, *mp, *mp_cont));
}
/*
 * Enable polling for a ring. Interrupt for the ring is disabled when
 * the nxge interrupt comes (see nxge_rx_intr).
 */
int
nxge_enable_poll(void *arg)
{
	p_nxge_ring_handle_t	ring_handle = (p_nxge_ring_handle_t)arg;
	p_rx_rcr_ring_t		ringp;
	p_nxge_t		nxgep;
	p_nxge_ldg_t		ldgp;
	uint32_t		channel;

	if (ring_handle == NULL) {
		ASSERT(ring_handle != NULL);
		return (0);
	}

	nxgep = ring_handle->nxgep;
	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
	ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_enable_poll: rdc %d ", ringp->rdc));
	ldgp = ringp->ldgp;
	if (ldgp == NULL) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_enable_poll: rdc %d NULL ldgp: no change",
		    ringp->rdc));
		return (0);
	}

	MUTEX_ENTER(&ringp->lock);
	/* enable polling */
	if (ringp->poll_flag == 0) {
		ringp->poll_flag = 1;
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_enable_poll: rdc %d set poll flag to 1",
		    ringp->rdc));
	}

	MUTEX_EXIT(&ringp->lock);
	return (0);
}
/*
 * Disable polling for a ring and enable its interrupt.
 */
int
nxge_disable_poll(void *arg)
{
	p_nxge_ring_handle_t	ring_handle = (p_nxge_ring_handle_t)arg;
	p_rx_rcr_ring_t		ringp;
	p_nxge_t		nxgep;
	uint32_t		channel;

	if (ring_handle == NULL) {
		ASSERT(ring_handle != NULL);
		return (0);
	}

	nxgep = ring_handle->nxgep;
	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
	ringp = nxgep->rx_rcr_rings->rcr_rings[channel];

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_disable_poll: rdc %d poll_flag %d",
	    ringp->rdc, ringp->poll_flag));

	MUTEX_ENTER(&ringp->lock);

	/* disable polling: enable interrupt */
	if (ringp->poll_flag) {
		npi_handle_t		handle;
		rx_dma_ctl_stat_t	cs;
		p_nxge_ldg_t		ldgp;

		/*
		 * Get the control and status for this channel.
		 */
		handle = NXGE_DEV_NPI_HANDLE(nxgep);
		channel = ringp->rdc;
		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
		    channel, &cs.value);

		/*
		 * Enable mailbox update
		 * Since packets were not read and the hardware uses
		 * bits pktread and ptrread to update the queue
		 * length, we need to set both bits to 0.
		 */
		cs.bits.ldw.pktread = 0;
		cs.bits.ldw.ptrread = 0;
		cs.bits.hdw.mex = 1;
		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
		    cs.value);

		/*
		 * Rearm this logical group if this is a single device
		 * group.
		 */
		ldgp = ringp->ldgp;
		if (ldgp == NULL) {
			ringp->poll_flag = 0;
			MUTEX_EXIT(&ringp->lock);
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_disable_poll: no ldgp rdc %d "
			    "(still set poll to 0)", ringp->rdc));
			return (0);
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
		    ringp->rdc, ldgp));
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				ldgp->arm = B_TRUE;
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t	mgm;

				mgm.value = 0;
				mgm.bits.ldw.arm = 1;
				mgm.bits.ldw.timer = ldgp->ldg_timer;
				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}
		ringp->poll_flag = 0;
	}

	MUTEX_EXIT(&ringp->lock);
	return (0);
}
/*
 * Poll 'bytes_to_pickup' bytes of message from the rx ring.
 */
mblk_t *
nxge_rx_poll(void *arg, int bytes_to_pickup)
{
	p_nxge_ring_handle_t	ring_handle = (p_nxge_ring_handle_t)arg;
	p_rx_rcr_ring_t		rcr_p;
	p_nxge_t		nxgep;
	npi_handle_t		handle;
	rx_dma_ctl_stat_t	cs;
	mblk_t			*mblk;
	p_nxge_ldv_t		ldvp;
	uint32_t		channel;

	nxgep = ring_handle->nxgep;

	/*
	 * Get the control and status for this channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
	rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel];
	MUTEX_ENTER(&rcr_p->lock);
	ASSERT(rcr_p->poll_flag == 1);

	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value);

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
	    rcr_p->rdc, rcr_p->poll_flag));
	mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup);

	ldvp = rcr_p->ldvp;
	if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) {
		(void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs);
	}

	MUTEX_EXIT(&rcr_p->lock);

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk));
	return (mblk);
}
static nxge_status_t
nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
{
	p_nxge_rx_ring_stats_t	rdc_stats;
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	boolean_t		rxchan_fatal = B_FALSE;
	boolean_t		rxport_fatal = B_FALSE;
	uint8_t			portn;
	nxge_status_t		status = NXGE_OK;
	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	portn = nxgep->mac.portnum;
	rdc_stats = &nxgep->statsp->rdc_stats[channel];

	if (cs.bits.hdw.rbr_tmout) {
		rdc_stats->rx_rbr_tmout++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts: rx_rbr_timeout"));
	}
	if (cs.bits.hdw.rsp_cnt_err) {
		rdc_stats->rsp_cnt_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "rsp_cnt_err", channel));
	}
	if (cs.bits.hdw.byte_en_bus) {
		rdc_stats->byte_en_bus++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: byte_en_bus", channel));
		rxchan_fatal = B_TRUE;
	}
	if (cs.bits.hdw.rsp_dat_err) {
		rdc_stats->rsp_dat_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rsp_dat_err", channel));
	}
	if (cs.bits.hdw.rcr_ack_err) {
		rdc_stats->rcr_ack_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rcr_ack_err", channel));
	}
	if (cs.bits.hdw.dc_fifo_err) {
		rdc_stats->dc_fifo_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
		/* This is not a fatal error! */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "dc_fifo_err", channel));
		rxport_fatal = B_TRUE;
	}
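
	/*
	 * The shadow-RCR and prefetched-RBR parity errors share one
	 * parity error log register: read it once, then attribute
	 * whichever of the two conditions is set.
	 */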
	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
		    &rdc_stats->errlog.pre_par,
		    &rdc_stats->errlog.sha_par))
		    != NPI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "rcr_sha_par: get perr", channel));
			return (NXGE_ERROR | rs);
		}
		if (cs.bits.hdw.rcr_sha_par) {
			rdc_stats->rcr_sha_par++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
			    NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
			rxchan_fatal = B_TRUE;
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rcr_sha_par", channel));
		}
		if (cs.bits.hdw.rbr_pre_par) {
			rdc_stats->rbr_pre_par++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
			    NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
			rxchan_fatal = B_TRUE;
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rbr_pre_par", channel));
		}
	}
	/*
	 * The following 4 status bits are for information; the system
	 * is running fine. There is no need to send FMA ereports or
	 * log messages.
	 */
	if (cs.bits.hdw.port_drop_pkt) {
		rdc_stats->port_drop_pkt++;
	}
	if (cs.bits.hdw.wred_drop) {
		rdc_stats->wred_drop++;
	}
	if (cs.bits.hdw.rbr_pre_empty) {
		rdc_stats->rbr_pre_empty++;
	}
	if (cs.bits.hdw.rcr_shadow_full) {
		rdc_stats->rcr_shadow_full++;
	}
	if (cs.bits.hdw.config_err) {
		rdc_stats->config_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "config error", channel));
	}
	if (cs.bits.hdw.rcrincon) {
		rdc_stats->rcrincon++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCRINCON);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rcrincon error", channel));
	}
	if (cs.bits.hdw.rcrfull) {
		rdc_stats->rcrfull++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCRFULL);
		rxchan_fatal = B_TRUE;
		if (rdc_stats->rcrfull < error_disp_cnt)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rcrfull error", channel));
	}
	if (cs.bits.hdw.rbr_empty) {
		/*
		 * This bit is for information; there is no need to send
		 * an FMA ereport or log a message.
		 */
		rdc_stats->rbr_empty++;
	}
	if (cs.bits.hdw.rbrfull) {
		rdc_stats->rbrfull++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBRFULL);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr_full error", channel));
	}
	if (cs.bits.hdw.rbrlogpage) {
		rdc_stats->rbrlogpage++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr logical page error", channel));
	}
	if (cs.bits.hdw.cfiglogpage) {
		rdc_stats->cfiglogpage++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: cfig logical page error", channel));
	}
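
	/*
	 * Port-level faults require IPP recovery, which only the service
	 * domain can perform; a guest domain simply reports the failure.
	 */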
	if (rxport_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rx_err_evnts: fatal error on Port #%d\n",
		    portn));
		if (isLDOMguest(nxgep)) {
			status = NXGE_ERROR;
		} else {
			status = nxge_ipp_fatal_err_recover(nxgep);
			if (status == NXGE_OK) {
				FM_SERVICE_RESTORED(nxgep);
			}
		}
	}

	if (rxchan_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rx_err_evnts: fatal error on Channel #%d\n",
		    channel));
		if (isLDOMguest(nxgep)) {
			status = NXGE_ERROR;
		} else {
			status = nxge_rxdma_fatal_err_recover(nxgep, channel);
			if (status == NXGE_OK) {
				FM_SERVICE_RESTORED(nxgep);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));

	return (status);
}
/*
 * nxge_rdc_hvio_setup
 *
 *	This code appears to set up some Hypervisor variables.
 *
 * Arguments:
 * 	nxgep
 * 	channel
 *
 * Notes:
 *	What does NIU_LP_WORKAROUND mean?
 *
 * NPI/NXGE function calls:
 *	na
 *
 * Context:
 *	Any domain
 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
static void
nxge_rdc_hvio_setup(
	nxge_t *nxgep, int channel)
{
	nxge_dma_common_t	*dma_common;
	nxge_dma_common_t	*dma_control;
	rx_rbr_ring_t		*ring;

	ring = nxgep->rx_rbr_rings->rbr_rings[channel];
	dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];

	ring->hv_set = B_FALSE;

	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
	    dma_common->orig_ioaddr_pp;
	ring->hv_rx_buf_ioaddr_size = (uint64_t)
	    dma_common->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
	    "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
	    channel, ring->hv_rx_buf_base_ioaddr_pp,
	    dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
	    dma_common->orig_alength, dma_common->orig_alength));

	dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];

	ring->hv_rx_cntl_base_ioaddr_pp =
	    (uint64_t)dma_control->orig_ioaddr_pp;
	ring->hv_rx_cntl_ioaddr_size =
	    (uint64_t)dma_control->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
	    "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
	    channel, ring->hv_rx_cntl_base_ioaddr_pp,
	    dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
	    dma_control->orig_alength, dma_control->orig_alength));
}
#endif
/*
 * nxge_map_rxdma
 *
 *	Map an RDC into our kernel space.
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel to map.
 *
 * Notes:
 *	1. Allocate & initialise a memory pool, if necessary.
 *	2. Allocate however many receive buffers are required.
 *	3. Setup buffers, descriptors, and mailbox.
 *
 * NPI/NXGE function calls:
 *	nxge_alloc_rx_mem_pool()
 *	nxge_alloc_rxb()
 *	nxge_map_rxdma_channel()
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
static nxge_status_t
nxge_map_rxdma(p_nxge_t nxgep, int channel)
{
	nxge_dma_common_t	**data;
	nxge_dma_common_t	**control;
	rx_rbr_ring_t		**rbr_ring;
	rx_rcr_ring_t		**rcr_ring;
	rx_mbox_t		**mailbox;
	uint32_t		chunks;

	nxge_status_t		status;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));

	if (!nxgep->rx_buf_pool_p) {
		if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_map_rxdma: buf not allocated"));
			return (NXGE_ERROR);
		}
	}

	if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
		return (NXGE_ERROR);

	/*
	 * Map descriptors from the buffer pools for each dma channel.
	 */

	/*
	 * Set up and prepare buffer blocks, descriptors
	 * and mailbox.
	 */
	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
	rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
	chunks = nxgep->rx_buf_pool_p->num_chunks[channel];

	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
	rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];

	mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];

	status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
	    chunks, control, rcr_ring, mailbox);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
		    "returned 0x%x",
		    channel, status));
		return (status);
	}
	nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
	nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
	nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
	    &nxgep->statsp->rdc_stats[channel];

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (!isLDOMguest(nxgep))
		nxge_rdc_hvio_setup(nxgep, channel);
#endif

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));

	return (status);
}
static void
nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
{
	rx_rbr_ring_t	*rbr_ring;
	rx_rcr_ring_t	*rcr_ring;
	rx_mbox_t	*mailbox;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel));

	if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings ||
	    !nxgep->rx_mbox_areas_p)
		return;

	rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
	rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
	mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel];

	if (!rbr_ring || !rcr_ring || !mailbox)
		return;

	(void) nxge_unmap_rxdma_channel(
	    nxgep, channel, rbr_ring, rcr_ring, mailbox);

	nxge_free_rxb(nxgep, channel);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma"));
}
nxge_status_t
nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
    uint32_t num_chunks,
    p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
    p_rx_mbox_t *rx_mbox_p)
{
	int	status = NXGE_OK;

	/*
	 * Set up and prepare buffer blocks, descriptors
	 * and mailbox.
	 */
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel (channel %d)", channel));
	/*
	 * Receive buffer blocks
	 */
	status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
	    dma_buf_p, rbr_p, num_chunks);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_map_rxdma_channel (channel %d): "
		    "map buffer failed 0x%x", channel, status));
		goto nxge_map_rxdma_channel_exit;
	}

	/*
	 * Receive block ring, completion ring and mailbox.
	 */
	status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
	    dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_map_rxdma_channel (channel %d): "
		    "map config failed 0x%x", channel, status));
		goto nxge_map_rxdma_channel_fail2;
	}

	goto nxge_map_rxdma_channel_exit;

nxge_map_rxdma_channel_fail3:
	/* Free rbr, rcr */
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_map_rxdma_channel: free rbr/rcr "
	    "(status 0x%x channel %d)",
	    status, channel));
	nxge_unmap_rxdma_channel_cfg_ring(nxgep,
	    *rcr_p, *rx_mbox_p);

nxge_map_rxdma_channel_fail2:
	/* Free buffer blocks */
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_map_rxdma_channel: free rx buffers"
	    "(nxgep 0x%x status 0x%x channel %d)",
	    nxgep, status, channel));
	nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);

	status = NXGE_ERROR;

nxge_map_rxdma_channel_exit:
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma_channel: "
	    "(nxgep 0x%x status 0x%x channel %d)",
	    nxgep, status, channel));

	return (status);
}
/*ARGSUSED*/
static void
nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_unmap_rxdma_channel (channel %d)", channel));

	/*
	 * unmap receive block ring, completion ring and mailbox.
	 */
	(void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
	    rcr_p, rx_mbox_p);

	/* unmap buffer blocks */
	(void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
}
/*ARGSUSED*/
static nxge_status_t
nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
    p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
{
	p_rx_rbr_ring_t 	rbrp;
	p_rx_rcr_ring_t 	rcrp;
	p_rx_mbox_t 		mboxp;
	p_nxge_dma_common_t 	cntl_dmap;
	p_nxge_dma_common_t 	dmap;
	p_rx_msg_t 		*rx_msg_ring;
	p_rx_msg_t 		rx_msg_p;
	p_rbr_cfig_a_t		rcfga_p;
	p_rbr_cfig_b_t		rcfgb_p;
	p_rcrcfig_a_t		cfga_p;
	p_rcrcfig_b_t		cfgb_p;
	p_rxdma_cfig1_t		cfig1_p;
	p_rxdma_cfig2_t		cfig2_p;
	p_rbr_kick_t		kick_p;
	uint32_t		dmaaddrp;
	uint32_t		*rbr_vaddrp;
	uint32_t		bkaddr;
	nxge_status_t		status = NXGE_OK;
	int			i;
	uint32_t 		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	/* Map in the receive block ring */
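	/*
	 * Each RBR descriptor is a 4-byte block address (the buffer's
	 * DMA address shifted right by RBR_BKADDR_SHIFT), hence the
	 * 4-byte entry size passed to nxge_setup_dma_common() below.
	 */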
	rbrp = *rbr_p;
	dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
	/*
	 * Zero out buffer block ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	rcfga_p = &(rbrp->rbr_cfga);
	rcfgb_p = &(rbrp->rbr_cfgb);
	kick_p = &(rbrp->rbr_kick);

	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
	rcfga_p->value = (rbrp->rbr_addr &
	    (RBR_CFIG_A_STDADDR_MASK |
	    RBR_CFIG_A_STDADDR_BASE_MASK));
	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);

	rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
	rcfgb_p->bits.ldw.vld0 = 1;
	rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
	rcfgb_p->bits.ldw.vld1 = 1;
	rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
	rcfgb_p->bits.ldw.vld2 = 1;
	rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;

	/*
	 * For each buffer block, enter receive block address to the ring.
	 */
	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring: channel %d "
	    "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));

	rx_msg_ring = rbrp->rx_msg_ring;
	for (i = 0; i < rbrp->tnblocks; i++) {
		rx_msg_p = rx_msg_ring[i];
		rx_msg_p->nxgep = nxgep;
		rx_msg_p->rx_rbr_p = rbrp;
		bkaddr = (uint32_t)
		    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
		    >> RBR_BKADDR_SHIFT));
		rx_msg_p->free = B_FALSE;
		rx_msg_p->max_usage_cnt = 0xbaddcafe;

		*rbr_vaddrp++ = bkaddr;
	}

	kick_p->bits.ldw.bkadd = rbrp->rbb_max;
	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);

	rbrp->rbr_rd_index = 0;

	rbrp->rbr_consumed = 0;
	rbrp->rbr_use_bcopy = B_TRUE;
	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
	/*
	 * Do bcopy on packets greater than bcopy size once
	 * the lo threshold is reached.
	 * This lo threshold should be less than the hi threshold.
	 *
	 * Do bcopy on every packet once the hi threshold is reached.
	 */
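	/*
	 * The NXGE_RX_COPY_n thresholds are expressed in units of
	 * NXGE_RX_BCOPY_SCALE-ths of the ring. For example, assuming
	 * NXGE_RX_BCOPY_SCALE is 8: with rbb_max = 1024 and a hi
	 * threshold of NXGE_RX_COPY_6, every packet is bcopied once
	 * 1024 * 6 / 8 = 768 buffers are outstanding.
	 */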
	if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
		/* default it to use hi */
		nxge_rx_threshold_lo = nxge_rx_threshold_hi;
	}

	if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
		nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
	}
	rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;

	switch (nxge_rx_threshold_hi) {
	default:
	case NXGE_RX_COPY_NONE:
		/* Do not do bcopy at all */
		rbrp->rbr_use_bcopy = B_FALSE;
		rbrp->rbr_threshold_hi = rbrp->rbb_max;
		break;

	case NXGE_RX_COPY_1:
	case NXGE_RX_COPY_2:
	case NXGE_RX_COPY_3:
	case NXGE_RX_COPY_4:
	case NXGE_RX_COPY_5:
	case NXGE_RX_COPY_6:
	case NXGE_RX_COPY_7:
		rbrp->rbr_threshold_hi =
		    rbrp->rbb_max *
		    (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
		break;

	case NXGE_RX_COPY_ALL:
		rbrp->rbr_threshold_hi = 0;
		break;
	}

	switch (nxge_rx_threshold_lo) {
	default:
	case NXGE_RX_COPY_NONE:
		/* Do not do bcopy at all */
		if (rbrp->rbr_use_bcopy) {
			rbrp->rbr_use_bcopy = B_FALSE;
		}
		rbrp->rbr_threshold_lo = rbrp->rbb_max;
		break;

	case NXGE_RX_COPY_1:
	case NXGE_RX_COPY_2:
	case NXGE_RX_COPY_3:
	case NXGE_RX_COPY_4:
	case NXGE_RX_COPY_5:
	case NXGE_RX_COPY_6:
	case NXGE_RX_COPY_7:
		rbrp->rbr_threshold_lo =
		    rbrp->rbb_max *
		    (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
		break;

	case NXGE_RX_COPY_ALL:
		rbrp->rbr_threshold_lo = 0;
		break;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "nxge_map_rxdma_channel_cfg_ring: channel %d "
	    "rbb_max %d "
	    "rbrp->rbr_bufsize_type %d "
	    "rbb_threshold_hi %d "
	    "rbb_threshold_lo %d",
	    dma_channel,
	    rbrp->rbb_max,
	    rbrp->rbr_bufsize_type,
	    rbrp->rbr_threshold_hi,
	    rbrp->rbr_threshold_lo));

	rbrp->page_valid.value = 0;
	rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
	rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
	rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
	rbrp->page_hdl.value = 0;

	rbrp->page_valid.bits.ldw.page0 = 1;
	rbrp->page_valid.bits.ldw.page1 = 1;

	/* Map in the receive completion ring */
	rcrp = (p_rx_rcr_ring_t)
	    KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
	rcrp->rdc = dma_channel;

	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
	rcrp->comp_size = nxge_port_rcr_size;
	rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;

	rcrp->max_receive_pkts = nxge_max_rx_pkts;

	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
	    sizeof (rcr_entry_t));
	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (nxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (nxge_port_rcr_size - 1);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring: "
	    "channel %d "
	    "rbr_vaddrp $%p "
	    "rcr_desc_rd_head_p $%p "
	    "rcr_desc_rd_head_pp $%p "
	    "rcr_desc_rd_last_p $%p "
	    "rcr_desc_rd_last_pp $%p ",
	    dma_channel,
	    rbr_vaddrp,
	    rcrp->rcr_desc_rd_head_p,
	    rcrp->rcr_desc_rd_head_pp,
	    rcrp->rcr_desc_last_p,
	    rcrp->rcr_desc_last_pp));

	/*
	 * Zero out buffer block ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);
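
	/*
	 * Clamp the configured interrupt timeout and packet threshold
	 * to their supported minimums; both values are programmed into
	 * RCR configuration B below.
	 */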
	rcrp->intr_timeout = (nxgep->intr_timeout <
	    NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
	    nxgep->intr_timeout;

	rcrp->intr_threshold = (nxgep->intr_threshold <
	    NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
	    nxgep->intr_threshold;

	rcrp->full_hdr_flag = B_FALSE;

	rcrp->sw_priv_hdr_len = nxge_rdc_buf_offset;

	cfga_p = &(rcrp->rcr_cfga);
	cfgb_p = &(rcrp->rcr_cfgb);

	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
	cfga_p->value = (rcrp->rcr_addr &
	    (RCRCFIG_A_STADDR_MASK |
	    RCRCFIG_A_STADDR_BASE_MASK));

	cfga_p->value |= ((uint64_t)rcrp->comp_size <<
	    RCRCFIG_A_LEN_SHIF);

	/*
	 * Timeout should be set based on the system clock divider.
	 * A timeout value of 1 assumes that the
	 * granularity (1000) is 3 microseconds running at 300MHz.
	 */
	cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
	cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
	cfgb_p->bits.ldw.entout = 1;

	/* Map in the mailbox */
	mboxp = (p_rx_mbox_t)
	    KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
	dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
	cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
	cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
	cfig1_p->value = cfig2_p->value = 0;

	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring: "
	    "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
	    dma_channel, cfig1_p->value, cfig2_p->value,
	    mboxp->mbox_addr));
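
	/*
	 * The 64-bit mailbox DMA address is split across two registers:
	 * the high bits are programmed into RXDMA_CFIG1 (mbaddr_h) and
	 * the masked low bits into RXDMA_CFIG2 (mbaddr).
	 */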
	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
	    & 0xfff);
	cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;

	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
	    RXDMA_CFIG2_MBADDR_L_MASK);

	cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring: "
	    "channel %d dmaaddrp $%p "
	    "cfg1 0x%016llx cfig2 0x%016llx",
	    dma_channel, dmaaddrp,
	    cfig1_p->value, cfig2_p->value));

	cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
	if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) {
		switch (rcrp->sw_priv_hdr_len) {
		case SW_OFFSET_NO_OFFSET:
		case SW_OFFSET_64:
		case SW_OFFSET_128:
		case SW_OFFSET_192:
			cfig2_p->bits.ldw.offset =
			    rcrp->sw_priv_hdr_len;
			cfig2_p->bits.ldw.offset256 = 0;
			break;
		case SW_OFFSET_256:
		case SW_OFFSET_320:
		case SW_OFFSET_384:
		case SW_OFFSET_448:
			cfig2_p->bits.ldw.offset =
			    rcrp->sw_priv_hdr_len & 0x3;
			cfig2_p->bits.ldw.offset256 = 1;
			break;
		default:
			cfig2_p->bits.ldw.offset = SW_OFFSET_NO_OFFSET;
			cfig2_p->bits.ldw.offset256 = 0;
			break;
		}
	} else {
		cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
	}

	rbrp->rx_rcr_p = rcrp;
	rcrp->rx_rbr_p = rbrp;
	*rcr_p = rcrp;
	*rx_mbox_p = mboxp;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));

	return (status);
}
/*ARGSUSED*/
static void
nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
    p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
	    rcr_p->rdc));

	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_unmap_rxdma_channel_cfg_ring"));
}
static nxge_status_t
nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
    p_nxge_dma_common_t *dma_buf_p,
    p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
{
	p_rx_rbr_ring_t 	rbrp;
	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
	p_rx_msg_t 		*rx_msg_ring;
	p_rx_msg_t 		rx_msg_p;
	p_mblk_t 		mblk_p;

	rxring_info_t *ring_info;
	nxge_status_t status = NXGE_OK;
	int			i, j, index;
	uint32_t		size, bsize, nblocks, nmsgs;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_buf_ring: channel %d",
	    channel));

	dma_bufp = tmp_bufp = *dma_buf_p;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
	    "chunks bufp 0x%016llx",
	    channel, num_chunks, dma_bufp));

	nmsgs = 0;
	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
		    "bufp 0x%016llx nblocks %d nmsgs %d",
		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
		nmsgs += tmp_bufp->nblocks;
	}
	if (!nmsgs) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_map_rxdma_channel_buf_ring: channel %d "
		    "no msg blocks",
		    channel));
		status = NXGE_ERROR;
		goto nxge_map_rxdma_channel_buf_ring_exit;
	}

	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP);

	size = nmsgs * sizeof (p_rx_msg_t);
	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
	    KM_SLEEP);

	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
	    (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
	    (void *)nxgep->interrupt_cookie);
	rbrp->rdc = channel;
	rbrp->num_blocks = num_chunks;
	rbrp->tnblocks = nmsgs;
	rbrp->rbb_max = nmsgs;
	rbrp->rbr_max_size = nmsgs;
	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);

	/*
	 * Buffer sizes suggested by NIU architect.
	 */

	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
	rbrp->npi_pkt_buf_size0 = SIZE_256B;

	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
	rbrp->npi_pkt_buf_size1 = SIZE_1KB;

	rbrp->block_size = nxgep->rx_default_block_size;

	if (!nxgep->mac.is_jumbo) {
		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
		rbrp->npi_pkt_buf_size2 = SIZE_2KB;
	} else {
		if (rbrp->block_size >= 0x2000) {
			rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
			rbrp->npi_pkt_buf_size2 = SIZE_8KB;
		} else {
			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
			rbrp->npi_pkt_buf_size2 = SIZE_4KB;
		}
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
	    "actual rbr max %d rbb_max %d nmsgs %d "
	    "rbrp->block_size %d default_block_size %d "
	    "(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
	    channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
	    rbrp->block_size, nxgep->rx_default_block_size,
	    nxge_rbr_size, nxge_rbr_spare_size));

	/* Map in buffers from the buffer pool.  */
	index = 0;
	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
		bsize = dma_bufp->block_size;
		nblocks = dma_bufp->nblocks;
#if defined(__i386)
		ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
#else
		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
#endif
		ring_info->buffer[i].buf_index = i;
		ring_info->buffer[i].buf_size = dma_bufp->alength;
		ring_info->buffer[i].start_index = index;
#if defined(__i386)
		ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
#else
		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
#endif

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
		    "chunk %d"
		    " nblocks %d chunk_size %x block_size 0x%x "
		    "dma_bufp $%p", channel, i,
		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
		    dma_bufp));
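
		/*
		 * Carve this chunk into bsize blocks: each block gets an
		 * rx_msg_t (with its mblk) and a slot in rx_msg_ring.
		 */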
		for (j = 0; j < nblocks; j++) {
			if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
			    dma_bufp)) == NULL) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "allocb failed (index %d i %d j %d)",
				    index, i, j));
				goto nxge_map_rxdma_channel_buf_ring_fail1;
			}
			rx_msg_ring[index] = rx_msg_p;
			rx_msg_p->block_index = index;
			rx_msg_p->shifted_addr = (uint32_t)
			    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
			    RBR_BKADDR_SHIFT));

			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    "index %d j %d rx_msg_p $%p mblk %p",
			    index, j, rx_msg_p, rx_msg_p->rx_mblk_p));

			mblk_p = rx_msg_p->rx_mblk_p;
			mblk_p->b_wptr = mblk_p->b_rptr + bsize;

			rbrp->rbr_ref_cnt++;
			index++;
			rx_msg_p->buf_dma.dma_channel = channel;
		}

		rbrp->rbr_alloc_type = DDI_MEM_ALLOC;
		if (dma_bufp->contig_alloc_type) {
			rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC;
		}

		if (dma_bufp->kmem_alloc_type) {
			rbrp->rbr_alloc_type = KMEM_ALLOC;
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
		    "chunk %d"
		    " nblocks %d chunk_size %x block_size 0x%x "
		    "dma_bufp $%p",
		    channel, i,
		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
		    dma_bufp));
	}
	if (i < rbrp->num_blocks) {
		goto nxge_map_rxdma_channel_buf_ring_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "nxge_map_rxdma_channel_buf_ring: done buf init "
	    "channel %d msg block entries %d",
	    channel, index));
	ring_info->block_size_mask = bsize - 1;
	rbrp->rx_msg_ring = rx_msg_ring;
	rbrp->dma_bufp = dma_buf_p;
	rbrp->ring_info = ring_info;

	status = nxge_rxbuf_index_info_init(nxgep, rbrp);
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_map_rxdma_channel_buf_ring: "
	    "channel %d done buf info init", channel));

	/*
	 * Finally, permit nxge_freeb() to call nxge_post_page().
	 */
	rbrp->rbr_state = RBR_POSTING;

	*rbr_p = rbrp;
	goto nxge_map_rxdma_channel_buf_ring_exit;

nxge_map_rxdma_channel_buf_ring_fail1:
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
	    channel, status));

	index--;
	for (; index >= 0; index--) {
		rx_msg_p = rx_msg_ring[index];
		if (rx_msg_p != NULL) {
			freeb(rx_msg_p->rx_mblk_p);
			rx_msg_ring[index] = NULL;
		}
	}
nxge_map_rxdma_channel_buf_ring_fail:
	MUTEX_DESTROY(&rbrp->post_lock);
	MUTEX_DESTROY(&rbrp->lock);
	KMEM_FREE(ring_info, sizeof (rxring_info_t));
	KMEM_FREE(rx_msg_ring, size);
	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));

	status = NXGE_ERROR;

nxge_map_rxdma_channel_buf_ring_exit:
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));

	return (status);
}
/*ARGSUSED*/
static void
nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
    p_rx_rbr_ring_t rbr_p)
{
	p_rx_msg_t 		*rx_msg_ring;
	p_rx_msg_t 		rx_msg_p;
	rxring_info_t 		*ring_info;
	int			i;
	uint32_t		size;
	uint32_t 		num_chunks;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_unmap_rxdma_channel_buf_ring"));
	if (rbr_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
		return;
	}
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
	    rbr_p->rdc));

	rx_msg_ring = rbr_p->rx_msg_ring;
	ring_info = rbr_p->ring_info;

	if (rx_msg_ring == NULL || ring_info == NULL) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_unmap_rxdma_channel_buf_ring: "
		    "rx_msg_ring $%p ring_info $%p",
		    rx_msg_p, ring_info));
		return;
	}

	num_chunks = rbr_p->num_blocks;
	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
	    "tnblocks %d (max %d) size ptrs %d ",
	    rbr_p->rdc, num_chunks,
	    rbr_p->tnblocks, rbr_p->rbr_max_size, size));

	for (i = 0; i < rbr_p->tnblocks; i++) {
		rx_msg_p = rx_msg_ring[i];
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_unmap_rxdma_channel_buf_ring: "
		    "rx_msg_p $%p", rx_msg_p));
		if (rx_msg_p != NULL) {
			freeb(rx_msg_p->rx_mblk_p);
			rx_msg_ring[i] = NULL;
		}
	}

	/*
	 * We may no longer use the mutex <post_lock>. By setting
	 * <rbr_state> to anything but POSTING, we prevent
	 * nxge_post_page() from accessing a dead mutex.
	 */
	rbr_p->rbr_state = RBR_UNMAPPING;
	MUTEX_DESTROY(&rbr_p->post_lock);

	MUTEX_DESTROY(&rbr_p->lock);

	if (rbr_p->rbr_ref_cnt == 0) {
		/*
		 * This is the normal state of affairs.
		 * Need to free the following buffers:
		 *  - data buffers
		 *  - rx_msg ring
		 *  - ring_info
		 *  - rbr ring
		 */
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "unmap_rxdma_buf_ring: No outstanding - freeing "));
		nxge_rxdma_databuf_free(rbr_p);
		KMEM_FREE(ring_info, sizeof (rxring_info_t));
		KMEM_FREE(rx_msg_ring, size);
		KMEM_FREE(rbr_p, sizeof (*rbr_p));
	} else {
		/*
		 * Some of our buffers are still being used.
		 * Therefore, tell nxge_freeb() this ring is
		 * unmapped, so it may free <rbr_p> for us.
		 */
		rbr_p->rbr_state = RBR_UNMAPPED;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "unmap_rxdma_buf_ring: %d %s outstanding.",
		    rbr_p->rbr_ref_cnt,
		    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_unmap_rxdma_channel_buf_ring"));
}
/*
 * nxge_rxdma_hw_start_common
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	nxge_init_fzc_rx_common();
 *	nxge_init_fzc_rxdma_port();
 *
 * Registers accessed:
 *
 * Context:
 *	Service domain
 */
static nxge_status_t
nxge_rxdma_hw_start_common(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));

	/*
	 * Load the sharable parameters by writing to the
	 * function zero control registers. These FZC registers
	 * should be initialized only once for the entire chip.
	 */
	(void) nxge_init_fzc_rx_common(nxgep);

	/*
	 * Initialize the RXDMA port specific FZC control configurations.
	 * These FZC registers pertain to each port.
	 */
	(void) nxge_init_fzc_rxdma_port(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));

	return (status);
}
static nxge_status_t
nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
{
	int			i, ndmas;
	p_rx_rbr_rings_t 	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	p_rx_rcr_rings_t 	rx_rcr_rings;
	p_rx_rcr_ring_t		*rcr_rings;
	p_rx_mbox_areas_t 	rx_mbox_areas_p;
	p_rx_mbox_t		*rx_mbox_p;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_hw_start: NULL ring pointers"));
		return (NXGE_ERROR);
	}
	ndmas = rx_rbr_rings->ndmas;
	if (ndmas == 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_hw_start: no dma channel allocated"));
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));

	rbr_rings = rx_rbr_rings->rbr_rings;
	rcr_rings = rx_rcr_rings->rcr_rings;
	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
	if (rx_mbox_areas_p) {
		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
	}

	i = channel;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
	    ndmas, channel));
	status = nxge_rxdma_start_channel(nxgep, channel,
	    (p_rx_rbr_ring_t)rbr_rings[i],
	    (p_rx_rcr_ring_t)rcr_rings[i],
	    (p_rx_mbox_t)rx_mbox_p[i]);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_hw_start: disable "
		    "(status 0x%x channel %d)", status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
	    rx_rbr_rings, rx_rcr_rings));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_start: (status 0x%x)", status));

	return (status);
}
static void
nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
{
	p_rx_rbr_rings_t 	rx_rbr_rings;
	p_rx_rcr_rings_t 	rx_rcr_rings;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_hw_stop: NULL ring pointers"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_stop(channel %d)",
	    channel));
	(void) nxge_rxdma_stop_channel(nxgep, channel);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
	    rx_rbr_rings, rx_rcr_rings));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
}
static nxge_status_t
nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	/* Reset RXDMA channel, but not if you're a guest. */
	if (!isLDOMguest(nxgep)) {
		rs = npi_rxdma_cfg_rdc_reset(handle, channel);
		if (rs != NPI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_init_fzc_rdc: "
			    "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
			    channel, rs));
			return (NXGE_ERROR | rs);
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: reset done: channel %d",
		    channel));
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (isLDOMguest(nxgep))
		(void) nxge_rdc_lp_conf(nxgep, channel);
#endif

	/*
	 * Initialize the RXDMA channel specific FZC control
	 * configurations. These FZC registers are pertaining
	 * to each RX channel (logical pages).
	 */
	if (!isLDOMguest(nxgep)) {
		status = nxge_init_fzc_rxdma_channel(nxgep, channel);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rxdma_start_channel: "
			    "init fzc rxdma failed (0x%08x channel %d)",
			    status, channel));
			return (status);
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: fzc done"));
	}

	/* Set up the interrupt event masks. */
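	/*
	 * Start with only the RBR-empty event masked; the remaining
	 * events stay enabled so that errors during bring-up are still
	 * reported.
	 */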
	ent_mask.value = 0;
	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma event masks failed "
		    "(0x%08x channel %d)",
		    status, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_start_channel: "
	    "event done: channel %d (mask 0x%016llx)",
	    channel, ent_mask.value));

	/* Initialize the receive DMA control and status register */
	cs.value = 0;
	cs.bits.hdw.mex = 1;
	cs.bits.hdw.rcrthres = 1;
	cs.bits.hdw.rcrto = 1;
	cs.bits.hdw.rbr_empty = 1;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma control register failed (0x%08x channel %d",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "control done - channel %d cs 0x%016llx", channel, cs.value));

	/*
	 * Load RXDMA descriptors, buffers, mailbox,
	 * initialise the receive DMA channels and
	 * enable each DMA channel.
	 */
	status = nxge_enable_rxdma_channel(nxgep,
	    channel, rbr_p, rcr_p, mbox_p);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_start_channel: "
		    " enable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_start_channel: enabled channel %d", channel));

	if (isLDOMguest(nxgep)) {
		/* Add interrupt handler for this channel. */
		status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_rxdma_start_channel: "
			    " nxge_hio_intr_add failed (0x%08x channel %d)",
			    status, channel));
			return (status);
		}
	}

	ent_mask.value = 0;
	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
	    RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma event masks failed (0x%08x channel %d)",
		    status, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "control done - channel %d cs 0x%016llx", channel, cs.value));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));

	return (NXGE_OK);
}
static nxge_status_t
nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	if (!isLDOMguest(nxgep)) {
		/*
		 * Stop RxMAC = A.9.2.6
		 */
		if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: "
			    "Failed to disable RxMAC"));
		}

		/*
		 * Drain IPP Port = A.9.3.6
		 */
		(void) nxge_ipp_drain(nxgep);
	}

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " reset rxdma failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: reset done"));

	/* Set up the interrupt event masks. */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: event done"));

	/*
	 * Initialize the receive DMA control and status register
	 */
	cs.value = 0;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
	    " to default (all 0s) 0x%08x", cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: init rxdma"
		    " control register failed (0x%08x channel %d",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: control done"));

	/*
	 * Make sure channel is disabled.
	 */
	status = nxge_disable_rxdma_channel(nxgep, channel);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " init enable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Enable RxMAC = A.9.2.10
		 */
		if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: Rx MAC still disabled"));
		}
	}

	NXGE_DEBUG_MSG((nxgep,
	    RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));

	return (NXGE_OK);
}
nxge_status_t
nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
{
	npi_handle_t		handle;
	p_nxge_rdc_sys_stats_t	statsp;
	rx_ctl_dat_fifo_stat_t	stat;
	uint32_t		zcp_err_status;
	uint32_t		ipp_err_status;
	nxge_status_t		status = NXGE_OK;
	npi_status_t		rs = NPI_SUCCESS;
	boolean_t		my_err = B_FALSE;

	handle = nxgep->npi_handle;
	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);

	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	if (stat.bits.ldw.id_mismatch) {
		statsp->id_mismatch++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
		    NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
		/* Global fatal error encountered */
	}
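
	/*
	 * The ZCP/IPP end-of-packet error status is shared by all four
	 * ports: each port checks only its own FIFO_EOP_PORTn bit and
	 * latches the raw status if the error belongs to it.
	 */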
4501 if ((stat
.bits
.ldw
.zcp_eop_err
) || (stat
.bits
.ldw
.ipp_eop_err
)) {
4502 switch (nxgep
->mac
.portnum
) {
4504 if ((stat
.bits
.ldw
.zcp_eop_err
& FIFO_EOP_PORT0
) ||
4505 (stat
.bits
.ldw
.ipp_eop_err
& FIFO_EOP_PORT0
)) {
4507 zcp_err_status
= stat
.bits
.ldw
.zcp_eop_err
;
4508 ipp_err_status
= stat
.bits
.ldw
.ipp_eop_err
;
4512 if ((stat
.bits
.ldw
.zcp_eop_err
& FIFO_EOP_PORT1
) ||
4513 (stat
.bits
.ldw
.ipp_eop_err
& FIFO_EOP_PORT1
)) {
4515 zcp_err_status
= stat
.bits
.ldw
.zcp_eop_err
;
4516 ipp_err_status
= stat
.bits
.ldw
.ipp_eop_err
;
4520 if ((stat
.bits
.ldw
.zcp_eop_err
& FIFO_EOP_PORT2
) ||
4521 (stat
.bits
.ldw
.ipp_eop_err
& FIFO_EOP_PORT2
)) {
4523 zcp_err_status
= stat
.bits
.ldw
.zcp_eop_err
;
4524 ipp_err_status
= stat
.bits
.ldw
.ipp_eop_err
;
4528 if ((stat
.bits
.ldw
.zcp_eop_err
& FIFO_EOP_PORT3
) ||
4529 (stat
.bits
.ldw
.ipp_eop_err
& FIFO_EOP_PORT3
)) {
4531 zcp_err_status
= stat
.bits
.ldw
.zcp_eop_err
;
4532 ipp_err_status
= stat
.bits
.ldw
.ipp_eop_err
;
4536 return (NXGE_ERROR
);
4541 status
= nxge_rxdma_handle_port_errors(nxgep
, ipp_err_status
,
4543 if (status
!= NXGE_OK
)
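/*
 * Note: RX_CTL_DAT_FIFO_STAT is shared by all four ports, so
 * nxge_rxdma_handle_sys_errors() above must decide whether an EOP error
 * belongs to this instance.  The FIFO_EOP_PORTn constants are per-port
 * bit masks; e.g. port 2 claims the error only when
 *
 *	(stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
 *	(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)
 *
 * is true, in which case the raw status words are handed to
 * nxge_rxdma_handle_port_errors() for this port only.
 */
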
static nxge_status_t
nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
    uint32_t zcp_status)
{
	boolean_t		rxport_fatal = B_FALSE;
	p_nxge_rdc_sys_stats_t	statsp;
	nxge_status_t		status = NXGE_OK;
	uint8_t			portn;

	portn = nxgep->mac.portnum;
	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

	if (ipp_status & (0x1 << portn)) {
		statsp->ipp_eop_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
		rxport_fatal = B_TRUE;
	}

	if (zcp_status & (0x1 << portn)) {
		statsp->zcp_eop_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
		rxport_fatal = B_TRUE;
	}

	if (rxport_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_handle_port_error: "
		    " fatal error on Port #%d\n",
		    portn));
		status = nxge_rx_port_fatal_err_recover(nxgep);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}

	return (status);
}

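/*
 * The handler above follows the driver's usual FMA pattern: bump the
 * per-port soft-state counters, post an ereport via NXGE_FM_REPORT_ERROR,
 * attempt recovery, and call FM_SERVICE_RESTORED only when recovery
 * succeeds, so the fault manager sees the service state change.
 */
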
static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	rx_dma_ent_msk_t	ent_mask;
	p_nxge_dma_common_t	dmap;
	uint32_t		ref_cnt;
	p_rx_msg_t		rx_msg_p;
	int			i;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxDMAChannel#%d error...", channel));

	/*
	 * Stop the dma channel waits for the stop done.
	 * If the stop done bit is not set, then create
	 * an error.
	 */

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));

	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel];
	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&rbrp->lock);
	MUTEX_ENTER(&rbrp->post_lock);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));

	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_disable_rxdma_channel:failed"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));

	/* Disable interrupt */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (channel %d)",
		    channel));
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_fatal_err_recover: "
		    " reset rxdma failed (channel %d)", channel));
		goto fail;
	}

	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;

	mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel];

	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;

	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (nxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (nxge_port_rcr_size - 1);

	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);

	for (i = 0; i < rbrp->rbr_max_size; i++) {
		rx_msg_p = rbrp->rx_msg_ring[i];
		ref_cnt = rx_msg_p->ref_cnt;
		if (ref_cnt != 1) {
			if (rx_msg_p->cur_usage_cnt !=
			    rx_msg_p->max_usage_cnt) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "buf[%d]: cur_usage_cnt = %d "
				    "max_usage_cnt = %d\n", i,
				    rx_msg_p->cur_usage_cnt,
				    rx_msg_p->max_usage_cnt));
			} else {
				/* Buffer can be re-posted */
				rx_msg_p->free = B_TRUE;
				rx_msg_p->cur_usage_cnt = 0;
				rx_msg_p->max_usage_cnt = 0xbaddcafe;
				rx_msg_p->pkt_buf_size = 0;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));

	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto fail;
	}

	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxDMAChannel#%d Restored",
	    channel));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
	return (NXGE_OK);

fail:
	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (NXGE_ERROR | rs);
}

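/*
 * Ring-state arithmetic used by the recovery above, for reference: after
 * the RDC reset the RBR is considered full again, so the kick (write)
 * index is set to rbb_max - 1 and the read index to 0; the RCR head/tail
 * virtual and I/O pointers are recomputed from the descriptor area
 * (DMA_COMMON_VPTR/DMA_COMMON_IOADDR) with last = head +
 * (nxge_port_rcr_size - 1), and the completion ring memory is zeroed.
 * Buffers whose usage counts show them still in flight are only reported;
 * the rest are marked free with max_usage_cnt set to the 0xbaddcafe
 * sentinel so stale reuse is easy to spot in a dump.
 */
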
nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	nxge_status_t status = NXGE_OK;
	p_rx_rcr_ring_t rcrp;
	int rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxPort error..."));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));

	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels..."));

	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 << rdc) & set->owned.map) {
			rcrp = nxgep->rx_rcr_rings->rcr_rings[rdc];
			if (rcrp != NULL) {
				MUTEX_ENTER(&rcrp->lock);
				if (nxge_rxdma_fatal_err_recover(nxgep,
				    rdc) != NXGE_OK) {
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    "Could not recover "
					    "channel %d", rdc));
				}
				MUTEX_EXIT(&rcrp->lock);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));

	/* Reset IPP */
	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));

	/* Reset RxMAC */
	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));

	/* Re-Initialize IPP */
	if (nxge_ipp_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to init IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));

	/* Re-Initialize RxMAC */
	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));

	/* Re-enable RxMAC */
	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to enable RxMAC"));
		goto fail;
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxPort Restored"));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover"));
	return (status);

fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (status | NXGE_ERROR);
}

void
nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
{
	rx_dma_ctl_stat_t	cs;
	rx_ctl_dat_fifo_stat_t	cdfs;

	switch (err_id) {
	case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
	case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
	case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
	case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
	case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
	case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
	case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
	case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
	case NXGE_FM_EREPORT_RDMC_RCRINCON:
	case NXGE_FM_EREPORT_RDMC_RCRFULL:
	case NXGE_FM_EREPORT_RDMC_RBRFULL:
	case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
	case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
	case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
		RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
		    chan, &cs.value);
		if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
			cs.bits.hdw.rcr_ack_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
			cs.bits.hdw.dc_fifo_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
			cs.bits.hdw.rcr_sha_par = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
			cs.bits.hdw.rbr_pre_par = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
			cs.bits.hdw.rbr_tmout = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
			cs.bits.hdw.rsp_cnt_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
			cs.bits.hdw.byte_en_bus = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
			cs.bits.hdw.rsp_dat_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
			cs.bits.hdw.config_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
			cs.bits.hdw.rcrincon = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
			cs.bits.hdw.rcrfull = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
			cs.bits.hdw.rbrfull = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
			cs.bits.hdw.rbrlogpage = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
			cs.bits.hdw.cfiglogpage = 1;
#if defined(__i386)
		cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
		    cs.value);
#else
		cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
		    cs.value);
#endif
		RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
		    chan, cs.value);
		break;
	case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
	case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
	case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
		cdfs.value = 0;
		if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
			cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
		else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
			cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
		else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
			cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
#if defined(__i386)
		cmn_err(CE_NOTE,
		    "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
		    cdfs.value);
#else
		cmn_err(CE_NOTE,
		    "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
		    cdfs.value);
#endif
		NXGE_REG_WR64(nxgep->npi_handle,
		    RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
		break;
	case NXGE_FM_EREPORT_RDMC_DCF_ERR:
		break;
	case NXGE_FM_EREPORT_RDMC_RCR_ERR:
		break;
	}
}

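/*
 * nxge_rxdma_inject_err() above is a debug aid: it sets the chosen bit in
 * the RX_DMA_CTL_STAT debug register (per-channel errors) or the
 * RX_CTL_DAT_FIFO_STAT debug register (per-port errors) so the normal
 * interrupt/FMA path can be exercised without real hardware faults.
 * A minimal sketch of a call site (hypothetical -- the real trigger is a
 * driver debug hook, not shown here):
 *
 *	nxge_rxdma_inject_err(nxgep,
 *	    NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR, 0);
 */
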
void
nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
{
	rxring_info_t	*ring_info;
	int		index;
	uint32_t	chunk_size;
	uint64_t	kaddr;
	uint32_t	num_blocks;

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));

	if (rbr_p == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
		return;
	}

	if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "<== nxge_rxdma_databuf_free: DDI"));
		return;
	}

	ring_info = rbr_p->ring_info;
	if (ring_info == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_rxdma_databuf_free: NULL ring info"));
		return;
	}
	num_blocks = rbr_p->num_blocks;
	for (index = 0; index < num_blocks; index++) {
		kaddr = ring_info->buffer[index].kaddr;
		chunk_size = ring_info->buffer[index].buf_size;
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "==> nxge_rxdma_databuf_free: free chunk %d "
		    "kaddrp $%p chunk size %d",
		    index, kaddr, chunk_size));
		if (kaddr == NULL)
			continue;
		nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
		ring_info->buffer[index].kaddr = NULL;
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
}

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
extern void contig_mem_free(void *, size_t);
#endif

void
nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
{
	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));

	if (kaddr == NULL || !buf_size) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_free_buf: invalid kaddr $%p size to free %d",
		    kaddr, buf_size));
		return;
	}

	switch (alloc_type) {
	case KMEM_ALLOC:
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "==> nxge_free_buf: freeing kmem $%p size %d",
		    kaddr, buf_size));
#if defined(__i386)
		KMEM_FREE((void *)(uint32_t)kaddr, buf_size);
#else
		KMEM_FREE((void *)kaddr, buf_size);
#endif
		break;

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	case CONTIG_MEM_ALLOC:
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
		    kaddr, buf_size));
		contig_mem_free((void *)kaddr, buf_size);
		break;
#endif

	default:
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "<== nxge_free_buf: unsupported alloc type %d",
		    alloc_type));
		return;
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
}