/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
27 #ifndef _SYS_NXGE_NXGE_RXDMA_H
28 #define _SYS_NXGE_NXGE_RXDMA_H
34 #include <sys/nxge/nxge_rxdma_hw.h>
35 #include <npi_rxdma.h>
/* Default RXDMA clock divider (ticks per interrupt-timeout unit). */
#define	RXDMA_CK_DIV_DEFAULT		7500	/* 25 usec */

/*
 * Hardware RDC designer: 8 cache lines during Atlas bringup.
 */
#define	RXDMA_RED_LESS_BYTES		(8 * 64)	/* 8 cache line */
#define	RXDMA_RED_LESS_ENTRIES		(RXDMA_RED_LESS_BYTES/8)
#define	RXDMA_RED_WINDOW_DEFAULT	0
#define	RXDMA_RED_THRES_DEFAULT		0

/* Default RCR packet-count threshold and timeout for interrupt coalescing. */
#define	RXDMA_RCR_PTHRES_DEFAULT	0x20
#define	RXDMA_RCR_TO_DEFAULT		0x8

/*
 * hardware workarounds: kick 16 (was 8 before)
 */
#define	NXGE_RXDMA_POST_BATCH		16

/*
 * Buffer address helpers.
 * NOTE(review): the extracted text of RXBUF_START_ADDR had unbalanced
 * parentheses ("((a & (index * bsize))"); balanced here with the '&'
 * operator kept as-is.  '&' (mask) is unusual for a start-address
 * computation — confirm against callers before relying on this macro.
 */
#define	RXBUF_START_ADDR(a, index, bsize)	((a) & ((index) * (bsize)))
#define	RXBUF_OFFSET_FROM_START(a, start)	((start) - (a))
#define	RXBUF_64B_ALIGNED	64

/* Extra bytes reserved per receive buffer (software/header overhead). */
#define	NXGE_RXBUF_EXTRA	34
/*
 * Receive buffer thresholds and buffer types
 */
#define	NXGE_RX_BCOPY_SCALE	8	/* use 1/8 as lowest granularity */

/*
 * Threshold (in eighths of the posted buffer count) at which the driver
 * switches between bcopy'ing packets and loaning buffers upstream.
 * NOTE(review): the "typedef enum {" opener was missing from the extracted
 * text and has been restored.
 */
typedef enum {
	NXGE_RX_COPY_ALL = 0,	/* do bcopy on every packet */
	NXGE_RX_COPY_1,		/* bcopy on 1/8 of buffer posted */
	NXGE_RX_COPY_2,		/* bcopy on 2/8 of buffer posted */
	NXGE_RX_COPY_3,		/* bcopy on 3/8 of buffer posted */
	NXGE_RX_COPY_4,		/* bcopy on 4/8 of buffer posted */
	NXGE_RX_COPY_5,		/* bcopy on 5/8 of buffer posted */
	NXGE_RX_COPY_6,		/* bcopy on 6/8 of buffer posted */
	NXGE_RX_COPY_7,		/* bcopy on 7/8 of buffer posted */
	NXGE_RX_COPY_NONE	/* don't do bcopy at all */
} nxge_rxbuf_threshold_t;
76 NXGE_RBR_TYPE0
= RCR_PKTBUFSZ_0
, /* bcopy buffer size 0 (small) */
77 NXGE_RBR_TYPE1
= RCR_PKTBUFSZ_1
, /* bcopy buffer size 1 (medium) */
78 NXGE_RBR_TYPE2
= RCR_PKTBUFSZ_2
/* bcopy buffer size 2 (large) */
81 typedef struct _rdc_errlog
{
82 rdmc_par_err_log_t pre_par
;
83 rdmc_par_err_log_t sha_par
;
84 uint8_t compl_err_type
;
/*
 * Software statistics kept per receive DMA channel (RDC).
 * NOTE(review): this header was recovered from a garbled extraction;
 * counter fields from the original file are missing at the marked gaps.
 * Restore them from the original source before relying on this layout
 * (e.g. for kstat bindings) — the member offsets here are NOT authoritative.
 */
typedef struct _nxge_rx_ring_stats_t {
	/* NOTE(review): leading packet/byte/error counters lost in extraction */
	uint32_t	rx_jumbo_pkts;
	uint32_t	rx_multi_pkts;
	uint32_t	rx_mtu_pkts;

	/*
	 * Receive buffer management statistics.
	 */
	uint32_t	rx_new_pages;
	uint32_t	rx_new_mtu_pgs;
	uint32_t	rx_new_nxt_pgs;
	uint32_t	rx_reused_pgs;
	uint32_t	rx_mtu_drops;
	uint32_t	rx_nxt_drops;

	/* NOTE(review): intervening counters lost in extraction */
	uint32_t	rx_rbr_tmout;
	uint32_t	pkt_too_long_err;
	uint32_t	l4_cksum_err;
	uint32_t	fflp_soft_err;
	uint32_t	zcp_soft_err;
	uint32_t	rcr_unknown_err;

	/* Hardware/response errors reported by the DMA engine. */
	uint32_t	rsp_cnt_err;
	uint32_t	byte_en_err;
	uint32_t	byte_en_bus;
	uint32_t	rsp_dat_err;
	uint32_t	rcr_ack_err;
	uint32_t	dc_fifo_err;
	uint32_t	rcr_sha_par;	/* RCR shadow parity errors */
	uint32_t	rbr_pre_par;	/* RBR prefetch parity errors */
	uint32_t	port_drop_pkt;

	uint32_t	rbr_pre_empty;
	uint32_t	rcr_shadow_full;

	uint32_t	cfiglogpage;
	/* NOTE(review): trailing fields lost in extraction */
} nxge_rx_ring_stats_t, *p_nxge_rx_ring_stats_t;
/*
 * System-level (port-wide, not per-channel) RDC error statistics.
 * NOTE(review): one or two leading fields were lost in extraction here
 * (the original file declares more counters before id_mismatch) —
 * restore from the original source before relying on the layout.
 */
typedef struct _nxge_rdc_sys_stats {
	/* NOTE(review): leading fields lost in extraction */
	uint32_t	id_mismatch;	/* RDC ID mismatch errors */
	uint32_t	ipp_eop_err;	/* IPP end-of-packet errors */
	uint32_t	zcp_eop_err;	/* ZCP end-of-packet errors */
} nxge_rdc_sys_stats_t, *p_nxge_rdc_sys_stats_t;
/*
 * Software reserved buffer offset
 * NOTE(review): the member declaration(s) of this struct were lost in
 * extraction — an empty struct is not valid strict C.  Restore the
 * member list from the original source before use.
 */
typedef struct _nxge_rxbuf_off_hdr_t {
	/* NOTE(review): members lost in extraction */
} nxge_rxbuf_off_hdr_t, *p_nxge_rxbuf_off_hdr_t;
167 typedef struct _rx_msg_t
{
168 nxge_os_dma_common_t buf_dma
;
169 nxge_os_mutex_t lock
;
170 struct _nxge_t
*nxgep
;
171 struct _rx_rbr_ring_t
*rx_rbr_p
;
172 boolean_t spare_in_use
;
175 #ifdef RXBUFF_USE_SEPARATE_UP_CNTR
176 uint32_t pass_up_cnt
;
179 nxge_os_frtn_t freeb
;
180 size_t bytes_arrived
;
181 size_t bytes_expected
;
183 uint32_t block_index
;
184 uint32_t pkt_buf_size
;
185 uint32_t pkt_buf_size_code
;
186 uint32_t max_pkt_bufs
;
187 uint32_t cur_usage_cnt
;
188 uint32_t max_usage_cnt
;
191 uint32_t shifted_addr
;
192 boolean_t use_buf_pool
;
194 boolean_t rx_use_bcopy
;
195 } rx_msg_t
, *p_rx_msg_t
;
197 typedef struct _rx_dma_handle_t
{
198 nxge_os_dma_handle_t dma_handle
; /* DMA handle */
199 nxge_os_acc_handle_t acc_handle
; /* DMA memory handle */
200 npi_handle_t npi_handle
;
201 } rx_dma_handle_t
, *p_rx_dma_handle_t
;
204 /* Receive Completion Ring */
205 typedef struct _rx_rcr_ring_t
{
206 nxge_os_dma_common_t rcr_desc
;
208 struct _nxge_t
*nxgep
;
210 p_nxge_rx_ring_stats_t rdc_stats
;
212 boolean_t poll_flag
; /* B_TRUE, if polling mode */
214 rcrcfig_a_t rcr_cfga
;
215 rcrcfig_b_t rcr_cfgb
;
217 nxge_os_mutex_t lock
;
220 boolean_t full_hdr_flag
; /* 1: 18 bytes header */
221 uint16_t sw_priv_hdr_len
; /* 0 - 192 bytes (SW) */
222 uint32_t comp_size
; /* # of RCR entries */
224 uint_t comp_wrap_mask
;
225 uint_t comp_rd_index
;
226 uint_t comp_wt_index
;
228 p_rcr_entry_t rcr_desc_first_p
;
229 p_rcr_entry_t rcr_desc_first_pp
;
230 p_rcr_entry_t rcr_desc_last_p
;
231 p_rcr_entry_t rcr_desc_last_pp
;
233 p_rcr_entry_t rcr_desc_rd_head_p
; /* software next read */
234 p_rcr_entry_t rcr_desc_rd_head_pp
;
236 uint64_t rcr_tail_pp
;
237 uint64_t rcr_head_pp
;
238 struct _rx_rbr_ring_t
*rx_rbr_p
;
239 uint32_t intr_timeout
;
240 uint32_t intr_threshold
;
241 uint64_t max_receive_pkts
;
242 mac_ring_handle_t rcr_mac_handle
;
243 uint64_t rcr_gen_num
;
244 uint32_t rcvd_pkt_bytes
; /* Received bytes of a packet */
248 } rx_rcr_ring_t
, *p_rx_rcr_ring_t
;
/* Buffer index information */
/*
 * NOTE(review): recovered from a garbled extraction; fields before and
 * after start_index were lost — restore from the original source before
 * relying on this layout.
 */
typedef struct _rxbuf_index_info_t {
	/* NOTE(review): leading fields lost in extraction */
	uint32_t	start_index;
	/* NOTE(review): trailing fields lost in extraction */
} rxbuf_index_info_t, *p_rxbuf_index_info_t;
262 * Buffer index information
264 typedef struct _rxring_info_t
{
265 uint32_t hint
[RCR_N_PKTBUF_SZ
];
266 uint32_t block_size_mask
;
267 uint16_t max_iterations
;
268 rxbuf_index_info_t buffer
[NXGE_DMA_BLOCK
];
269 } rxring_info_t
, *p_rxring_info_t
;
/*
 * Life-cycle states of a receive buffer ring.
 * NOTE(review): the "typedef enum {" opener and the closing
 * "} rbr_state_t;" were missing from the extracted text; the type name
 * is grounded by its later use for the rbr_state field.
 */
typedef enum {
	RBR_POSTING = 1,	/* We may post rx buffers. */
	RBR_UNMAPPING,		/* We are in the process of unmapping. */
	RBR_UNMAPPED		/* The ring is unmapped. */
} rbr_state_t;
279 /* Receive Buffer Block Ring */
280 typedef struct _rx_rbr_ring_t
{
281 nxge_os_dma_common_t rbr_desc
;
282 p_rx_msg_t
*rx_msg_ring
;
283 p_nxge_dma_common_t
*dma_bufp
;
284 rbr_cfig_a_t rbr_cfga
;
285 rbr_cfig_b_t rbr_cfgb
;
287 log_page_vld_t page_valid
;
288 log_page_mask_t page_mask_1
;
289 log_page_mask_t page_mask_2
;
290 log_page_value_t page_value_1
;
291 log_page_value_t page_value_2
;
292 log_page_relo_t page_reloc_1
;
293 log_page_relo_t page_reloc_2
;
294 log_page_hdl_t page_hdl
;
298 nxge_os_mutex_t lock
;
299 nxge_os_mutex_t post_lock
;
301 struct _nxge_t
*nxgep
;
306 uint_t rbr_wrap_mask
;
312 uint_t pkt_buf_size0
;
313 uint_t pkt_buf_size0_bytes
;
314 uint_t npi_pkt_buf_size0
;
315 uint_t pkt_buf_size1
;
316 uint_t pkt_buf_size1_bytes
;
317 uint_t npi_pkt_buf_size1
;
318 uint_t pkt_buf_size2
;
319 uint_t pkt_buf_size2_bytes
;
320 uint_t npi_pkt_buf_size2
;
322 uint32_t *rbr_desc_vp
;
324 p_rx_rcr_ring_t rx_rcr_p
;
329 rxring_info_t
*ring_info
;
330 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
331 uint64_t hv_rx_buf_base_ioaddr_pp
;
332 uint64_t hv_rx_buf_ioaddr_size
;
333 uint64_t hv_rx_cntl_base_ioaddr_pp
;
334 uint64_t hv_rx_cntl_ioaddr_size
;
338 uint_t rbr_threshold_hi
;
339 uint_t rbr_threshold_lo
;
340 nxge_rxbuf_type_t rbr_bufsize_type
;
341 boolean_t rbr_use_bcopy
;
344 * <rbr_ref_cnt> is a count of those receive buffers which
345 * have been loaned to the kernel. We will not free this
346 * ring until the reference count reaches zero (0).
348 uint32_t rbr_ref_cnt
;
349 rbr_state_t rbr_state
; /* POSTING, etc */
351 * Receive buffer allocation types:
352 * ddi_dma_mem_alloc(), contig_mem_alloc(), kmem_alloc()
354 buf_alloc_type_t rbr_alloc_type
;
355 } rx_rbr_ring_t
, *p_rx_rbr_ring_t
;
357 /* Receive Mailbox */
358 typedef struct _rx_mbox_t
{
359 nxge_os_dma_common_t rx_mbox
;
360 rxdma_cfig1_t rx_cfg1
;
361 rxdma_cfig2_t rx_cfg2
;
365 nxge_os_mutex_t lock
;
367 struct _nxge_t
*nxgep
;
369 } rx_mbox_t
, *p_rx_mbox_t
;
372 typedef struct _rx_rbr_rings_t
{
373 p_rx_rbr_ring_t
*rbr_rings
;
375 boolean_t rxbuf_allocated
;
376 } rx_rbr_rings_t
, *p_rx_rbr_rings_t
;
378 typedef struct _rx_rcr_rings_t
{
379 p_rx_rcr_ring_t
*rcr_rings
;
381 boolean_t cntl_buf_allocated
;
382 } rx_rcr_rings_t
, *p_rx_rcr_rings_t
;
384 typedef struct _rx_mbox_areas_t
{
385 p_rx_mbox_t
*rxmbox_areas
;
387 boolean_t mbox_allocated
;
388 } rx_mbox_areas_t
, *p_rx_mbox_areas_t
;
/*
 * Global register definitions per chip and they are initialized
 * using the function zero control registers.
 * NOTE(review): a field may be missing at the marked gap — confirm
 * against the original source.
 */
typedef struct _rxdma_globals {
	/* NOTE(review): field possibly lost in extraction here */
	uint16_t	rxdma_ck_div_cnt;	/* clock divider count */
	uint16_t	rxdma_red_ran_init;	/* RED random-number seed */
	uint32_t	rxdma_eing_timeout;
} rxdma_globals_t, *p_rxdma_globals;
405 * Receive DMA Prototypes.
407 nxge_status_t
nxge_init_rxdma_channels(p_nxge_t
);
408 void nxge_uninit_rxdma_channels(p_nxge_t
);
410 nxge_status_t
nxge_init_rxdma_channel(p_nxge_t
, int);
411 void nxge_uninit_rxdma_channel(p_nxge_t
, int);
413 nxge_status_t
nxge_init_rxdma_channel_rcrflush(p_nxge_t
, uint8_t);
414 nxge_status_t
nxge_reset_rxdma_channel(p_nxge_t
, uint16_t);
415 nxge_status_t
nxge_init_rxdma_channel_cntl_stat(p_nxge_t
,
416 uint16_t, p_rx_dma_ctl_stat_t
);
417 nxge_status_t
nxge_enable_rxdma_channel(p_nxge_t
,
418 uint16_t, p_rx_rbr_ring_t
, p_rx_rcr_ring_t
,
420 nxge_status_t
nxge_init_rxdma_channel_event_mask(p_nxge_t
,
421 uint16_t, p_rx_dma_ent_msk_t
);
423 nxge_status_t
nxge_rxdma_hw_mode(p_nxge_t
, boolean_t
);
424 void nxge_hw_start_rx(p_nxge_t
);
425 void nxge_fixup_rxdma_rings(p_nxge_t
);
426 nxge_status_t
nxge_dump_rxdma_channel(p_nxge_t
, uint8_t);
428 void nxge_rxdma_fix_channel(p_nxge_t
, uint16_t);
430 mblk_t
*nxge_rx_poll(void *, int);
431 int nxge_enable_poll(void *);
432 int nxge_disable_poll(void *);
434 void nxge_rxdma_regs_dump_channels(p_nxge_t
);
435 nxge_status_t
nxge_rxdma_handle_sys_errors(p_nxge_t
);
436 void nxge_rxdma_inject_err(p_nxge_t
, uint32_t, uint8_t);
438 extern nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t
);
439 extern nxge_status_t
nxge_alloc_rxb(p_nxge_t nxgep
, int channel
);
440 extern void nxge_free_rxb(p_nxge_t nxgep
, int channel
);
442 int nxge_get_rxring_index(p_nxge_t
, int, int);
448 #endif /* _SYS_NXGE_NXGE_RXDMA_H */