Merge commit '008b34be09d7b9c3e7a18d3ce9ef8b5c4f4ff8b8'
[unleashed.git] / kernel / drivers / net / hxge / hxge_txdma.h
blobdb40dc63ad56da9389620d8a9b02dec45d4641f1
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#ifndef	_SYS_HXGE_HXGE_TXDMA_H
#define	_SYS_HXGE_HXGE_TXDMA_H

#ifdef	__cplusplus
extern "C" {
#endif

/* Kernel taskq plus hxge TDC hardware and HPI access definitions. */
#include <sys/taskq.h>
#include <hxge_txdma_hw.h>
#include <hpi_txdma.h>
/*
 * Reclaim of completed TX descriptors is deferred until at least this
 * many are pending (reduces reclaim overhead on the hot path).
 */
#define	TXDMA_RECLAIM_PENDING_DEFAULT	64
/*
 * NOTE(review): TX_FULL_MARK appears to be a ring-full watermark factor;
 * confirm its exact use against hxge_txdma.c.
 */
#define	TX_FULL_MARK			3
/*
 * Transmit load balancing definitions: policy used to spread outbound
 * packets across TX DMA channels.
 */
#define	HXGE_TX_LB_TCPUDP	0	/* default policy */
#define	HXGE_TX_LB_HASH		1	/* from the hint data */
#define	HXGE_TX_LB_DEST_MAC	2	/* Dest. MAC */
/*
 * Descriptor ring empty:
 *	(1) head index is equal to tail index.
 *	(2) wrapped around bits are the same.
 * Descriptor ring full:
 *	(1) head index is equal to tail index.
 *	(2) wrapped around bits are different.
 *
 * All macro parameters are parenthesized in the expansion so that
 * compound argument expressions bind correctly (CERT PRE01-C).
 */
#define	TXDMA_RING_EMPTY(head, head_wrap, tail, tail_wrap)	\
	(((head) == (tail) && (head_wrap) == (tail_wrap)) ?	\
	B_TRUE : B_FALSE)

#define	TXDMA_RING_FULL(head, head_wrap, tail, tail_wrap)	\
	(((head) == (tail) && (head_wrap) != (tail_wrap)) ?	\
	B_TRUE : B_FALSE)
/*
 * Advance a descriptor index by "entries" slots, wrapping at the ring
 * boundary encoded in wrap_mask (ring size must be a power of two).
 * Parameters are parenthesized: the original expansion
 * "(index + entries) & wrap_mask" mis-binds when wrap_mask is a
 * compound expression such as (m1 | m2), because & binds tighter
 * than | (CERT PRE01-C).
 */
#define	TXDMA_DESC_NEXT_INDEX(index, entries, wrap_mask)	\
	(((index) + (entries)) & (wrap_mask))
/*
 * Per-descriptor transmit message bookkeeping.  One tx_msg_t shadows
 * each TX descriptor entry and records how the message block was
 * mapped for transmission (premapped bcopy buffer, on-the-fly DDI DMA
 * bind, or fast DVMA).
 */
typedef struct _tx_msg_t {
	hxge_os_block_mv_t	flags;		/* DMA, BCOPY, DVMA (?) */
	hxge_os_dma_common_t	buf_dma;	/* premapped buffer blocks */
	hxge_os_dma_handle_t	buf_dma_handle;	/* premapped buffer handle */
	hxge_os_dma_handle_t	dma_handle;	/* DMA handle for normal send */
	hxge_os_dma_handle_t	dvma_handle;	/* Fast DVMA handle */

	p_mblk_t		tx_message;	/* mblk chain being sent */
	uint32_t		tx_msg_size;	/* message size in bytes */
	size_t			bytes_used;	/* bytes consumed of buffer */
	int			head;
	int			tail;
	int			offset_index;	/* NOTE(review): buffer offset
						 * slot — confirm vs. users */
} tx_msg_t, *p_tx_msg_t;
/*
 * TX Statistics.
 *
 * Per-TDC (transmit DMA channel) counters: MAC-visible packet/byte
 * counts, hardware-reported channel errors, and software path counters.
 */
typedef struct _hxge_tx_ring_stats_t {
	uint64_t		opackets;	/* packets transmitted */
	uint64_t		obytes;		/* bytes transmitted */
	uint64_t		obytes_with_pad; /* bytes incl. pad bytes */
	uint64_t		oerrors;	/* total transmit errors */

	uint32_t		tx_inits;	/* channel (re)initializations */
	uint32_t		tx_no_buf;	/* send failed: no buffer */

	/* Hardware-reported TDC error events */
	uint32_t		peu_resp_err;	/* PEU response error */
	uint32_t		pkt_size_hdr_err;
	uint32_t		runt_pkt_drop_err;
	uint32_t		pkt_size_err;
	uint32_t		tx_rng_oflow;	/* TX ring overflow */
	uint32_t		pref_par_err;	/* prefetch parity error */
	uint32_t		tdr_pref_cpl_to; /* prefetch completion timeout */
	uint32_t		pkt_cpl_to;	/* packet completion timeout */
	uint32_t		invalid_sop;
	uint32_t		unexpected_sop;

	uint64_t		count_hdr_size_err;
	uint64_t		count_runt;
	uint64_t		count_abort;

	/* Software transmit-path counters */
	uint32_t		tx_starts;	/* entries into tx start */
	uint32_t		tx_no_desc;	/* send failed: no descriptor */
	uint32_t		tx_dma_bind_fail; /* DDI DMA bind failures */
	uint32_t		tx_hdr_pkts;
	uint32_t		tx_ddi_pkts;
	uint32_t		tx_jumbo_pkts;
	uint32_t		tx_max_pend;	/* max descriptors pending */
	uint32_t		tx_marks;	/* mark-bit transmissions */
	tdc_pref_par_log_t	errlog;		/* last prefetch parity log */
} hxge_tx_ring_stats_t, *p_hxge_tx_ring_stats_t;
/*
 * TDC system-level error statistics (reorder table/buffer; presumably
 * DED = double-bit and SEC = single-bit ECC events — confirm against
 * the TDC hardware manual).
 */
typedef struct _hxge_tdc_sys_stats {
	uint32_t	reord_tbl_par_err;	/* reorder table parity */
	uint32_t	reord_buf_ded_err;	/* reorder buffer DED */
	uint32_t	reord_buf_sec_err;	/* reorder buffer SEC */
} hxge_tdc_sys_stats_t, *p_hxge_tdc_sys_stats_t;
/*
 * Software state for one transmit descriptor ring (one TDC).
 * Holds the DMA-mapped descriptor area, the shadow tx_msg ring,
 * cached copies of hardware ring registers, and the producer/consumer
 * indices with their wrap bits (see TXDMA_RING_EMPTY/FULL above).
 */
typedef struct _tx_ring_t {
	hxge_os_dma_common_t	tdc_desc;	/* descriptor ring DMA area */
	struct _hxge_t		*hxgep;		/* back pointer to instance */
	mac_ring_handle_t	ring_handle;	/* GLDv3 ring handle */
	ddi_taskq_t		*taskq;		/* deferred-work task queue */
	p_tx_msg_t		tx_msg_ring;	/* shadow ring of tx_msg_t */
	uint32_t		tnblocks;
	/* Cached TDC register images */
	tdc_tdr_cfg_t		tx_ring_cfig;	/* ring configuration */
	tdc_tdr_kick_t		tx_ring_kick;	/* kick (tail) register */
	tdc_tdr_cfg_t		tx_cs;		/* control/status */
	tdc_int_mask_t		tx_evmask;	/* interrupt event mask */
	tdc_mbh_t		tx_mbox_mbh;	/* mailbox address high */
	tdc_mbl_t		tx_mbox_mbl;	/* mailbox address low */

	tdc_page_handle_t	page_hdl;	/* DMA page handle */

	hxge_os_mutex_t		lock;		/* protects ring state */
	uint16_t		index;		/* ring index in tx_rings */
	uint16_t		tdc;		/* hardware channel number */
	struct hxge_tdc_cfg	*tdc_p;		/* channel configuration */
	uint_t			tx_ring_size;	/* number of descriptors */
	uint32_t		num_chunks;

	uint_t			tx_wrap_mask;	/* index wrap mask */
	uint_t			rd_index;	/* software consumer index */
	uint_t			wr_index;	/* software producer index */
	boolean_t		wr_index_wrap;	/* producer wrap bit */
	uint_t			head_index;	/* hardware head index */
	boolean_t		head_wrap;	/* hardware head wrap bit */
	tdc_tdr_head_t		ring_head;	/* cached head register */
	tdc_tdr_kick_t		ring_kick_tail;	/* cached kick register */
	txdma_mailbox_t		tx_mbox;	/* mailbox image */

	uint_t			descs_pending;	/* descs awaiting reclaim */
	boolean_t		queueing;	/* mblks being queued (full) */

	p_mblk_t		head;		/* queued mblk chain head */
	p_mblk_t		tail;		/* queued mblk chain tail */

	p_hxge_tx_ring_stats_t	tdc_stats;	/* per-channel statistics */

	/* Fast DVMA bookkeeping */
	uint_t			dvma_wr_index;
	uint_t			dvma_rd_index;
	uint_t			dvma_pending;
	uint_t			dvma_available;
	uint_t			dvma_wrap_mask;

	hxge_os_dma_handle_t	*dvma_ring;

	mac_resource_handle_t	tx_mac_resource_handle;
} tx_ring_t, *p_tx_ring_t;
/*
 * Transmit Mailbox: per-channel DMA area the hardware writes
 * completion state into, plus its cached address registers.
 */
typedef struct _tx_mbox_t {
	hxge_os_mutex_t		lock;		/* protects mailbox state */
	uint16_t		index;		/* mailbox index */
	struct _hxge_t		*hxgep;		/* back pointer to instance */
	uint16_t		tdc;		/* hardware channel number */
	hxge_os_dma_common_t	tx_mbox;	/* mailbox DMA area */
	tdc_mbl_t		tx_mbox_l;	/* mailbox address low */
	tdc_mbh_t		tx_mbox_h;	/* mailbox address high */
} tx_mbox_t, *p_tx_mbox_t;
/*
 * Container for all TX DMA channels of one hxge instance.
 */
typedef struct _tx_rings_t {
	p_tx_ring_t		*rings;		/* array of ndmas ring ptrs */
	boolean_t		txdesc_allocated; /* descriptor memory ready */
	uint32_t		ndmas;		/* number of TX channels */
	hxge_os_dma_common_t	tdc_dma;	/* shared descriptor DMA */
	hxge_os_dma_common_t	tdc_mbox;	/* shared mailbox DMA */
} tx_rings_t, *p_tx_rings_t;
/*
 * Container for all per-channel transmit mailboxes.
 */
typedef struct _tx_mbox_areas_t {
	p_tx_mbox_t	*txmbox_areas_p;	/* array of mailbox ptrs */
	boolean_t	txmbox_allocated;	/* mailbox memory ready */
} tx_mbox_areas_t, *p_tx_mbox_areas_t;
/*
 * Transmit prototypes (implemented in hxge_txdma.c).
 */

/* Channel lifecycle: init/uninit, reset, event mask, enable */
hxge_status_t hxge_init_txdma_channels(p_hxge_t hxgep);
void hxge_uninit_txdma_channels(p_hxge_t hxgep);
void hxge_setup_dma_common(p_hxge_dma_common_t, p_hxge_dma_common_t,
	uint32_t, uint32_t);
hxge_status_t hxge_reset_txdma_channel(p_hxge_t hxgep, uint16_t channel,
	uint64_t reg_data);
hxge_status_t hxge_init_txdma_channel_event_mask(p_hxge_t hxgep,
	uint16_t channel, tdc_int_mask_t *mask_p);
hxge_status_t hxge_enable_txdma_channel(p_hxge_t hxgep, uint16_t channel,
	p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p);

/* Packet preparation and descriptor reclaim */
p_mblk_t hxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads);
int hxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p);
boolean_t hxge_txdma_reclaim(p_hxge_t hxgep,
	p_tx_ring_t tx_ring_p, int nmblks);

void hxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, boolean_t l4_cksum,
	int pkt_len, uint8_t npads, p_tx_pkt_hdr_all_t pkthdrp);

/* Hardware mode control and ring fixup/kick */
hxge_status_t hxge_txdma_hw_mode(p_hxge_t hxgep, boolean_t enable);
void hxge_txdma_stop(p_hxge_t hxgep);
void hxge_fixup_txdma_rings(p_hxge_t hxgep);
void hxge_txdma_hw_kick(p_hxge_t hxgep);
void hxge_txdma_fix_channel(p_hxge_t hxgep, uint16_t channel);
void hxge_txdma_fixup_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
	uint16_t channel);
void hxge_txdma_hw_kick_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
	uint16_t channel);

/* Hang detection and recovery */
void hxge_check_tx_hang(p_hxge_t hxgep);
void hxge_fixup_hung_txdma_rings(p_hxge_t hxgep);
void hxge_txdma_fix_hung_channel(p_hxge_t hxgep, uint16_t channel);
void hxge_txdma_fixup_hung_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
	uint16_t channel);

/* Send entry point, reclaim, hang queries, error handling */
mblk_t *hxge_tx_ring_send(void *arg, mblk_t *mp);
void hxge_reclaim_rings(p_hxge_t hxgep);
int hxge_txdma_channel_hung(p_hxge_t hxgep,
	p_tx_ring_t tx_ring_p, uint16_t channel);
int hxge_txdma_hung(p_hxge_t hxgep);
int hxge_txdma_stop_inj_err(p_hxge_t hxgep, int channel);
hxge_status_t hxge_txdma_handle_sys_errors(p_hxge_t hxgep);
247 #ifdef __cplusplus
249 #endif
251 #endif /* _SYS_HXGE_HXGE_TXDMA_H */