em/emx: Integrate ifmedia flow control support.
sys/dev/netif/emx/if_emx.h
/*
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _IF_EMX_H_
#define _IF_EMX_H_

/* Tunables */

/*
 * EMX_TXD: Maximum number of Transmit Descriptors
 * Valid Range: 256-4096
 * Default Value: 512
 * This value is the number of transmit descriptors allocated by the driver.
 * Increasing this value allows the driver to queue more transmits.  Each
 * descriptor is 16 bytes.
 * Since TDLEN should be a multiple of 128 bytes, the number of transmit
 * descriptors should satisfy the following condition:
 * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
 */
#define EMX_MIN_TXD		256
#define EMX_MAX_TXD		4096
#define EMX_DEFAULT_TXD		512

/*
 * EMX_RXD: Maximum number of Receive Descriptors
 * Valid Range: 256-4096
 * Default Value: 512
 * This value is the number of receive descriptors allocated by the driver.
 * Increasing this value allows the driver to buffer more incoming packets.
 * Each descriptor is 16 bytes.  A receive buffer is also allocated for each
 * descriptor.  The maximum MTU size is 16110.
 * Since RDLEN should be a multiple of 128 bytes, the number of receive
 * descriptors should satisfy the following condition (see the illustrative
 * check below):
 * (num_rx_desc * sizeof(emx_rxdesc_t)) % 128 == 0
 */
#define EMX_MIN_RXD		256
#define EMX_MAX_RXD		4096
#define EMX_DEFAULT_RXD		512
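
/*
 * Illustrative sketch only, not part of the original header: one way to
 * verify the TDLEN/RDLEN conditions above at compile time, assuming a
 * CTASSERT()-style compile-time assertion is available where this header
 * is included.  Both descriptor types are 16 bytes, so any count that is
 * a multiple of 8 satisfies the 128-byte requirement.
 *
 *	CTASSERT((EMX_DEFAULT_TXD * sizeof(struct e1000_tx_desc)) % 128 == 0);
 *	CTASSERT((EMX_DEFAULT_RXD * sizeof(emx_rxdesc_t)) % 128 == 0);
 *
 * (emx_rxdesc_t is defined later in this file.)
 */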

/*
 * Receive Interrupt Delay Timer (Packet Timer)
 *
 * NOTE:
 * RDTR and RADV are deprecated; use ITR instead.  They are only used to
 * work around a hardware bug on certain 82573-based NICs.
 */
#define EMX_RDTR_82573		32

/*
 * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544)
 *
 * NOTE:
 * RDTR and RADV are deprecated; use ITR instead.  They are only used to
 * work around a hardware bug on certain 82573-based NICs.
 */
#define EMX_RADV_82573		64

/*
 * This parameter controls the duration of the transmit watchdog timer.
 */
#define EMX_TX_TIMEOUT		5

/* One for TX csum offloading desc, the other 2 are reserved */
#define EMX_TX_RESERVED		3

/* Large enough for 64K TSO segment */
#define EMX_TX_SPARE		33

#define EMX_TX_OACTIVE_MAX	64

/* Interrupt throttle rate */
#define EMX_DEFAULT_ITR		6000

/* Number of segments sent before writing to TX related registers */
#define EMX_DEFAULT_TXWREG	8

/*
 * This parameter controls whether or not autonegotiation is enabled.
 * 0 - Disable autonegotiation
 * 1 - Enable autonegotiation
 */
#define EMX_DO_AUTO_NEG		1

/* Tunables -- End */

#define EMX_AUTONEG_ADV_DEFAULT	(ADVERTISE_10_HALF | \
				 ADVERTISE_10_FULL | \
				 ADVERTISE_100_HALF | \
				 ADVERTISE_100_FULL | \
				 ADVERTISE_1000_FULL)

#define EMX_AUTO_ALL_MODES	0

/* PHY master/slave setting */
#define EMX_MASTER_SLAVE	e1000_ms_hw_default

/*
 * Miscellaneous constants
 */
#define EMX_VENDOR_ID			0x8086

#define EMX_BAR_MEM			PCIR_BAR(0)
#define EMX_BAR_FLASH			PCIR_BAR(1)

#define EMX_JUMBO_PBA			0x00000028
#define EMX_DEFAULT_PBA			0x00000030
#define EMX_SMARTSPEED_DOWNSHIFT	3
#define EMX_SMARTSPEED_MAX		15
#define EMX_MAX_INTR			10

#define EMX_MCAST_ADDR_MAX		128
#define EMX_FC_PAUSE_TIME		1000
#define EMX_EEPROM_APME			0x400

/*
 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN should
 * be a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary.
 * This also optimizes the cache line size effect; the hardware supports cache
 * line sizes up to 128 bytes.
 */
#define EMX_DBA_ALIGN			128

/*
 * Speed mode bit in TARC0.
 * 82571EB/82572EI only, used to improve small packet transmit performance.
 */
#define EMX_TARC_SPEED_MODE		(1 << 21)

/*
 * Multiple TX queues arbitration count mask in TARC0/TARC1.
 */
#define EMX_TARC_COUNT_MASK		0x7f

#define EMX_MAX_SCATTER			64
#define EMX_TSO_SIZE			(IP_MAXPACKET + \
					 sizeof(struct ether_vlan_header))
#define EMX_MAX_SEGSIZE			PAGE_SIZE
#define EMX_MSIX_MASK			0x01F00000	/* For 82574 use */

#define EMX_CSUM_FEATURES		(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * The 82574 has a nonstandard address for EIAC.  Since EIAC is only used
 * in MSI-X mode, and in this driver only the 82574 uses MSI-X, we can
 * handle it with this single define.
 */
#define EMX_EIAC			0x000DC

#define EMX_NRSSRK			10
#define EMX_RSSRK_SIZE			4
#define EMX_RSSRK_VAL(key, i)		(key[(i) * EMX_RSSRK_SIZE] | \
					 key[(i) * EMX_RSSRK_SIZE + 1] << 8 | \
					 key[(i) * EMX_RSSRK_SIZE + 2] << 16 | \
					 key[(i) * EMX_RSSRK_SIZE + 3] << 24)

#define EMX_NRETA			32
#define EMX_RETA_SIZE			4
#define EMX_RETA_RINGIDX_SHIFT		7
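
/*
 * Illustrative sketch only, not the driver's actual RSS setup: programming
 * the RSS key and redirection table with the macros above could look
 * roughly like the following, assuming the shared e1000 register accessors
 * E1000_WRITE_REG(), E1000_RSSRK() and E1000_RETA(), a 40-byte key[] array
 * and an emx_softc pointer sc.
 *
 *	int i, j;
 *
 *	for (i = 0; i < EMX_NRSSRK; ++i)
 *		E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), EMX_RSSRK_VAL(key, i));
 *
 *	for (i = 0; i < EMX_NRETA; ++i) {
 *		uint32_t reta = 0;
 *
 *		for (j = 0; j < EMX_RETA_SIZE; ++j) {
 *			uint32_t q = (i * EMX_RETA_SIZE + j) % sc->rx_ring_cnt;
 *
 *			reta |= (q << EMX_RETA_RINGIDX_SHIFT) << (j * 8);
 *		}
 *		E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta);
 *	}
 */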

#define EMX_NRX_RING			2
#define EMX_NTX_RING			2
#define EMX_NSERIALIZE			5

typedef union e1000_rx_desc_extended	emx_rxdesc_t;

#define rxd_bufaddr	read.buffer_addr	/* 64bits */
#define rxd_length	wb.upper.length		/* 16bits */
#define rxd_vlan	wb.upper.vlan		/* 16bits */
#define rxd_staterr	wb.upper.status_error	/* 32bits */
#define rxd_mrq		wb.lower.mrq		/* 32bits */
#define rxd_rss		wb.lower.hi_dword.rss	/* 32bits */

#define EMX_RXDMRQ_RSSTYPE_MASK	0xf
#define EMX_RXDMRQ_NO_HASH	0
#define EMX_RXDMRQ_IPV4_TCP	1
#define EMX_RXDMRQ_IPV4		2
#define EMX_RXDMRQ_IPV6_TCP	3
#define EMX_RXDMRQ_IPV6		5
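
/*
 * Illustrative sketch only, not the driver's actual RX path: the field
 * aliases and MRQ definitions above could be used to pick up the RSS hash
 * of a received packet from an extended write-back descriptor, assuming
 * rdata points to a struct emx_rxdata and i is a descriptor index.
 *
 *	const emx_rxdesc_t *desc = &rdata->rx_desc[i];
 *	uint32_t mrq = le32toh(desc->rxd_mrq);
 *	uint32_t hash = le32toh(desc->rxd_rss);
 *
 *	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
 *	case EMX_RXDMRQ_IPV4_TCP:
 *	case EMX_RXDMRQ_IPV6_TCP:
 *	case EMX_RXDMRQ_IPV4:
 *	case EMX_RXDMRQ_IPV6:
 *		break;		// hash is valid for this packet
 *	case EMX_RXDMRQ_NO_HASH:
 *	default:
 *		break;		// no usable RSS hash
 *	}
 */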

struct emx_softc;

struct emx_rxdata {
	struct lwkt_serialize	rx_serialize;
	struct emx_softc	*sc;
	int			idx;

	/*
	 * Receive definitions
	 *
	 * We have an array of num_rx_desc rx_desc entries (handled by the
	 * controller), paired with an array of receive buffers (rx_buf).
	 * The next pair to check on receive is at offset
	 * next_rx_desc_to_check.
	 */
	emx_rxdesc_t		*rx_desc;
	uint32_t		next_rx_desc_to_check;
	int			num_rx_desc;
	struct emx_rxbuf	*rx_buf;
	bus_dma_tag_t		rxtag;
	bus_dmamap_t		rx_sparemap;

	/*
	 * First/last mbuf pointers, for
	 * collecting multisegment RX packets.
	 */
	struct mbuf		*fmp;
	struct mbuf		*lmp;

	/* RX statistics */
	unsigned long		rx_pkts;

	bus_dma_tag_t		rx_desc_dtag;
	bus_dmamap_t		rx_desc_dmap;
	bus_addr_t		rx_desc_paddr;
} __cachealign;

struct emx_txdata {
	struct lwkt_serialize	tx_serialize;
	struct emx_softc	*sc;
	struct ifaltq_subque	*ifsq;
	int			idx;
	uint32_t		tx_flags;
#define EMX_TXFLAG_TSO_PULLEX	0x1
#define EMX_TXFLAG_ENABLED	0x2
#define EMX_TXFLAG_FORCECTX	0x4

	/*
	 * Transmit definitions
	 *
	 * We have an array of num_tx_desc descriptors (handled by the
	 * controller), paired with an array of transmit buffers (tx_buf).
	 * The index of the next available descriptor is next_avail_tx_desc.
	 * The number of remaining tx_desc is num_tx_desc_avail.
	 */
	struct e1000_tx_desc	*tx_desc_base;
	struct emx_txbuf	*tx_buf;
	uint32_t		next_avail_tx_desc;
	uint32_t		next_tx_to_clean;
	int			num_tx_desc_avail;
	int			num_tx_desc;
	bus_dma_tag_t		txtag;		/* dma tag for tx */
	int			spare_tx_desc;
	int			oact_tx_desc;

	/* Saved csum offloading context information */
	int			csum_flags;
	int			csum_lhlen;
	int			csum_iphlen;

	int			csum_thlen;	/* TSO */
	int			csum_mss;	/* TSO */
	int			csum_pktlen;	/* TSO */

	uint32_t		csum_txd_upper;
	uint32_t		csum_txd_lower;

	int			tx_wreg_nsegs;

	/*
	 * Variables used to reduce the TX interrupt rate and the
	 * number of the device's TX ring write requests.
	 *
	 * tx_nsegs:
	 *	Number of TX descriptors set up so far.
	 *
	 * tx_intr_nsegs:
	 *	Once tx_nsegs > tx_intr_nsegs, the RS bit will be set
	 *	in the last TX descriptor of the packet, and tx_nsegs
	 *	will be reset to 0.  So a TX interrupt and a TX ring
	 *	write request should be generated roughly every
	 *	tx_intr_nsegs TX descriptors.
	 *
	 * tx_dd[]:
	 *	Indices of the TX descriptors which have the RS bit set,
	 *	i.e. the DD bit will be set on such a TX descriptor after
	 *	the data of the TX descriptor has been transferred to the
	 *	hardware's internal packet buffer.  Only the TX descriptors
	 *	listed in tx_dd[] are checked upon TX interrupt.  This
	 *	array is used as a circular ring (see the illustrative
	 *	sketch next to EMX_INC_TXDD_IDX at the end of this file).
	 *
	 * tx_dd_tail, tx_dd_head:
	 *	Tail and head indices of the valid elements in tx_dd[].
	 *	tx_dd_tail == tx_dd_head means there are no valid
	 *	elements in tx_dd[].  tx_dd_tail points to the position
	 *	one beyond the last valid element in tx_dd[];
	 *	tx_dd_head points to the first valid element in tx_dd[].
	 */
	int			tx_intr_nsegs;
	int			tx_nsegs;
	int			tx_dd_tail;
	int			tx_dd_head;
#define EMX_TXDD_MAX	64
#define EMX_TXDD_SAFE	48	/* 48 <= val < EMX_TXDD_MAX */
	int			tx_dd[EMX_TXDD_MAX];

	struct ifsubq_watchdog	tx_watchdog;

	/* TX statistics */
	unsigned long		tx_pkts;
	unsigned long		tso_segments;
	unsigned long		tso_ctx_reused;

	bus_dma_tag_t		tx_desc_dtag;
	bus_dmamap_t		tx_desc_dmap;
	bus_addr_t		tx_desc_paddr;
} __cachealign;

struct emx_softc {
	struct arpcom		arpcom;
	struct e1000_hw		hw;
	int			flags;
#define EMX_FLAG_SHARED_INTR	0x0001
#define EMX_FLAG_HAS_MGMT	0x0004
#define EMX_FLAG_HAS_AMT	0x0008
#define EMX_FLAG_HW_CTRL	0x0010

	/* DragonFly operating-system-specific structures. */
	struct e1000_osdep	osdep;
	device_t		dev;

	bus_dma_tag_t		parent_dtag;

	struct resource		*memory;
	int			memory_rid;

	struct resource		*flash;
	int			flash_rid;

	struct resource		*intr_res;
	void			*intr_tag;
	int			intr_rid;
	int			intr_type;

	struct ifmedia		media;
	struct callout		timer;
	int			if_flags;

	/* WOL register value */
	int			wol;

	/* Multicast array memory */
	uint8_t			*mta;

	/* Info about the board itself */
	uint8_t			link_active;
	uint16_t		link_speed;
	uint16_t		link_duplex;
	uint32_t		smartspeed;
	int			int_throttle_ceil;

	int			rx_npoll_off;
	int			tx_npoll_off;

	struct lwkt_serialize	main_serialize;
	struct lwkt_serialize	*serializes[EMX_NSERIALIZE];

	int			tx_ring_cnt;
	int			tx_ring_inuse;
	struct emx_txdata	tx_data[EMX_NTX_RING];

	int			rss_debug;
	int			rx_ring_cnt;
	struct emx_rxdata	rx_data[EMX_NRX_RING];

	int			ifm_flowctrl;

	/* Misc stats maintained by the driver */
	unsigned long		rx_overruns;

	struct e1000_hw_stats	stats;
};

struct emx_txbuf {
	struct mbuf	*m_head;
	bus_dmamap_t	map;
};

struct emx_rxbuf {
	struct mbuf	*m_head;
	bus_dmamap_t	map;
	bus_addr_t	paddr;
};

#define EMX_IS_OACTIVE(tdata) \
	((tdata)->num_tx_desc_avail <= (tdata)->oact_tx_desc)

#define EMX_INC_TXDD_IDX(idx) \
do { \
	if (++(idx) == EMX_TXDD_MAX) \
		(idx) = 0; \
} while (0)
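
/*
 * Illustrative sketch only, not the driver's actual TX cleanup: consuming
 * the tx_dd[] circular ring described in struct emx_txdata, advancing
 * tx_dd_head past descriptors whose DD bit the hardware has set.  tdata is
 * assumed to point to a struct emx_txdata.
 *
 *	while (tdata->tx_dd_head != tdata->tx_dd_tail) {
 *		int idx = tdata->tx_dd[tdata->tx_dd_head];
 *		struct e1000_tx_desc *txd = &tdata->tx_desc_base[idx];
 *
 *		if ((txd->upper.fields.status & E1000_TXD_STAT_DD) == 0)
 *			break;	// hardware has not reached this one yet
 *		EMX_INC_TXDD_IDX(tdata->tx_dd_head);
 *	}
 */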

#endif	/* !_IF_EMX_H_ */