igb: Use ringmap to do MSI-X cpu assignment and fill redirect table.
[dragonfly.git] / sys/dev/netif/igb/if_igb.h
/*
 * Copyright (c) 2001-2013, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _IF_IGB_H_
#define _IF_IGB_H_
/* Tunables */

/*
 * Max ring count
 */
#define IGB_MAX_RING_I210	4
#define IGB_MAX_RING_I211	2
#define IGB_MAX_RING_I350	8
#define IGB_MAX_RING_I354	8
#define IGB_MAX_RING_82580	8
#define IGB_MAX_RING_82576	16
#define IGB_MAX_RING_82575	4
#define IGB_MIN_RING		1
#define IGB_MIN_RING_RSS	2
/*
 * Max TX/RX interrupt bits
 */
#define IGB_MAX_TXRXINT_I210	4
#define IGB_MAX_TXRXINT_I211	4
#define IGB_MAX_TXRXINT_I350	8
#define IGB_MAX_TXRXINT_I354	8
#define IGB_MAX_TXRXINT_82580	8
#define IGB_MAX_TXRXINT_82576	16
#define IGB_MAX_TXRXINT_82575	4	/* XXX not used */
#define IGB_MIN_TXRXINT		2	/* XXX VF? */
/*
 * Max IVAR count
 */
#define IGB_MAX_IVAR_I210	4
#define IGB_MAX_IVAR_I211	4
#define IGB_MAX_IVAR_I350	4
#define IGB_MAX_IVAR_I354	4
#define IGB_MAX_IVAR_82580	4
#define IGB_MAX_IVAR_82576	8
#define IGB_MAX_IVAR_VF		1
/*
 * Default number of segments received before writing to RX related registers
 */
#define IGB_DEF_RXWREG_NSEGS	32

/*
 * Default number of segments sent before writing to TX related registers
 */
#define IGB_DEF_TXWREG_NSEGS	8
/*
 * IGB_TXD: Maximum number of Transmit Descriptors
 *
 * This value is the number of transmit descriptors allocated by the driver.
 * Increasing this value allows the driver to queue more transmits.  Each
 * descriptor is 16 bytes.
 * Since TDLEN should be a multiple of 128 bytes, the number of transmit
 * descriptors should meet the following condition:
 * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
 */
#define IGB_MIN_TXD		256
#define IGB_DEFAULT_TXD		1024
#define IGB_MAX_TXD		4096
/*
 * IGB_RXD: Maximum number of Receive Descriptors
 *
 * This value is the number of receive descriptors allocated by the driver.
 * Increasing this value allows the driver to buffer more incoming packets.
 * Each descriptor is 16 bytes.  A receive buffer is also allocated for each
 * descriptor.  The maximum MTU size is 16110.
 * Since RDLEN should be a multiple of 128 bytes, the number of receive
 * descriptors should meet the following condition:
 * (num_rx_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0
 */
#define IGB_MIN_RXD		256
#define IGB_DEFAULT_RXD		512
#define IGB_MAX_RXD		4096
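
/*
 * Illustrative sketch, not part of the original header: the 128-byte
 * TDLEN/RDLEN constraint described above can be checked at compile time
 * for the default ring sizes.  This assumes CTASSERT() from <sys/systm.h>
 * and the e1000 descriptor types are visible where this header is
 * included, as they are in if_igb.c.
 */
CTASSERT((IGB_DEFAULT_TXD * sizeof(struct e1000_tx_desc)) % 128 == 0);
CTASSERT((IGB_DEFAULT_RXD * sizeof(union e1000_adv_rx_desc)) % 128 == 0);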
/*
 * This parameter controls when the driver calls the routine to reclaim
 * transmit descriptors.  Cleaning earlier seems a win.
 */
#define IGB_TX_CLEANUP_THRESHOLD(sc)	((sc)->num_tx_desc / 2)

/*
 * This parameter controls whether or not autonegotiation is enabled.
 * 0 - Disable autonegotiation
 * 1 - Enable autonegotiation
 */
#define DO_AUTO_NEG		1

/*
 * This parameter controls whether or not the driver will wait for
 * autonegotiation to complete.
 * 1 - Wait for autonegotiation to complete
 * 0 - Don't wait for autonegotiation to complete
 */
#define WAIT_FOR_AUTO_NEG_DEFAULT	0
/* Tunables -- End */

#define AUTONEG_ADV_DEFAULT	(ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
				 ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
				 ADVERTISE_1000_FULL)

#define AUTO_ALL_MODES		0

/* PHY master/slave setting */
#define IGB_MASTER_SLAVE	e1000_ms_hw_default
/*
 * Miscellaneous constants
 */
#define IGB_VENDOR_ID			0x8086

#define IGB_JUMBO_PBA			0x00000028
#define IGB_DEFAULT_PBA			0x00000030
#define IGB_SMARTSPEED_DOWNSHIFT	3
#define IGB_SMARTSPEED_MAX		15
#define IGB_MAX_LOOP			10

#define IGB_RX_PTHRESH			((hw->mac.type == e1000_i354) ? 12 : \
					 ((hw->mac.type <= e1000_82576) ? 16 : 8))
#define IGB_RX_HTHRESH			8
#define IGB_RX_WTHRESH			((hw->mac.type == e1000_82576 && \
					  sc->msix_mem_res) ? 1 : 4)

#define IGB_TX_PTHRESH			((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH			1
#define IGB_TX_WTHRESH			16

#define MAX_NUM_MULTICAST_ADDRESSES	128
#define IGB_FC_PAUSE_TIME		0x0680

#define IGB_INTR_RATE			6000
#define IGB_MSIX_RX_RATE		6000
#define IGB_MSIX_TX_RATE		4000
/*
 * TDBA/RDBA should be aligned on a 16 byte boundary, but TDLEN/RDLEN should
 * be a multiple of 128 bytes.  So we align TDBA/RDBA on a 128 byte boundary;
 * this also optimizes for the cache line size.  H/W supports cache line
 * sizes up to 128.
 */
#define IGB_DBA_ALIGN			128
/* PCI Config defines */
#define IGB_MSIX_BAR			3
#define IGB_MSIX_BAR_ALT		4

#define IGB_VFTA_SIZE			128
#define IGB_TSO_SIZE			(IP_MAXPACKET + \
					 sizeof(struct ether_vlan_header))
#define IGB_HDR_BUF			128
#define IGB_TXPBSIZE			20408
#define IGB_PKTTYPE_MASK		0x0000FFF0

#define IGB_CSUM_FEATURES		(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* One for TX csum offloading desc, the other 2 are reserved */
#define IGB_TX_RESERVED			3

/* Large enough for 64K TSO */
#define IGB_MAX_SCATTER			33
#define IGB_NRSSRK			10
#define IGB_RSSRK_SIZE			4
#define IGB_RSSRK_VAL(key, i)		(key[(i) * IGB_RSSRK_SIZE] | \
					 key[(i) * IGB_RSSRK_SIZE + 1] << 8 | \
					 key[(i) * IGB_RSSRK_SIZE + 2] << 16 | \
					 key[(i) * IGB_RSSRK_SIZE + 3] << 24)
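
/*
 * Illustrative sketch, not part of the original header: how the defines
 * above would be used to program the 10 RSS random key registers.  It is
 * assumed that toeplitz_get_key() from <net/toeplitz.h> and the shared
 * e1000 register accessors (E1000_WRITE_REG(), E1000_RSSRK()) are visible
 * here, as they are in if_igb.c.
 */
static __inline void
igb_example_set_rssrk(struct e1000_hw *hw)
{
	uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE];
	int i;

	/* Fetch the system-wide 40-byte Toeplitz key */
	toeplitz_get_key(key, sizeof(key));
	for (i = 0; i < IGB_NRSSRK; ++i) {
		/* Pack 4 key bytes (LSB first) into one 32-bit RSSRK register */
		E1000_WRITE_REG(hw, E1000_RSSRK(i), IGB_RSSRK_VAL(key, i));
	}
}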
#define IGB_NRETA			32
#define IGB_RETA_SIZE			4
#define IGB_RETA_SHIFT			0
#define IGB_RETA_SHIFT_82575		6

#define IGB_RDRTABLE_SIZE		(IGB_NRETA * IGB_RETA_SIZE)
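
/*
 * Illustrative sketch, not part of the original header: filling the RSS
 * redirect table (RETA) from a ring->queue table of IGB_RDRTABLE_SIZE
 * entries.  In the driver such a table is assumed to be produced by
 * if_ringmap_rdrtable() into the rdr_table[] member of struct igb_softc
 * (see below); E1000_WRITE_REG()/E1000_RETA() come from the shared e1000
 * code included before this header.
 */
static __inline void
igb_example_set_reta(struct e1000_hw *hw, const int rdrtable[IGB_RDRTABLE_SIZE])
{
	int i, j, shift;

	/* 82575 keeps the queue index at a different bit offset */
	shift = (hw->mac.type == e1000_82575) ?
	    IGB_RETA_SHIFT_82575 : IGB_RETA_SHIFT;
	for (i = 0; i < IGB_NRETA; ++i) {
		uint32_t reta = 0;

		/* Each 32-bit RETA register holds IGB_RETA_SIZE byte entries */
		for (j = 0; j < IGB_RETA_SIZE; ++j) {
			reta |= (uint32_t)rdrtable[i * IGB_RETA_SIZE + j] <<
			    (j * 8 + shift);
		}
		E1000_WRITE_REG(hw, E1000_RETA(i), reta);
	}
}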
#define IGB_EITR_INTVL_MASK		0x7ffc
#define IGB_EITR_INTVL_SHIFT		2
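
/*
 * Illustrative sketch, not part of the original header: converting an
 * interrupt rate (interrupts/second, e.g. IGB_MSIX_RX_RATE) into the EITR
 * interval field selected by the mask/shift above.  This assumes the
 * 82576-and-later layout, with the interval expressed in microseconds in
 * bits 14:2; the 82575 encodes its ITR differently and is not covered.
 */
static __inline uint32_t
igb_example_rate_to_eitr(int rate)
{
	uint32_t intvl;

	if (rate <= 0)
		return (0);			/* no rate limiting */
	intvl = 1000000 / rate;			/* usec between interrupts */
	intvl <<= IGB_EITR_INTVL_SHIFT;
	if (intvl == 0)
		intvl = 1 << IGB_EITR_INTVL_SHIFT;	/* don't disable */
	else if (intvl > IGB_EITR_INTVL_MASK)
		intvl = IGB_EITR_INTVL_MASK;	/* clamp to the field */
	return (intvl);
}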
/* Disable DMA Coalesce Flush */
#define IGB_DMCTLX_DCFLUSH_DIS		0x80000000
struct igb_softc;

/*
 * Bus dma information structure
 */
struct igb_dma {
	bus_addr_t		dma_paddr;
	void			*dma_vaddr;
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
};
/*
 * Transmit ring: one per queue
 */
struct igb_tx_ring {
	struct lwkt_serialize	tx_serialize;
	struct igb_softc	*sc;
	struct ifaltq_subque	*ifsq;
	uint32_t		me;
	uint32_t		tx_flags;
#define IGB_TXFLAG_TSO_IPLEN0	0x1
#define IGB_TXFLAG_ENABLED	0x2
	struct e1000_tx_desc	*tx_base;
	int			num_tx_desc;
	uint32_t		next_avail_desc;
	uint32_t		next_to_clean;
	uint32_t		*tx_hdr;
	int			tx_avail;
	struct igb_tx_buf	*tx_buf;
	bus_dma_tag_t		tx_tag;
	int			tx_nsegs;
	int			intr_nsegs;
	int			wreg_nsegs;
	int			tx_intr_vec;
	uint32_t		tx_intr_mask;
	struct ifsubq_watchdog	tx_watchdog;

	/* Soft stats */
	u_long			tx_packets;

	struct igb_dma		txdma;
	bus_dma_tag_t		tx_hdr_dtag;
	bus_dmamap_t		tx_hdr_dmap;
	bus_addr_t		tx_hdr_paddr;
	int			tx_intr_cpuid;
} __cachealign;
/*
 * Receive ring: one per queue
 */
struct igb_rx_ring {
	struct lwkt_serialize	rx_serialize;
	struct igb_softc	*sc;
	uint32_t		me;
	union e1000_adv_rx_desc	*rx_base;
	boolean_t		discard;
	int			num_rx_desc;
	uint32_t		next_to_check;
	struct igb_rx_buf	*rx_buf;
	bus_dma_tag_t		rx_tag;
	bus_dmamap_t		rx_sparemap;
	int			rx_intr_vec;
	uint32_t		rx_intr_mask;

	/*
	 * First/last mbuf pointers, for
	 * collecting multisegment RX packets.
	 */
	struct mbuf		*fmp;
	struct mbuf		*lmp;
	int			wreg_nsegs;

	struct igb_tx_ring	*rx_txr;	/* piggybacked TX ring */

	/* Soft stats */
	u_long			rx_packets;

	struct igb_dma		rxdma;
} __cachealign;
struct igb_intr_data {
	struct lwkt_serialize	*intr_serialize;
	driver_intr_t		*intr_func;
	void			*intr_hand;
	struct resource		*intr_res;
	void			*intr_funcarg;
	int			intr_rid;
	int			intr_cpuid;
	int			intr_rate;
	int			intr_use;
#define IGB_INTR_USE_RXTX	0
#define IGB_INTR_USE_STATUS	1
#define IGB_INTR_USE_RX		2
#define IGB_INTR_USE_TX		3
	const char		*intr_desc;
	char			intr_desc0[64];
};
struct igb_softc {
	struct arpcom		arpcom;
	struct e1000_hw		hw;

	struct e1000_osdep	osdep;
	device_t		dev;
	uint32_t		flags;
#define IGB_FLAG_SHARED_INTR	0x1
#define IGB_FLAG_HAS_MGMT	0x2

	bus_dma_tag_t		parent_tag;

	int			mem_rid;
	struct resource		*mem_res;

	struct ifmedia		media;
	struct callout		timer;
	int			timer_cpuid;

	int			if_flags;
	int			max_frame_size;
	int			pause_frames;
	uint16_t		vf_ifp;	/* a VF interface */

	/* Management and WOL features */
	int			wol;

	/* Info about the interface */
	uint8_t			link_active;
	uint16_t		link_speed;
	uint16_t		link_duplex;
	uint32_t		smartspeed;
	uint32_t		dma_coalesce;

	/* Multicast array pointer */
	uint8_t			*mta;

	int			serialize_cnt;
	struct lwkt_serialize	**serializes;
	struct lwkt_serialize	main_serialize;

	int			intr_type;
	uint32_t		intr_mask;
	int			sts_msix_vec;
	uint32_t		sts_intr_mask;

	/*
	 * Transmit rings
	 */
	int			tx_ring_cnt;
	int			tx_ring_msix;
	int			tx_ring_inuse;
	struct igb_tx_ring	*tx_rings;

	/*
	 * Receive rings
	 */
	int			rss_debug;
	int			rx_ring_cnt;
	int			rx_ring_msix;
	int			rx_ring_inuse;
	struct igb_rx_ring	*rx_rings;

	int			ifm_flowctrl;

	/* Misc stats maintained by the driver */
	u_long			dropped_pkts;
	u_long			mbuf_defrag_failed;
	u_long			no_tx_dma_setup;
	u_long			watchdog_events;
	u_long			rx_overruns;
	u_long			device_control;
	u_long			rx_control;
	u_long			int_mask;
	u_long			eint_mask;
	u_long			packet_buf_alloc_rx;
	u_long			packet_buf_alloc_tx;

	void			*stats;

	int			msix_mem_rid;
	struct resource		*msix_mem_res;

	int			intr_cnt;
	struct igb_intr_data	*intr_data;

	struct if_ringmap	*rx_rmap;
	struct if_ringmap	*rx_rmap_intr;
	struct if_ringmap	*tx_rmap;
	struct if_ringmap	*tx_rmap_intr;

	int			rdr_table[IGB_RDRTABLE_SIZE];
};
#define IGB_ENABLE_HWRSS(sc)	((sc)->rx_ring_cnt > 1)
#define IGB_ENABLE_HWTSS(sc)	((sc)->tx_ring_cnt > 1)
struct igb_tx_buf {
	struct mbuf	*m_head;
	bus_dmamap_t	map;		/* bus_dma map for packet */
};

struct igb_rx_buf {
	struct mbuf	*m_head;
	bus_dmamap_t	map;		/* bus_dma map for packet */
	bus_addr_t	paddr;
};
#define UPDATE_VF_REG(reg, last, cur)		\
{						\
	uint32_t new = E1000_READ_REG(hw, reg);	\
	if (new < last)				\
		cur += 0x100000000LL;		\
	last = new;				\
	cur &= 0xFFFFFFFF00000000LL;		\
	cur |= new;				\
}
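
/*
 * Illustrative sketch, not part of the original header: UPDATE_VF_REG()
 * extends a 32-bit, wrapping VF hardware counter into a 64-bit software
 * counter; `hw' must be a struct e1000_hw pointer in the calling scope.
 * E1000_VFGPRC and struct e1000_vf_stats come from the shared e1000 code
 * and their use here is an assumption for the example.
 */
static __inline void
igb_example_update_vf_gprc(struct e1000_hw *hw, struct e1000_vf_stats *stats)
{
	/* Accumulate "good packets received" across 32-bit wraparounds */
	UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc);
}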
#define IGB_I210_LINK_DELAY	1000	/* unit: ms */

#endif /* _IF_IGB_H_ */